Compare commits

...

5 Commits

Author SHA1 Message Date
16885eaa61 Support backup and restore of hardlinks
This tracks inode/device from the stat info and creates backward
compatible snapshots that allow preserving hardlinks. Backwards
compatibility is preserved by saving a virtual inode number index in the
Link field of the file entry. Since this field was previously only used
for symlinks, this won't break old versions. Additionally, the entry
data is cloned so restoration with an old version works.

Current limitations are primarily with restore. They include:
- no command line option to prevent hard link restore
- if a file has the immutable or append only flag it will be set before
hardlinks are restored, so hardlinking will fail.
- if a partial restore includes a hardlink but not the parent
directories the hardlink will fail.

These will be solved by grouping the restore of hardlinks together
with files, prior to applying final metadata.

- if a file is changed and is being rewritten by a restore hardlinks are
not preserved.
2023-10-03 12:21:46 -05:00
bf2565b5c3 Initial implementation of file/inode flags (Linux, BSD, darwin)
Basic support for BSD and Darwin style chflags (stat flags). Applies
these flags at the end of file restore.
Supports linux style ioctl_iflags(2) in a 2 step process. Flags that
need to be applied prior to writes such as compress and especially no-COW
are applied immediately upon file open.

The flags format is backwards compatible. An attribute starting with a
null byte is used to store flags in the entry attributes table. With
an old version of duplicacy the restore of this attribute should silently
fail (effectively be ignored).

Fixes xattr restore to use O_NOFOLLOW so attributes are applied to the symlink itself.

TODO: Tests, possible option to switch off mutable/append prior to
restore of existing file similar to rsync. Does not apply attributes
or flags to the top most directory.
2023-10-03 12:15:54 -05:00
c07eef5063 Increase b2 client max file listing count to 10000
Considerable speed improvement with listing large storage.
2023-10-02 12:46:02 -05:00
2fdedcb9dd Fix exclude_by_attribute feature on POSIX
The exclude by attribute function is broken on non-Darwin POSIX: linux and freebsd.
This is because those xattrs must be prefixed by a legal namespace. The old xattr
library implicitly appended the user namespace to the xattr, but the current
official go pkg does not (which is just as well).

Also fix the test to remove the discordant old xattr dependency and provide
test cases for both darwin and non-darwin POSIX.
2023-10-02 12:41:50 -05:00
7bdd1cabd3 Use S3 ListObjectsV2 for listing files
ListObjects has been deprecated since 2016 and ListObjectsV2 with use of
explicit pagination tokens is more performant for large listings as well.

This also mitigates an issue with iDrive E2 where the StartAfter/Marker
is included in the output, leading to duplicate entries. Right now this
causes an exhaustive prune to delete chunks erroneously flagged as
duplicate, destroying the storage.
2023-10-02 12:41:50 -05:00
14 changed files with 408 additions and 87 deletions

1
go.mod
View File

@@ -15,7 +15,6 @@ require (
github.com/gilbertchen/gopass v0.0.0-20170109162249-bf9dde6d0d2c
github.com/gilbertchen/highwayhash v0.0.0-20221109044721-eeab1f4799d8
github.com/gilbertchen/keyring v0.0.0-20221004152639-1661cbebc508
github.com/gilbertchen/xattr v0.0.0-20160926155429-68e7a6806b01
github.com/hirochachacha/go-smb2 v1.1.0
github.com/klauspost/compress v1.16.3
github.com/klauspost/reedsolomon v1.9.9

2
go.sum
View File

@@ -47,8 +47,6 @@ github.com/gilbertchen/highwayhash v0.0.0-20221109044721-eeab1f4799d8 h1:ijgl4Y+
github.com/gilbertchen/highwayhash v0.0.0-20221109044721-eeab1f4799d8/go.mod h1:0lQcVva56+L1PuUFXLOsJ6arJQaU0baIH8q+IegeBhg=
github.com/gilbertchen/keyring v0.0.0-20221004152639-1661cbebc508 h1:SqTyk5KkNXp7zTdTttIZSDcTrL5uau4K/2OpKvgBZVI=
github.com/gilbertchen/keyring v0.0.0-20221004152639-1661cbebc508/go.mod h1:w/pisxUZezf2XzU9Ewjphcf6q1mZtOzKPHhJiuc8cag=
github.com/gilbertchen/xattr v0.0.0-20160926155429-68e7a6806b01 h1:LqwS9qL6SrDkp0g0iwUkETrDdtB9gTKaIbSn9imUq5o=
github.com/gilbertchen/xattr v0.0.0-20160926155429-68e7a6806b01/go.mod h1:TMlibuxKfkdtHyltooAw7+DHqRpaXs9nxaffk00Sh1Q=
github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg=
github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI=
github.com/goamz/goamz v0.0.0-20180131231218-8b901b531db8 h1:G1U0vew/vA/1/hBmf1XNeyIzJJbPFVv+kb+HPl6rj6c=

View File

@@ -396,7 +396,7 @@ type B2ListFileNamesOutput struct {
func (client *B2Client) ListFileNames(threadIndex int, startFileName string, singleFile bool, includeVersions bool) (files []*B2Entry, err error) {
maxFileCount := 1000
maxFileCount := 10_000
if singleFile {
if includeVersions {
maxFileCount = 4

View File

@@ -703,8 +703,19 @@ func (manager *BackupManager) Restore(top string, revision int, inPlace bool, qu
var localEntry *Entry
localListingOK := true
type hardLinkEntry struct {
entry *Entry
willDownload bool
}
var hardLinkTable []hardLinkEntry
var hardLinks []*Entry
for remoteEntry := range remoteListingChannel {
if remoteEntry.IsHardlinkRoot() {
hardLinkTable = append(hardLinkTable, hardLinkEntry{remoteEntry, false})
}
if len(patterns) > 0 && !MatchPath(remoteEntry.Path, patterns) {
continue
}
@@ -780,8 +791,24 @@ func (manager *BackupManager) Restore(top string, revision int, inPlace bool, qu
return 0
}
}
remoteEntry.RestoreEarlyDirFlags(fullPath)
directoryEntries = append(directoryEntries, remoteEntry)
} else {
if remoteEntry.IsHardlinkRoot() {
hardLinkTable[len(hardLinkTable)-1] = hardLinkEntry{remoteEntry, true}
} else if remoteEntry.IsHardlinkedFrom() {
i, err := strconv.ParseUint(remoteEntry.Link, 16, 64)
if err != nil {
LOG_ERROR("RESTORE_HARDLINK", "Decode error in hardlink entry, expected hex int, got %s", remoteEntry.Link)
return 0
}
if !hardLinkTable[i].willDownload {
hardLinkTable[i] = hardLinkEntry{remoteEntry, true}
} else {
hardLinks = append(hardLinks, remoteEntry)
continue
}
}
// We can't download files here since fileEntries needs to be sorted
fileEntries = append(fileEntries, remoteEntry)
totalFileSize += remoteEntry.Size
@@ -901,6 +928,17 @@ func (manager *BackupManager) Restore(top string, revision int, inPlace bool, qu
file.RestoreMetadata(fullPath, nil, setOwner)
}
for _, linkEntry := range hardLinks {
i, _ := strconv.ParseUint(linkEntry.Link, 16, 64)
sourcePath := joinPath(top, hardLinkTable[i].entry.Path)
fullPath := joinPath(top, linkEntry.Path)
LOG_INFO("RESTORE_HARDLINK", "Hard linking %s to %s", fullPath, sourcePath)
if err := os.Link(sourcePath, fullPath); err != nil {
LOG_ERROR("RESTORE_HARDLINK", "Failed to create hard link %s to %s", fullPath, sourcePath)
return 0
}
}
if deleteMode && len(patterns) == 0 {
// Reverse the order to make sure directories are empty before being deleted
for i := range extraFiles {
@@ -1052,8 +1090,13 @@ func (manager *BackupManager) UploadSnapshot(chunkOperator *ChunkOperator, top s
lastEndChunk := 0
uploadEntryInfoFunc := func(entry *Entry) error {
type hardLinkEntry struct {
entry *Entry
startChunk int
}
var hardLinkTable []hardLinkEntry
uploadEntryInfoFunc := func(entry *Entry) error {
if entry.IsFile() && entry.Size > 0 {
delta := entry.StartChunk - len(chunkHashes) + 1
if entry.StartChunk != lastChunk {
@@ -1071,10 +1114,38 @@ func (manager *BackupManager) UploadSnapshot(chunkOperator *ChunkOperator, top s
entry.StartChunk -= delta
entry.EndChunk -= delta
if entry.IsHardlinkRoot() {
LOG_DEBUG("SNAPSHOT_UPLOAD", "Hard link root %s %v %v", entry.Path, entry.StartChunk, entry.EndChunk)
hardLinkTable = append(hardLinkTable, hardLinkEntry{entry, entry.StartChunk})
}
delta = entry.EndChunk - entry.StartChunk
entry.StartChunk -= lastEndChunk
lastEndChunk = entry.EndChunk
entry.EndChunk = delta
} else if entry.IsHardlinkedFrom() {
i, err := strconv.ParseUint(entry.Link, 16, 64)
if err != nil {
LOG_ERROR("SNAPSHOT_UPLOAD", "Decode error in hardlink entry, expected hex int, got %s", entry.Link)
return err
}
targetEntry := hardLinkTable[i].entry
var startChunk, endChunk int
if targetEntry.Size > 0 {
startChunk = hardLinkTable[i].startChunk - lastEndChunk
endChunk = targetEntry.EndChunk
}
entry = entry.HardLinkTo(targetEntry, startChunk, endChunk)
if targetEntry.Size > 0 {
lastEndChunk = hardLinkTable[i].startChunk + endChunk
}
LOG_DEBUG("SNAPSHOT_UPLOAD", "Uploading cloned hardlink for %s to %s (%v %v)", entry.Path, targetEntry.Path, startChunk, endChunk)
} else if entry.IsHardlinkRoot() {
hardLinkTable = append(hardLinkTable, hardLinkEntry{entry, 0})
}
buffer.Reset()
@@ -1194,6 +1265,7 @@ func (manager *BackupManager) RestoreFile(chunkDownloader *ChunkDownloader, chun
LOG_ERROR("DOWNLOAD_CREATE", "Failed to create the file %s for in-place writing: %v", fullPath, err)
return false, nil
}
entry.RestoreEarlyFileFlags(existingFile)
n := int64(1)
// There is a go bug on Windows (https://github.com/golang/go/issues/21681) that causes Seek to fail
@@ -1377,6 +1449,7 @@ func (manager *BackupManager) RestoreFile(chunkDownloader *ChunkDownloader, chun
return false, nil
}
}
entry.RestoreEarlyFileFlags(existingFile)
existingFile.Seek(0, 0)
@@ -1459,6 +1532,7 @@ func (manager *BackupManager) RestoreFile(chunkDownloader *ChunkDownloader, chun
LOG_ERROR("DOWNLOAD_OPEN", "Failed to open file for writing: %v", err)
return false, nil
}
entry.RestoreEarlyFileFlags(newFile)
hasher := manager.config.NewFileHasher()

View File

@@ -4,6 +4,8 @@
package duplicacy
import (
"bytes"
"crypto/sha256"
"encoding/base64"
"encoding/json"
"fmt"
@@ -15,12 +17,10 @@ import (
"sort"
"strconv"
"strings"
"syscall"
"time"
"bytes"
"crypto/sha256"
"github.com/vmihailenco/msgpack"
"github.com/vmihailenco/msgpack"
)
// This is the hidden directory in the repository for storing various files.
@@ -110,15 +110,36 @@ func (entry *Entry) Copy() *Entry {
UID: entry.UID,
GID: entry.GID,
StartChunk: entry.StartChunk,
StartChunk: entry.StartChunk,
StartOffset: entry.StartOffset,
EndChunk: entry.EndChunk,
EndOffset: entry.EndOffset,
EndChunk: entry.EndChunk,
EndOffset: entry.EndOffset,
Attributes: entry.Attributes,
}
}
func (entry *Entry) HardLinkTo(target *Entry, startChunk int, endChunk int) *Entry {
return &Entry{
Path: entry.Path,
Size: target.Size,
Time: target.Time,
Mode: target.Mode,
Link: entry.Link,
Hash: target.Hash,
UID: target.UID,
GID: target.GID,
StartChunk: startChunk,
StartOffset: target.StartOffset,
EndChunk: endChunk,
EndOffset: target.EndOffset,
Attributes: target.Attributes,
}
}
// CreateEntryFromJSON creates an entry from a json description.
func (entry *Entry) UnmarshalJSON(description []byte) (err error) {
@@ -362,12 +383,12 @@ func (entry *Entry) EncodeMsgpack(encoder *msgpack.Encoder) error {
if entry.Attributes != nil {
attributes := make([]string, numberOfAttributes)
i := 0
for attribute := range *entry.Attributes {
attributes[i] = attribute
i++
}
sort.Strings(attributes)
i := 0
for attribute := range *entry.Attributes {
attributes[i] = attribute
i++
}
sort.Strings(attributes)
for _, attribute := range attributes {
err = encoder.EncodeString(attribute)
if err != nil {
@@ -380,7 +401,7 @@ func (entry *Entry) EncodeMsgpack(encoder *msgpack.Encoder) error {
}
}
return nil
return nil
}
func (entry *Entry) DecodeMsgpack(decoder *msgpack.Decoder) error {
@@ -492,14 +513,22 @@ func (entry *Entry) IsComplete() bool {
return entry.Size >= 0
}
func (entry *Entry) IsHardlinkedFrom() bool {
return entry.IsFile() && len(entry.Link) > 0 && entry.Link != "/"
}
func (entry *Entry) IsHardlinkRoot() bool {
return entry.IsFile() && entry.Link == "/"
}
func (entry *Entry) GetPermissions() os.FileMode {
return os.FileMode(entry.Mode) & fileModeMask
}
func (entry *Entry) GetParent() string {
path := entry.Path
if path != "" && path[len(path) - 1] == '/' {
path = path[:len(path) - 1]
if path != "" && path[len(path)-1] == '/' {
path = path[:len(path)-1]
}
i := strings.LastIndex(path, "/")
if i == -1 {
@@ -596,7 +625,7 @@ func ComparePaths(left string, right string) int {
for i := p; i < len(left); i++ {
c3 = left[i]
if c3 == '/' {
last1 = i == len(left) - 1
last1 = i == len(left)-1
break
}
}
@@ -606,7 +635,7 @@ func ComparePaths(left string, right string) int {
for i := p; i < len(right); i++ {
c4 = right[i]
if c4 == '/' {
last2 = i == len(right) - 1
last2 = i == len(right)-1
break
}
}
@@ -694,10 +723,27 @@ func (files FileInfoCompare) Less(i, j int) bool {
}
}
type listEntryLinkKey struct {
dev uint64
ino uint64
}
type ListingState struct {
linkIndex int
linkTable map[listEntryLinkKey]int // map unique inode details to initially found path
}
func NewListingState() *ListingState {
return &ListingState{
linkTable: make(map[listEntryLinkKey]int),
}
}
// ListEntries returns a list of entries representing file and subdirectories under the directory 'path'. Entry paths
// are normalized as relative to 'top'. 'patterns' are used to exclude or include certain files.
func ListEntries(top string, path string, patterns []string, nobackupFile string, excludeByAttribute bool, listingChannel chan *Entry) (directoryList []*Entry,
skippedFiles []string, err error) {
func ListEntries(top string, path string, patterns []string, nobackupFile string, excludeByAttribute bool,
listingState *ListingState,
listingChannel chan *Entry) (directoryList []*Entry, skippedFiles []string, err error) {
LOG_DEBUG("LIST_ENTRIES", "Listing %s", path)
@@ -771,6 +817,30 @@ func ListEntries(top string, path string, patterns []string, nobackupFile string
}
}
if f.Mode()&(os.ModeNamedPipe|os.ModeSocket|os.ModeDevice) != 0 {
LOG_WARN("LIST_SKIP", "Skipped non-regular file %s", entry.Path)
skippedFiles = append(skippedFiles, entry.Path)
continue
}
var linkKey *listEntryLinkKey
if stat, ok := f.Sys().(*syscall.Stat_t); entry.IsFile() && ok && stat != nil && stat.Nlink > 1 {
k := listEntryLinkKey{dev: uint64(stat.Dev), ino: uint64(stat.Ino)}
if linkIndex, seen := listingState.linkTable[k]; seen {
if linkIndex == -1 {
LOG_DEBUG("LIST_EXCLUDE", "%s is excluded by attribute (hardlink)", entry.Path)
continue
}
entry.Size = 0
entry.Link = strconv.FormatInt(int64(linkIndex), 16)
} else {
entry.Link = "/"
listingState.linkTable[k] = -1
linkKey = &k
}
}
entry.ReadAttributes(top)
if excludeByAttribute && entry.Attributes != nil && excludedByAttribute(*entry.Attributes) {
@@ -778,10 +848,9 @@ func ListEntries(top string, path string, patterns []string, nobackupFile string
continue
}
if f.Mode()&(os.ModeNamedPipe|os.ModeSocket|os.ModeDevice) != 0 {
LOG_WARN("LIST_SKIP", "Skipped non-regular file %s", entry.Path)
skippedFiles = append(skippedFiles, entry.Path)
continue
if linkKey != nil {
listingState.linkTable[*linkKey] = listingState.linkIndex
listingState.linkIndex++
}
if entry.IsDir() {

View File

@@ -5,6 +5,8 @@
package duplicacy
import (
"bytes"
"encoding/json"
"io/ioutil"
"math/rand"
"os"
@@ -13,11 +15,10 @@ import (
"sort"
"strings"
"testing"
"bytes"
"encoding/json"
"github.com/gilbertchen/xattr"
"github.com/vmihailenco/msgpack"
"github.com/pkg/xattr"
"github.com/vmihailenco/msgpack"
)
func TestEntrySort(t *testing.T) {
@@ -175,7 +176,7 @@ func TestEntryOrder(t *testing.T) {
directories = append(directories, CreateEntry("", 0, 0, 0))
entries := make([]*Entry, 0, 4)
entryChannel := make(chan *Entry, 1024)
entryChannel := make(chan *Entry, 1024)
entries = append(entries, CreateEntry("", 0, 0, 0))
for len(directories) > 0 {
@@ -233,8 +234,16 @@ func TestEntryOrder(t *testing.T) {
// TestEntryExcludeByAttribute tests the excludeByAttribute parameter to the ListEntries function
func TestEntryExcludeByAttribute(t *testing.T) {
if !(runtime.GOOS == "darwin" || runtime.GOOS == "linux") {
t.Skip("skipping test not darwin or linux")
var excludeAttrName string
var excludeAttrValue []byte
if runtime.GOOS == "darwin" {
excludeAttrName = "com.apple.metadata:com_apple_backup_excludeItem"
excludeAttrValue = []byte("com.apple.backupd")
} else if runtime.GOOS == "linux" || runtime.GOOS == "freebsd" || runtime.GOOS == "netbsd" || runtime.GOOS == "solaris" {
excludeAttrName = "user.duplicacy_exclude"
} else {
t.Skip("skipping test, not darwin, linux, freebsd, netbsd, or solaris")
}
testDir := filepath.Join(os.TempDir(), "duplicacy_test")
@@ -273,7 +282,7 @@ func TestEntryExcludeByAttribute(t *testing.T) {
for _, file := range DATA {
fullPath := filepath.Join(testDir, file)
if strings.Contains(file, "exclude") {
xattr.Setxattr(fullPath, "com.apple.metadata:com_apple_backup_excludeItem", []byte("com.apple.backupd"))
xattr.Set(fullPath, excludeAttrName, excludeAttrValue)
}
}
@@ -372,4 +381,4 @@ func TestEntryEncoding(t *testing.T) {
t.Error("Decoded entry is different than the original one")
}
}
}

View File

@@ -90,48 +90,40 @@ func (storage *S3Storage) ListFiles(threadIndex int, dir string) (files []string
if dir == "snapshots/" {
dir = storage.storageDir + dir
input := s3.ListObjectsInput{
input := s3.ListObjectsV2Input{
Bucket: aws.String(storage.bucket),
Prefix: aws.String(dir),
Delimiter: aws.String("/"),
MaxKeys: aws.Int64(1000),
}
output, err := storage.client.ListObjects(&input)
err := storage.client.ListObjectsV2Pages(&input, func(page *s3.ListObjectsV2Output, lastPage bool) bool {
for _, subDir := range page.CommonPrefixes {
files = append(files, (*subDir.Prefix)[len(dir):])
}
return true
})
if err != nil {
return nil, nil, err
}
for _, subDir := range output.CommonPrefixes {
files = append(files, (*subDir.Prefix)[len(dir):])
}
return files, nil, nil
} else {
dir = storage.storageDir + dir
marker := ""
for {
input := s3.ListObjectsInput{
Bucket: aws.String(storage.bucket),
Prefix: aws.String(dir),
MaxKeys: aws.Int64(1000),
Marker: aws.String(marker),
}
input := s3.ListObjectsV2Input{
Bucket: aws.String(storage.bucket),
Prefix: aws.String(dir),
MaxKeys: aws.Int64(1000),
}
output, err := storage.client.ListObjects(&input)
if err != nil {
return nil, nil, err
}
for _, object := range output.Contents {
err := storage.client.ListObjectsV2Pages(&input, func(page *s3.ListObjectsV2Output, lastPage bool) bool {
for _, object := range page.Contents {
files = append(files, (*object.Key)[len(dir):])
sizes = append(sizes, *object.Size)
}
if !*output.IsTruncated {
break
}
marker = *output.Contents[len(output.Contents)-1].Key
return true
})
if err != nil {
return nil, nil, err
}
return files, sizes, nil
}

View File

@@ -68,6 +68,7 @@ func (snapshot *Snapshot) ListLocalFiles(top string, nobackupFile string,
skippedDirectories *[]string, skippedFiles *[]string) {
var patterns []string
listingState := NewListingState()
if filtersFile == "" {
filtersFile = joinPath(GetDuplicacyPreferencePath(), "filters")
@@ -81,7 +82,7 @@ func (snapshot *Snapshot) ListLocalFiles(top string, nobackupFile string,
directory := directories[len(directories)-1]
directories = directories[:len(directories)-1]
subdirectories, skipped, err := ListEntries(top, directory.Path, patterns, nobackupFile, excludeByAttribute, listingChannel)
subdirectories, skipped, err := ListEntries(top, directory.Path, patterns, nobackupFile, excludeByAttribute, listingState, listingChannel)
if err != nil {
if directory.Path == "" {
LOG_ERROR("LIST_FAILURE", "Failed to list the repository root: %v", err)

View File

@@ -0,0 +1,53 @@
// Copyright (c) Acrosync LLC. All rights reserved.
// Free for personal use and commercial trial
// Commercial use requires per-user licenses available from https://duplicacy.com
//go:build freebsd || netbsd || darwin
// +build freebsd netbsd darwin
package duplicacy
import (
"encoding/binary"
"os"
"syscall"
)
const bsdFileFlagsKey = "\x00bf"
func (entry *Entry) ReadFileFlags(f *os.File) error {
fileInfo, err := f.Stat()
if err != nil {
return err
}
stat, ok := fileInfo.Sys().(*syscall.Stat_t)
if ok && stat.Flags != 0 {
if entry.Attributes == nil {
entry.Attributes = &map[string][]byte{}
}
v := make([]byte, 4)
binary.LittleEndian.PutUint32(v, stat.Flags)
(*entry.Attributes)[bsdFileFlagsKey] = v
LOG_DEBUG("ATTR_READ", "Read flags 0x%x for %s", stat.Flags, entry.Path)
}
return nil
}
func (entry *Entry) RestoreEarlyDirFlags(path string) error {
return nil
}
func (entry *Entry) RestoreEarlyFileFlags(f *os.File) error {
return nil
}
func (entry *Entry) RestoreLateFileFlags(f *os.File) error {
if entry.Attributes == nil {
return nil
}
if v, have := (*entry.Attributes)[bsdFileFlagsKey]; have {
LOG_DEBUG("ATTR_RESTORE", "Restore flags 0x%x for %s", binary.LittleEndian.Uint32(v), entry.Path)
return syscall.Fchflags(int(f.Fd()), int(binary.LittleEndian.Uint32(v)))
}
return nil
}

View File

@@ -8,7 +8,7 @@ import (
"strings"
)
func excludedByAttribute(attirbutes map[string][]byte) bool {
value, ok := attirbutes["com.apple.metadata:com_apple_backup_excludeItem"]
func excludedByAttribute(attributes map[string][]byte) bool {
value, ok := attributes["com.apple.metadata:com_apple_backup_excludeItem"]
return ok && strings.Contains(string(value), "com.apple.backupd")
}

View File

@@ -5,9 +5,108 @@
package duplicacy
import (
"encoding/binary"
"os"
"syscall"
"unsafe"
)
func excludedByAttribute(attirbutes map[string][]byte) bool {
_, ok := attirbutes["duplicacy_exclude"]
const (
linux_FS_SECRM_FL = 0x00000001 /* Secure deletion */
linux_FS_UNRM_FL = 0x00000002 /* Undelete */
linux_FS_COMPR_FL = 0x00000004 /* Compress file */
linux_FS_SYNC_FL = 0x00000008 /* Synchronous updates */
linux_FS_IMMUTABLE_FL = 0x00000010 /* Immutable file */
linux_FS_APPEND_FL = 0x00000020 /* writes to file may only append */
linux_FS_NODUMP_FL = 0x00000040 /* do not dump file */
linux_FS_NOATIME_FL = 0x00000080 /* do not update atime */
linux_FS_NOCOMP_FL = 0x00000400 /* Don't compress */
linux_FS_JOURNAL_DATA_FL = 0x00004000 /* Reserved for ext3 */
linux_FS_NOTAIL_FL = 0x00008000 /* file tail should not be merged */
linux_FS_DIRSYNC_FL = 0x00010000 /* dirsync behaviour (directories only) */
linux_FS_TOPDIR_FL = 0x00020000 /* Top of directory hierarchies*/
linux_FS_NOCOW_FL = 0x00800000 /* Do not cow file */
linux_FS_PROJINHERIT_FL = 0x20000000 /* Create with parents projid */
linux_FS_IOC_GETFLAGS uintptr = 0x80086601
linux_FS_IOC_SETFLAGS uintptr = 0x40086602
linuxIocFlagsFileEarly = linux_FS_SECRM_FL | linux_FS_UNRM_FL | linux_FS_COMPR_FL | linux_FS_NODUMP_FL | linux_FS_NOATIME_FL | linux_FS_NOCOMP_FL | linux_FS_JOURNAL_DATA_FL | linux_FS_NOTAIL_FL | linux_FS_NOCOW_FL
linuxIocFlagsDirEarly = linux_FS_TOPDIR_FL | linux_FS_PROJINHERIT_FL
linuxIocFlagsLate = linux_FS_SYNC_FL | linux_FS_IMMUTABLE_FL | linux_FS_APPEND_FL | linux_FS_DIRSYNC_FL
linuxFileFlagsKey = "\x00lf"
)
func ioctl(f *os.File, request uintptr, attrp *uint32) error {
argp := uintptr(unsafe.Pointer(attrp))
if _, _, errno := syscall.Syscall(syscall.SYS_IOCTL, f.Fd(), request, argp); errno != 0 {
return os.NewSyscallError("ioctl", errno)
}
return nil
}
func (entry *Entry) ReadFileFlags(f *os.File) error {
var flags uint32
if err := ioctl(f, linux_FS_IOC_GETFLAGS, &flags); err != nil {
return err
}
if flags != 0 {
if entry.Attributes == nil {
entry.Attributes = &map[string][]byte{}
}
v := make([]byte, 4)
binary.LittleEndian.PutUint32(v, flags)
(*entry.Attributes)[linuxFileFlagsKey] = v
LOG_DEBUG("ATTR_READ", "Read flags 0x%x for %s", flags, entry.Path)
}
return nil
}
func (entry *Entry) RestoreEarlyDirFlags(path string) error {
if entry.Attributes == nil {
return nil
}
if v, have := (*entry.Attributes)[linuxFileFlagsKey]; have {
flags := binary.LittleEndian.Uint32(v) & linuxIocFlagsDirEarly
f, err := os.OpenFile(path, os.O_RDONLY|syscall.O_DIRECTORY, 0)
if err != nil {
return err
}
LOG_DEBUG("ATTR_RESTORE", "Restore dir flags (early) 0x%x for %s", flags, entry.Path)
err = ioctl(f, linux_FS_IOC_SETFLAGS, &flags)
f.Close()
return err
}
return nil
}
func (entry *Entry) RestoreEarlyFileFlags(f *os.File) error {
if entry.Attributes == nil {
return nil
}
if v, have := (*entry.Attributes)[linuxFileFlagsKey]; have {
flags := binary.LittleEndian.Uint32(v) & linuxIocFlagsFileEarly
LOG_DEBUG("ATTR_RESTORE", "Restore flags (early) 0x%x for %s", flags, entry.Path)
return ioctl(f, linux_FS_IOC_SETFLAGS, &flags)
}
return nil
}
func (entry *Entry) RestoreLateFileFlags(f *os.File) error {
if entry.Attributes == nil {
return nil
}
if v, have := (*entry.Attributes)[linuxFileFlagsKey]; have {
flags := binary.LittleEndian.Uint32(v) & (linuxIocFlagsFileEarly | linuxIocFlagsDirEarly | linuxIocFlagsLate)
LOG_DEBUG("ATTR_RESTORE", "Restore flags (late) 0x%x for %s", flags, entry.Path)
return ioctl(f, linux_FS_IOC_SETFLAGS, &flags)
}
return nil
}
func excludedByAttribute(attributes map[string][]byte) bool {
_, ok := attributes["user.duplicacy_exclude"]
return ok
}

View File

@@ -48,9 +48,12 @@ func SetOwner(fullPath string, entry *Entry, fileInfo *os.FileInfo) bool {
}
func (entry *Entry) ReadAttributes(top string) {
fullPath := filepath.Join(top, entry.Path)
attributes, _ := xattr.List(fullPath)
f, err := os.OpenFile(fullPath, os.O_RDONLY|syscall.O_NOFOLLOW, 0)
if err != nil {
return
}
attributes, _ := xattr.FList(f)
if len(attributes) > 0 {
entry.Attributes = &map[string][]byte{}
for _, name := range attributes {
@@ -60,30 +63,42 @@ func (entry *Entry) ReadAttributes(top string) {
}
}
}
if err := entry.ReadFileFlags(f); err != nil {
LOG_INFO("ATTR_BACKUP", "Could not backup flags for file %s: %v", fullPath, err)
}
f.Close()
}
func (entry *Entry) SetAttributesToFile(fullPath string) {
names, _ := xattr.List(fullPath)
f, err := os.OpenFile(fullPath, os.O_RDONLY|syscall.O_NOFOLLOW, 0)
if err != nil {
return
}
names, _ := xattr.FList(f)
for _, name := range names {
newAttribute, found := (*entry.Attributes)[name]
if found {
oldAttribute, _ := xattr.Get(fullPath, name)
oldAttribute, _ := xattr.FGet(f, name)
if !bytes.Equal(oldAttribute, newAttribute) {
xattr.Set(fullPath, name, newAttribute)
xattr.FSet(f, name, newAttribute)
}
delete(*entry.Attributes, name)
} else {
xattr.Remove(fullPath, name)
xattr.FRemove(f, name)
}
}
for name, attribute := range *entry.Attributes {
xattr.Set(fullPath, name, attribute)
if len(name) > 0 && name[0] == '\x00' {
continue
}
xattr.FSet(f, name, attribute)
}
if err := entry.RestoreLateFileFlags(f); err != nil {
LOG_DEBUG("ATTR_RESTORE", "Could not restore flags for file %s: %v", fullPath, err)
}
f.Close()
}
func joinPath(components ...string) string {

View File

@@ -2,12 +2,12 @@
// Free for personal use and commercial trial
// Commercial use requires per-user licenses available from https://duplicacy.com
//go:build freebsd || netbsd || solaris
// +build freebsd netbsd solaris
package duplicacy
import (
)
func excludedByAttribute(attirbutes map[string][]byte) bool {
_, ok := attirbutes["duplicacy_exclude"]
func excludedByAttribute(attributes map[string][]byte) bool {
_, ok := attributes["user.duplicacy_exclude"]
return ok
}
}

View File

@@ -132,6 +132,18 @@ func SplitDir(fullPath string) (dir string, file string) {
return fullPath[:i+1], fullPath[i+1:]
}
func excludedByAttribute(attirbutes map[string][]byte) bool {
return false
func (entry *Entry) ReadFileFlags(f *os.File) error {
return nil
}
func (entry *Entry) RestoreEarlyDirFlags(path string) error {
return nil
}
func (entry *Entry) RestoreEarlyFileFlags(f *os.File) error {
return nil
}
func (entry *Entry) RestoreLateFileFlags(f *os.File) error {
return nil
}