Compare commits


2 Commits

Author SHA1 Message Date
16885eaa61 Support backup and restore of hardlinks
This tracks the inode/device pair from the stat info and creates
backwards-compatible snapshots that preserve hardlinks. Backwards
compatibility is achieved by saving a virtual inode number index in the
Link field of the file entry; since this field was previously only used
for symlinks, this won't break old versions. Additionally, the entry
data is cloned so that restoring with an old version still works.
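A rough sketch of that encoding follows (types are pared down from duplicacy's Entry and listing state, and the index assignment is simplified; the real code in ListEntries below defers it until exclude-by-attribute checks pass). Assumes imports of strconv and syscall:

type sketchEntry struct {
	Path string
	Size int64
	Link string
}

type sketchLinkKey struct{ dev, ino uint64 }

type sketchListingState struct {
	linkIndex int                   // next virtual inode index to hand out
	linkTable map[sketchLinkKey]int // (dev, ino) -> index of the first path seen
}

// noteHardlink records a multiply-linked file. The first path seen for a given
// (dev, ino) becomes the hardlink root (Link == "/"); later paths get Size = 0
// and a hex-encoded index back to that root. At upload time the linked entry is
// cloned from the root (HardLinkTo), so an old client still sees a complete
// file entry and simply restores an independent copy.
func (s *sketchListingState) noteHardlink(e *sketchEntry, st *syscall.Stat_t) {
	if st.Nlink <= 1 {
		return
	}
	if s.linkTable == nil {
		s.linkTable = map[sketchLinkKey]int{}
	}
	k := sketchLinkKey{uint64(st.Dev), uint64(st.Ino)}
	if idx, seen := s.linkTable[k]; seen {
		e.Size = 0
		e.Link = strconv.FormatInt(int64(idx), 16) // e.g. "a" for index 10
		return
	}
	e.Link = "/" // marks this path as the hardlink root
	s.linkTable[k] = s.linkIndex
	s.linkIndex++
}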

Current limitations are primarily with restore. They include:
- no command-line option to prevent hard link restore
- if a file has the immutable or append-only flag, it will be set before
hardlinks are restored, so hardlinking will fail.
- if a partial restore includes a hardlink but not the parent
directories, creating the hardlink will fail.

These will be solved by grouping the restore of hardlinks together
with files, prior to applying the final metadata.

- if a file is changed and is being rewritten by a restore, hardlinks are
not preserved.
2023-10-03 12:21:46 -05:00
bf2565b5c3 Initial implementation of file/inode flags (Linux, BSD, darwin)
Basic support for BSD- and Darwin-style chflags (stat flags). Applies
these flags at the end of file restore.
Supports Linux-style ioctl_iflags(2) in a two-step process. Flags that
need to be applied prior to writes, such as compress and especially no-COW,
are applied immediately upon file open.
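A condensed sketch of that ordering (the actual call sites are RestoreFile and SetAttributesToFile in the diff below; writeContent stands in for duplicacy's chunk download/write loop, and Entry with its RestoreEarlyFileFlags/RestoreLateFileFlags methods comes from the patched package):

func restoreWithFlags(entry *Entry, fullPath string, writeContent func(*os.File) error) error {
	f, err := os.OpenFile(fullPath, os.O_RDWR|os.O_CREATE, 0600)
	if err != nil {
		return err
	}
	defer f.Close()
	// Step 1: flags that only take effect if set before data is written,
	// e.g. no-COW and compression (linuxIocFlagsFileEarly).
	if err := entry.RestoreEarlyFileFlags(f); err != nil {
		return err
	}
	if err := writeContent(f); err != nil {
		return err
	}
	// Step 2: flags that would block the writes above, e.g. immutable and
	// append-only (linuxIocFlagsLate), applied once the content is in place.
	return entry.RestoreLateFileFlags(f)
}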

The flags format is backwards compatible. An attribute whose name starts
with a null byte is used to store the flags in the entry attributes table. With
an old version of duplicacy, restoring this attribute should silently
fail (it is effectively ignored).
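In other words, the flags ride along in the ordinary attributes map as a 4-byte little-endian value under a NUL-prefixed key ("\x00lf" on Linux, "\x00bf" on BSD/darwin), and restore skips NUL-prefixed names when writing real xattrs. A minimal sketch of the write side, restating the format from the new platform files below (uses encoding/binary and the Entry type from the package):

const sketchFlagsKey = "\x00lf" // "\x00bf" on BSD/darwin

func storeFlags(entry *Entry, flags uint32) {
	if flags == 0 {
		return
	}
	if entry.Attributes == nil {
		entry.Attributes = &map[string][]byte{}
	}
	v := make([]byte, 4)
	binary.LittleEndian.PutUint32(v, flags) // e.g. FS_IMMUTABLE_FL (0x10) -> 10 00 00 00
	(*entry.Attributes)[sketchFlagsKey] = v
}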

Fixes xattr restore to use O_NOFOLLOW so attributes are applied to the symlink rather than its target.

TODO: Tests; possibly an option to clear the immutable/append-only flags prior to
restore of an existing file, similar to rsync. Does not apply attributes
or flags to the topmost directory.
2023-10-03 12:15:54 -05:00
9 changed files with 318 additions and 134 deletions

View File

@@ -304,24 +304,7 @@ func (manager *BackupManager) Backup(top string, quickMode bool, threads int, ta
remoteEntry = nil
}
if localEntry.IsHardlinkedFrom() {
// FIXME: Sanity check?
// FIXME: perhaps we can make size = 0 an initial invariant of the link?
//
// Note that if the initial size was 0 then this original logic doesn't change!
localEntry.Size = 0
// targetEntry, ok := localEntryList.HardLinkTable[localEntry.Link]
// if !ok {
// LOG_ERROR("BACKUP_CREATE", "Hard link %s not found in entry cache for path %s", localEntry.Link, localEntry.Path)
// }
// localEntry.Size = targetEntry.Size
// localEntry.Hash = targetEntry.Hash
// localEntry.StartChunk = targetEntry.StartChunk
// localEntry.StartOffset = targetEntry.StartOffset
// localEntry.EndChunk = targetEntry.EndChunk
// localEntry.EndOffset = targetEntry.EndOffset
// LOG_DEBUG("BACKUP_CREATE", "Hard link %s to %s in initial listing", localEntry.Link, targetEntry.Path)
} else if compareResult == 0 {
if compareResult == 0 {
// No need to check if it is in hash mode -- in that case remote listing is nil
if localEntry.IsSameAs(remoteEntry) && localEntry.IsFile() {
if localEntry.Size > 0 {
@@ -639,11 +622,6 @@ func (manager *BackupManager) Backup(top string, quickMode bool, threads int, ta
return true
}
type hardLinkEntry struct {
entry *Entry
willDownload bool
}
// Restore downloads the specified snapshot, compares it with what's on the repository, and then downloads
// files that are different. 'base' is a directory that contains files at a different revision which can
// serve as a local cache to avoid download chunks available locally. It is perfectly ok for 'base' to be
@@ -725,14 +703,17 @@ func (manager *BackupManager) Restore(top string, revision int, inPlace bool, qu
var localEntry *Entry
localListingOK := true
hardLinkTable := make(map[string]hardLinkEntry)
//hardLinks := make([]*Entry, 0)
type hardLinkEntry struct {
entry *Entry
willDownload bool
}
var hardLinkTable []hardLinkEntry
var hardLinks []*Entry
for remoteEntry := range remoteListingChannel {
if remoteEntry.IsFile() && remoteEntry.Link == "/" {
LOG_INFO("RESTORE_LINK", "Noting hardlinked source file %s", remoteEntry.Path)
hardLinkTable[remoteEntry.Path] = hardLinkEntry{remoteEntry, false}
if remoteEntry.IsHardlinkRoot() {
hardLinkTable = append(hardLinkTable, hardLinkEntry{remoteEntry, false})
}
if len(patterns) > 0 && !MatchPath(remoteEntry.Path, patterns) {
@@ -743,8 +724,6 @@ func (manager *BackupManager) Restore(top string, revision int, inPlace bool, qu
var compareResult int
for {
// TODO: We likely need to check if a local listing file exists in the hardLinkTable for the case where one is restoring a hardlink
// to an existing disk file. Right now, we'll just end up downloading the file new.
if localEntry == nil && localListingOK {
localEntry, localListingOK = <- localListingChannel
}
@@ -762,28 +741,12 @@ func (manager *BackupManager) Restore(top string, revision int, inPlace bool, qu
}
if compareResult == 0 {
// if quickMode && localEntry.IsFile() {
if quickMode && localEntry.IsFile() && localEntry.IsSameAs(remoteEntry) {
LOG_TRACE("RESTORE_SKIP", "File %s unchanged (by size and timestamp)", localEntry.Path)
skippedFileSize += localEntry.Size
skippedFileCount++
localEntry = nil
continue
// checkEntry := remoteEntry
// if len(remoteEntry.Link) > 0 && remoteEntry.Link != "/" {
// if e, ok := hardLinkTable[remoteEntry.Link]; !ok {
// LOG_ERROR("RESTORE_LINK", "Source file %s for hardlink %s missing", remoteEntry.Link, remoteEntry.Path)
// } else {
// checkEntry = e.entry
// }
// }
// if localEntry.IsSameAs(checkEntry) {
// LOG_TRACE("RESTORE_SKIP", "File %s unchanged (by size and timestamp)", localEntry.Path)
// skippedFileSize += localEntry.Size
// skippedFileCount++
// localEntry = nil
// continue
// }
if quickMode && localEntry.IsFile() && localEntry.IsSameAs(remoteEntry) {
LOG_TRACE("RESTORE_SKIP", "File %s unchanged (by size and timestamp)", localEntry.Path)
skippedFileSize += localEntry.Size
skippedFileCount++
localEntry = nil
continue
}
localEntry = nil
}
@@ -828,23 +791,24 @@ func (manager *BackupManager) Restore(top string, revision int, inPlace bool, qu
return 0
}
}
remoteEntry.RestoreEarlyDirFlags(fullPath)
directoryEntries = append(directoryEntries, remoteEntry)
} else {
// if remoteEntry.Link == "/" {
// hardLinkTable[remoteEntry.Path] = hardLinkEntry{remoteEntry, true}
// } else if len(remoteEntry.Link) > 0 {
// if e, ok := hardLinkTable[remoteEntry.Link]; !ok {
// LOG_ERROR("RESTORE_LINK", "Source file %s for hardlink %s missing", remoteEntry.Link, remoteEntry.Path)
// } else if !e.willDownload {
// origSourcePath := e.entry.Path
// e.entry.Path = remoteEntry.Path
// remoteEntry = e.entry
// hardLinkTable[origSourcePath] = hardLinkEntry{remoteEntry, true}
// } else {
// hardLinks = append(hardLinks, remoteEntry)
// continue
// }
// }
if remoteEntry.IsHardlinkRoot() {
hardLinkTable[len(hardLinkTable)-1] = hardLinkEntry{remoteEntry, true}
} else if remoteEntry.IsHardlinkedFrom() {
i, err := strconv.ParseUint(remoteEntry.Link, 16, 64)
if err != nil {
LOG_ERROR("RESTORE_HARDLINK", "Decode error in hardlink entry, expected hex int, got %s", remoteEntry.Link)
return 0
}
if !hardLinkTable[i].willDownload {
hardLinkTable[i] = hardLinkEntry{remoteEntry, true}
} else {
hardLinks = append(hardLinks, remoteEntry)
continue
}
}
// We can't download files here since fileEntries needs to be sorted
fileEntries = append(fileEntries, remoteEntry)
totalFileSize += remoteEntry.Size
@@ -900,10 +864,6 @@ func (manager *BackupManager) Restore(top string, revision int, inPlace bool, qu
stat, _ := os.Stat(fullPath)
if stat != nil {
if quickMode {
// cmpFile := file
// if file.IsFile() && len(file.Link) > 0 && file.Link != "/" {
// cmpFile = hardLinkTable[file.Link].entry
// }
if file.IsSameAsFileInfo(stat) {
LOG_TRACE("RESTORE_SKIP", "File %s unchanged (by size and timestamp)", file.Path)
skippedFileSize += file.Size
@@ -941,6 +901,7 @@ func (manager *BackupManager) Restore(top string, revision int, inPlace bool, qu
downloadedFileSize += file.Size
downloadedFiles = append(downloadedFiles, file)
}
continue
}
@@ -967,14 +928,16 @@ func (manager *BackupManager) Restore(top string, revision int, inPlace bool, qu
file.RestoreMetadata(fullPath, nil, setOwner)
}
// for _, linkEntry := range hardLinks {
// sourcePath := joinPath(top, hardLinkTable[linkEntry.Link].entry.Path)
// fullPath := joinPath(top, linkEntry.Path)
// LOG_INFO("DOWNLOAD_LINK", "Hard linking %s -> %s", fullPath, sourcePath)
// if err := os.Link(sourcePath, fullPath); err != nil {
// LOG_ERROR("DOWNLOAD_LINK", "Failed to create hard link %s -> %s", fullPath, sourcePath)
// }
// }
for _, linkEntry := range hardLinks {
i, _ := strconv.ParseUint(linkEntry.Link, 16, 64)
sourcePath := joinPath(top, hardLinkTable[i].entry.Path)
fullPath := joinPath(top, linkEntry.Path)
LOG_INFO("RESTORE_HARDLINK", "Hard linking %s to %s", fullPath, sourcePath)
if err := os.Link(sourcePath, fullPath); err != nil {
LOG_ERROR("RESTORE_HARDLINK", "Failed to create hard link %s to %s", fullPath, sourcePath)
return 0
}
}
if deleteMode && len(patterns) == 0 {
// Reverse the order to make sure directories are empty before being deleted
@@ -1127,11 +1090,13 @@ func (manager *BackupManager) UploadSnapshot(chunkOperator *ChunkOperator, top s
lastEndChunk := 0
uploadEntryInfoFunc := func(entry *Entry) error {
if entry.IsHardlinkRoot() {
entryList.HardLinkTable[entry.Path] = entry
}
type hardLinkEntry struct {
entry *Entry
startChunk int
}
var hardLinkTable []hardLinkEntry
uploadEntryInfoFunc := func(entry *Entry) error {
if entry.IsFile() && entry.Size > 0 {
delta := entry.StartChunk - len(chunkHashes) + 1
if entry.StartChunk != lastChunk {
@@ -1149,18 +1114,38 @@ func (manager *BackupManager) UploadSnapshot(chunkOperator *ChunkOperator, top s
entry.StartChunk -= delta
entry.EndChunk -= delta
if entry.IsHardlinkRoot() {
LOG_DEBUG("SNAPSHOT_UPLOAD", "Hard link root %s %v %v", entry.Path, entry.StartChunk, entry.EndChunk)
hardLinkTable = append(hardLinkTable, hardLinkEntry{entry, entry.StartChunk})
}
delta = entry.EndChunk - entry.StartChunk
entry.StartChunk -= lastEndChunk
lastEndChunk = entry.EndChunk
entry.EndChunk = delta
} else if entry.IsHardlinkedFrom() {
targetEntry, ok := entryList.HardLinkTable[entry.Link]
if !ok {
LOG_ERROR("SNAPSHOT_UPLOAD", "Unable to find hardlink target for %s to %s", entry.Path, entry.Link)
i, err := strconv.ParseUint(entry.Link, 16, 64)
if err != nil {
LOG_ERROR("SNAPSHOT_UPLOAD", "Decode error in hardlink entry, expected hex int, got %s", entry.Link)
return err
}
// FIXME: We will use a copy, so it is probably sufficient to skip rereading xattrs and such in the initial code
entry = entry.LinkTo(targetEntry)
LOG_DEBUG("SNAPSHOT_UPLOAD", "Uploading cloned hardlink entry for %s to %s", entry.Path, entry.Link)
targetEntry := hardLinkTable[i].entry
var startChunk, endChunk int
if targetEntry.Size > 0 {
startChunk = hardLinkTable[i].startChunk - lastEndChunk
endChunk = targetEntry.EndChunk
}
entry = entry.HardLinkTo(targetEntry, startChunk, endChunk)
if targetEntry.Size > 0 {
lastEndChunk = hardLinkTable[i].startChunk + endChunk
}
LOG_DEBUG("SNAPSHOT_UPLOAD", "Uploading cloned hardlink for %s to %s (%v %v)", entry.Path, targetEntry.Path, startChunk, endChunk)
} else if entry.IsHardlinkRoot() {
hardLinkTable = append(hardLinkTable, hardLinkEntry{entry, 0})
}
buffer.Reset()
@@ -1280,6 +1265,7 @@ func (manager *BackupManager) RestoreFile(chunkDownloader *ChunkDownloader, chun
LOG_ERROR("DOWNLOAD_CREATE", "Failed to create the file %s for in-place writing: %v", fullPath, err)
return false, nil
}
entry.RestoreEarlyFileFlags(existingFile)
n := int64(1)
// There is a go bug on Windows (https://github.com/golang/go/issues/21681) that causes Seek to fail
@@ -1463,6 +1449,7 @@ func (manager *BackupManager) RestoreFile(chunkDownloader *ChunkDownloader, chun
return false, nil
}
}
entry.RestoreEarlyFileFlags(existingFile)
existingFile.Seek(0, 0)
@@ -1545,6 +1532,7 @@ func (manager *BackupManager) RestoreFile(chunkDownloader *ChunkDownloader, chun
LOG_ERROR("DOWNLOAD_OPEN", "Failed to open file for writing: %v", err)
return false, nil
}
entry.RestoreEarlyFileFlags(newFile)
hasher := manager.config.NewFileHasher()

View File

@@ -119,7 +119,7 @@ func (entry *Entry) Copy() *Entry {
}
}
func (entry *Entry) LinkTo(target *Entry) *Entry {
func (entry *Entry) HardLinkTo(target *Entry, startChunk int, endChunk int) *Entry {
return &Entry{
Path: entry.Path,
Size: target.Size,
@@ -131,9 +131,9 @@ func (entry *Entry) LinkTo(target *Entry) *Entry {
UID: target.UID,
GID: target.GID,
StartChunk: target.StartChunk,
StartChunk: startChunk,
StartOffset: target.StartOffset,
EndChunk: target.EndChunk,
EndChunk: endChunk,
EndOffset: target.EndOffset,
Attributes: target.Attributes,
@@ -513,10 +513,6 @@ func (entry *Entry) IsComplete() bool {
return entry.Size >= 0
}
func (entry *Entry) IsFileNotHardlink() bool {
return entry.IsFile() && (len(entry.Link) == 0 || entry.Link == "/")
}
func (entry *Entry) IsHardlinkedFrom() bool {
return entry.IsFile() && len(entry.Link) > 0 && entry.Link != "/"
}
@@ -733,12 +729,13 @@ type listEntryLinkKey struct {
}
type ListingState struct {
linkTable map[listEntryLinkKey]string // map unique inode details to initially found path
linkIndex int
linkTable map[listEntryLinkKey]int // map unique inode details to initially found path
}
func NewListingState() *ListingState {
return &ListingState{
linkTable: make(map[listEntryLinkKey]string),
linkTable: make(map[listEntryLinkKey]int),
}
}
@@ -820,6 +817,30 @@ func ListEntries(top string, path string, patterns []string, nobackupFile string
}
}
if f.Mode()&(os.ModeNamedPipe|os.ModeSocket|os.ModeDevice) != 0 {
LOG_WARN("LIST_SKIP", "Skipped non-regular file %s", entry.Path)
skippedFiles = append(skippedFiles, entry.Path)
continue
}
var linkKey *listEntryLinkKey
if stat, ok := f.Sys().(*syscall.Stat_t); entry.IsFile() && ok && stat != nil && stat.Nlink > 1 {
k := listEntryLinkKey{dev: uint64(stat.Dev), ino: uint64(stat.Ino)}
if linkIndex, seen := listingState.linkTable[k]; seen {
if linkIndex == -1 {
LOG_DEBUG("LIST_EXCLUDE", "%s is excluded by attribute (hardlink)", entry.Path)
continue
}
entry.Size = 0
entry.Link = strconv.FormatInt(int64(linkIndex), 16)
} else {
entry.Link = "/"
listingState.linkTable[k] = -1
linkKey = &k
}
}
entry.ReadAttributes(top)
if excludeByAttribute && entry.Attributes != nil && excludedByAttribute(*entry.Attributes) {
@@ -827,24 +848,9 @@ func ListEntries(top string, path string, patterns []string, nobackupFile string
continue
}
if f.Mode()&(os.ModeNamedPipe|os.ModeSocket|os.ModeDevice) != 0 {
LOG_WARN("LIST_SKIP", "Skipped non-regular file %s", entry.Path)
skippedFiles = append(skippedFiles, entry.Path)
continue
}
if entry.IsFile() {
stat, ok := f.Sys().(*syscall.Stat_t)
if ok && stat != nil && stat.Nlink > 1 {
k := listEntryLinkKey{dev: uint64(stat.Dev), ino: uint64(stat.Ino)}
if path, ok := listingState.linkTable[k]; ok {
LOG_DEBUG("LIST_HARDLINK", "Detected hardlink %s to %s", entry.Path, path)
entry.Link = path
} else {
entry.Link = "/"
listingState.linkTable[k] = entry.Path
}
}
if linkKey != nil {
listingState.linkTable[*linkKey] = listingState.linkIndex
listingState.linkIndex++
}
if entry.IsDir() {

View File

@@ -62,7 +62,6 @@ type EntryList struct {
uploadedChunkIndex int // counter for upload chunks
uploadedChunkOffset int // the start offset for the current modified entry
HardLinkTable map[string]*Entry
}
// Create a new entry list
@@ -79,7 +78,6 @@ func CreateEntryList(snapshotID string, cachePath string, maximumInMemoryEntries
maximumInMemoryEntries: maximumInMemoryEntries,
cachePath: cachePath,
Token: string(token),
HardLinkTable: make(map[string]*Entry),
}
return entryList, nil
@@ -120,7 +118,7 @@ func (entryList *EntryList)AddEntry(entry *Entry) error {
if !entry.IsComplete() {
if entry.IsDir() || entry.IsLink() {
entry.Size = 0
} else if !entry.IsHardlinkedFrom() {
} else {
modifiedEntry := ModifiedEntry {
Path: entry.Path,
Size: -1,

View File

@@ -68,7 +68,7 @@ func (snapshot *Snapshot) ListLocalFiles(top string, nobackupFile string,
skippedDirectories *[]string, skippedFiles *[]string) {
var patterns []string
var listingState = NewListingState()
listingState := NewListingState()
if filtersFile == "" {
filtersFile = joinPath(GetDuplicacyPreferencePath(), "filters")

View File

@@ -0,0 +1,53 @@
// Copyright (c) Acrosync LLC. All rights reserved.
// Free for personal use and commercial trial
// Commercial use requires per-user licenses available from https://duplicacy.com
//go:build freebsd || netbsd || darwin
// +build freebsd netbsd darwin
package duplicacy
import (
"encoding/binary"
"os"
"syscall"
)
const bsdFileFlagsKey = "\x00bf"
func (entry *Entry) ReadFileFlags(f *os.File) error {
fileInfo, err := f.Stat()
if err != nil {
return err
}
stat, ok := fileInfo.Sys().(*syscall.Stat_t)
if ok && stat.Flags != 0 {
if entry.Attributes == nil {
entry.Attributes = &map[string][]byte{}
}
v := make([]byte, 4)
binary.LittleEndian.PutUint32(v, stat.Flags)
(*entry.Attributes)[bsdFileFlagsKey] = v
LOG_DEBUG("ATTR_READ", "Read flags 0x%x for %s", stat.Flags, entry.Path)
}
return nil
}
func (entry *Entry) RestoreEarlyDirFlags(path string) error {
return nil
}
func (entry *Entry) RestoreEarlyFileFlags(f *os.File) error {
return nil
}
func (entry *Entry) RestoreLateFileFlags(f *os.File) error {
if entry.Attributes == nil {
return nil
}
if v, have := (*entry.Attributes)[bsdFileFlagsKey]; have {
LOG_DEBUG("ATTR_RESTORE", "Restore flags 0x%x for %s", binary.LittleEndian.Uint32(v), entry.Path)
return syscall.Fchflags(int(f.Fd()), int(binary.LittleEndian.Uint32(v)))
}
return nil
}

View File

@@ -0,0 +1,112 @@
// Copyright (c) Acrosync LLC. All rights reserved.
// Free for personal use and commercial trial
// Commercial use requires per-user licenses available from https://duplicacy.com
package duplicacy
import (
"encoding/binary"
"os"
"syscall"
"unsafe"
)
const (
linux_FS_SECRM_FL = 0x00000001 /* Secure deletion */
linux_FS_UNRM_FL = 0x00000002 /* Undelete */
linux_FS_COMPR_FL = 0x00000004 /* Compress file */
linux_FS_SYNC_FL = 0x00000008 /* Synchronous updates */
linux_FS_IMMUTABLE_FL = 0x00000010 /* Immutable file */
linux_FS_APPEND_FL = 0x00000020 /* writes to file may only append */
linux_FS_NODUMP_FL = 0x00000040 /* do not dump file */
linux_FS_NOATIME_FL = 0x00000080 /* do not update atime */
linux_FS_NOCOMP_FL = 0x00000400 /* Don't compress */
linux_FS_JOURNAL_DATA_FL = 0x00004000 /* Reserved for ext3 */
linux_FS_NOTAIL_FL = 0x00008000 /* file tail should not be merged */
linux_FS_DIRSYNC_FL = 0x00010000 /* dirsync behaviour (directories only) */
linux_FS_TOPDIR_FL = 0x00020000 /* Top of directory hierarchies*/
linux_FS_NOCOW_FL = 0x00800000 /* Do not cow file */
linux_FS_PROJINHERIT_FL = 0x20000000 /* Create with parents projid */
linux_FS_IOC_GETFLAGS uintptr = 0x80086601
linux_FS_IOC_SETFLAGS uintptr = 0x40086602
linuxIocFlagsFileEarly = linux_FS_SECRM_FL | linux_FS_UNRM_FL | linux_FS_COMPR_FL | linux_FS_NODUMP_FL | linux_FS_NOATIME_FL | linux_FS_NOCOMP_FL | linux_FS_JOURNAL_DATA_FL | linux_FS_NOTAIL_FL | linux_FS_NOCOW_FL
linuxIocFlagsDirEarly = linux_FS_TOPDIR_FL | linux_FS_PROJINHERIT_FL
linuxIocFlagsLate = linux_FS_SYNC_FL | linux_FS_IMMUTABLE_FL | linux_FS_APPEND_FL | linux_FS_DIRSYNC_FL
linuxFileFlagsKey = "\x00lf"
)
func ioctl(f *os.File, request uintptr, attrp *uint32) error {
argp := uintptr(unsafe.Pointer(attrp))
if _, _, errno := syscall.Syscall(syscall.SYS_IOCTL, f.Fd(), request, argp); errno != 0 {
return os.NewSyscallError("ioctl", errno)
}
return nil
}
func (entry *Entry) ReadFileFlags(f *os.File) error {
var flags uint32
if err := ioctl(f, linux_FS_IOC_GETFLAGS, &flags); err != nil {
return err
}
if flags != 0 {
if entry.Attributes == nil {
entry.Attributes = &map[string][]byte{}
}
v := make([]byte, 4)
binary.LittleEndian.PutUint32(v, flags)
(*entry.Attributes)[linuxFileFlagsKey] = v
LOG_DEBUG("ATTR_READ", "Read flags 0x%x for %s", flags, entry.Path)
}
return nil
}
func (entry *Entry) RestoreEarlyDirFlags(path string) error {
if entry.Attributes == nil {
return nil
}
if v, have := (*entry.Attributes)[linuxFileFlagsKey]; have {
flags := binary.LittleEndian.Uint32(v) & linuxIocFlagsDirEarly
f, err := os.OpenFile(path, os.O_RDONLY|syscall.O_DIRECTORY, 0)
if err != nil {
return err
}
LOG_DEBUG("ATTR_RESTORE", "Restore dir flags (early) 0x%x for %s", flags, entry.Path)
err = ioctl(f, linux_FS_IOC_SETFLAGS, &flags)
f.Close()
return err
}
return nil
}
func (entry *Entry) RestoreEarlyFileFlags(f *os.File) error {
if entry.Attributes == nil {
return nil
}
if v, have := (*entry.Attributes)[linuxFileFlagsKey]; have {
flags := binary.LittleEndian.Uint32(v) & linuxIocFlagsFileEarly
LOG_DEBUG("ATTR_RESTORE", "Restore flags (early) 0x%x for %s", flags, entry.Path)
return ioctl(f, linux_FS_IOC_SETFLAGS, &flags)
}
return nil
}
func (entry *Entry) RestoreLateFileFlags(f *os.File) error {
if entry.Attributes == nil {
return nil
}
if v, have := (*entry.Attributes)[linuxFileFlagsKey]; have {
flags := binary.LittleEndian.Uint32(v) & (linuxIocFlagsFileEarly | linuxIocFlagsDirEarly | linuxIocFlagsLate)
LOG_DEBUG("ATTR_RESTORE", "Restore flags (late) 0x%x for %s", flags, entry.Path)
return ioctl(f, linux_FS_IOC_SETFLAGS, &flags)
}
return nil
}
func excludedByAttribute(attributes map[string][]byte) bool {
_, ok := attributes["user.duplicacy_exclude"]
return ok
}

View File

@@ -48,9 +48,12 @@ func SetOwner(fullPath string, entry *Entry, fileInfo *os.FileInfo) bool {
}
func (entry *Entry) ReadAttributes(top string) {
fullPath := filepath.Join(top, entry.Path)
attributes, _ := xattr.List(fullPath)
f, err := os.OpenFile(fullPath, os.O_RDONLY|syscall.O_NOFOLLOW, 0)
if err != nil {
return
}
attributes, _ := xattr.FList(f)
if len(attributes) > 0 {
entry.Attributes = &map[string][]byte{}
for _, name := range attributes {
@@ -60,30 +63,42 @@ func (entry *Entry) ReadAttributes(top string) {
}
}
}
if err := entry.ReadFileFlags(f); err != nil {
LOG_INFO("ATTR_BACKUP", "Could not backup flags for file %s: %v", fullPath, err)
}
f.Close()
}
func (entry *Entry) SetAttributesToFile(fullPath string) {
names, _ := xattr.List(fullPath)
f, err := os.OpenFile(fullPath, os.O_RDONLY|syscall.O_NOFOLLOW, 0)
if err != nil {
return
}
names, _ := xattr.FList(f)
for _, name := range names {
newAttribute, found := (*entry.Attributes)[name]
if found {
oldAttribute, _ := xattr.Get(fullPath, name)
oldAttribute, _ := xattr.FGet(f, name)
if !bytes.Equal(oldAttribute, newAttribute) {
xattr.Set(fullPath, name, newAttribute)
xattr.FSet(f, name, newAttribute)
}
delete(*entry.Attributes, name)
} else {
xattr.Remove(fullPath, name)
xattr.FRemove(f, name)
}
}
for name, attribute := range *entry.Attributes {
xattr.Set(fullPath, name, attribute)
if len(name) > 0 && name[0] == '\x00' {
continue
}
xattr.FSet(f, name, attribute)
}
if err := entry.RestoreLateFileFlags(f); err != nil {
LOG_DEBUG("ATTR_RESTORE", "Could not restore flags for file %s: %v", fullPath, err)
}
f.Close()
}
func joinPath(components ...string) string {

View File

@@ -2,8 +2,8 @@
// Free for personal use and commercial trial
// Commercial use requires per-user licenses available from https://duplicacy.com
//go:build freebsd || netbsd || linux || solaris
// +build freebsd netbsd linux solaris
//go:build freebsd || netbsd || solaris
// +build freebsd netbsd solaris
package duplicacy

View File

@@ -132,6 +132,18 @@ func SplitDir(fullPath string) (dir string, file string) {
return fullPath[:i+1], fullPath[i+1:]
}
func excludedByAttribute(attributes map[string][]byte) bool {
return false
func (entry *Entry) ReadFileFlags(f *os.File) error {
return nil
}
func (entry *Entry) RestoreEarlyDirFlags(path string) error {
return nil
}
func (entry *Entry) RestoreEarlyFileFlags(f *os.File) error {
return nil
}
func (entry *Entry) RestoreLateFileFlags(f *os.File) error {
return nil
}