Normalize and add options for backup, add more file flags options

- Move more function arguments into option structs, with some convenience
constructors.
- Add a flag to enable backup/restore of file flags. It works a little
differently depending on the system: on darwin/BSD the flags are already in
the stat buffer, so we just save them; on Linux we have to open the file and
issue an FS_IOC_GETFLAGS ioctl (see the sketch below the commit summary).
- Add the normalize flag to ReadAttributes. Implementation TBD.
- Stub out a common xattr file for doc comments.
2023-10-06 22:10:38 -05:00
parent 58d21eb17a
commit 518d02a57d
14 changed files with 320 additions and 162 deletions
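The Linux behavior mentioned in the commit message (open the file, then ask the filesystem for its inode flags) can be sketched as follows. This is a minimal, standalone illustration and not the commit's code: the helper name readInodeFlags is made up here, while the open flags, FS_IOC_GETFLAGS, and the ENOTTY handling mirror what the Linux diff further down does inside readFileFlags. On darwin/BSD no such ioctl is needed because the same information is already in syscall.Stat_t.Flags.

```go
//go:build linux

// Minimal sketch (assumptions noted above): open without following symlinks
// or updating atime, then read the inode flags with FS_IOC_GETFLAGS.
package main

import (
	"fmt"
	"os"

	"golang.org/x/sys/unix"
)

func readInodeFlags(path string) (uint32, error) {
	// O_NOATIME can fail with EPERM on files we don't own; a fuller
	// implementation would retry without it.
	f, err := os.OpenFile(path, os.O_RDONLY|unix.O_NONBLOCK|unix.O_NOFOLLOW|unix.O_NOATIME, 0)
	if err != nil {
		return 0, err
	}
	defer f.Close()

	flags, err := unix.IoctlGetUint32(int(f.Fd()), unix.FS_IOC_GETFLAGS)
	if err == unix.ENOTTY {
		return 0, nil // filesystem does not support inode flags
	}
	return flags, err
}

func main() {
	tmp, err := os.CreateTemp("", "flags-demo-")
	if err != nil {
		panic(err)
	}
	defer os.Remove(tmp.Name())
	tmp.Close()

	flags, err := readInodeFlags(tmp.Name())
	fmt.Printf("flags=%#x err=%v\n", flags, err)
}
```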


@@ -789,9 +789,14 @@ func backupRepository(context *cli.Context) {
  storage.SetRateLimits(0, uploadRateLimit)
  backupManager := duplicacy.CreateBackupManager(preference.SnapshotID, storage, repository, password,
    &duplicacy.BackupManagerOptions{
-     NoBackupFile: preference.NobackupFile,
+     NobackupFile: preference.NobackupFile,
      FiltersFile: preference.FiltersFile,
      ExcludeByAttribute: preference.ExcludeByAttribute,
+     ExcludeXattrs: preference.ExcludeXattrs,
+     NormalizeXattrs: preference.NormalizeXattrs,
+     IncludeFileFlags: preference.IncludeFileFlags,
+     IncludeSpecials: preference.IncludeSpecials,
+     FileFlagsMask: uint32(preference.FileFlagsMask),
    })
  duplicacy.SavePassword(*preference, "password", password)
@@ -885,7 +890,7 @@ func restoreRepository(context *cli.Context) {
  backupManager := duplicacy.CreateBackupManager(preference.SnapshotID, storage, repository, password,
    &duplicacy.BackupManagerOptions{
-     NoBackupFile: preference.NobackupFile,
+     NobackupFile: preference.NobackupFile,
      FiltersFile: preference.FiltersFile,
      ExcludeByAttribute: preference.ExcludeByAttribute,
      SetOwner: excludeOwner,
@@ -1128,7 +1133,8 @@ func diff(context *cli.Context) {
  loadRSAPrivateKey(context.String("key"), context.String("key-passphrase"), preference, backupManager, false)
  backupManager.SetupSnapshotCache(preference.Name)
- backupManager.SnapshotManager.Diff(repository, snapshotID, revisions, path, compareByHash, preference.NobackupFile, preference.FiltersFile, preference.ExcludeByAttribute)
+ backupManager.SnapshotManager.Diff(repository, snapshotID, revisions, path, compareByHash,
+   duplicacy.NewListFilesOptions(preference))
  runScript(context, preference.Name, "post")
 }
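The CLI now fills the same BackupManagerOptions literal in both backupRepository and restoreRepository. A convenience constructor in the spirit of duplicacy.NewListFilesOptions (which this commit does add, further down) could remove that duplication. The sketch below is hypothetical and not part of the commit; the field names come from the diffs on this page.

```go
// Hypothetical convenience constructor for the CLI package (not in this
// commit): build BackupManagerOptions straight from a Preference.
func NewBackupManagerOptions(p *duplicacy.Preference) *duplicacy.BackupManagerOptions {
	return &duplicacy.BackupManagerOptions{
		NobackupFile:       p.NobackupFile,
		FiltersFile:        p.FiltersFile,
		ExcludeByAttribute: p.ExcludeByAttribute,
		ExcludeXattrs:      p.ExcludeXattrs,
		NormalizeXattrs:    p.NormalizeXattrs,
		IncludeFileFlags:   p.IncludeFileFlags,
		IncludeSpecials:    p.IncludeSpecials,
		FileFlagsMask:      uint32(p.FileFlagsMask),
	}
}
```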


@@ -9,6 +9,7 @@ import (
   "encoding/hex"
   "fmt"
   "io"
+  "math"
   "os"
   "path"
   "path/filepath"
@@ -39,7 +40,7 @@ type BackupManager struct {
 }

 type BackupManagerOptions struct {
-  NoBackupFile string // don't backup directory when this file name is found
+  NobackupFile string // don't backup directory when this file name is found
   FiltersFile string // the path to the filters file
   ExcludeByAttribute bool // don't backup file based on file attribute
   SetOwner bool
@@ -69,6 +70,14 @@ func (manager *BackupManager) SetCompressionLevel(level int) {
   manager.config.CompressionLevel = level
 }

+func (manager *BackupManager) Config() *Config {
+  return manager.config
+}
+
+func (manager *BackupManager) SnapshotCache() *FileStorage {
+  return manager.snapshotCache
+}
+
 // CreateBackupManager creates a backup manager using the specified 'storage'. 'snapshotID' is a unique id to
 // identify snapshots created for this repository. 'top' is the top directory of the repository. 'password' is the
 // master key which can be nil if encryption is not enabled.
@@ -94,7 +103,6 @@ func CreateBackupManager(snapshotID string, storage Storage, top string, passwor
   SnapshotManager: snapshotManager,
   config: config,
-  options: *options,
 }
 if options != nil {
   backupManager.options = *options
@@ -148,8 +156,7 @@ func (manager *BackupManager) SetupSnapshotCache(storageName string) bool {
 func (manager *BackupManager) Backup(top string, quickMode bool, threads int, tag string,
   showStatistics bool, shadowCopy bool, shadowCopyTimeout int, enumOnly bool, metadataChunkSize int, maximumInMemoryEntries int) bool {
-  var err error
-  top, err = filepath.Abs(top)
+  top, err := filepath.Abs(top)
   if err != nil {
     LOG_ERROR("REPOSITORY_ERR", "Failed to obtain the absolute path of the repository: %v", err)
     return false
@@ -255,8 +262,16 @@ func (manager *BackupManager) Backup(top string, quickMode bool, threads int, ta
   go func() {
     // List local files
     defer CatchLogException()
-    localSnapshot.ListLocalFiles(shadowTop, manager.options.NoBackupFile, manager.options.FiltersFile,
-      manager.options.ExcludeByAttribute, localListingChannel, &skippedDirectories, &skippedFiles)
+    localSnapshot.ListLocalFiles(shadowTop, localListingChannel, &skippedDirectories, &skippedFiles,
+      &ListFilesOptions{
+        NoBackupFile: manager.options.NobackupFile,
+        FiltersFile: manager.options.FiltersFile,
+        ExcludeByAttribute: manager.options.ExcludeByAttribute,
+        ExcludeXattrs: manager.options.ExcludeXattrs,
+        NormalizeXattr: manager.options.NormalizeXattrs,
+        IncludeFileFlags: manager.options.IncludeFileFlags,
+        IncludeSpecials: manager.options.IncludeSpecials,
+      })
   }()
   go func() {
@@ -654,10 +669,11 @@ func (manager *BackupManager) Restore(top string, revision int, options *Restore
   allowFailures := options.AllowFailures
   metadataOptions := RestoreMetadataOptions{
     SetOwner: manager.options.SetOwner,
     ExcludeXattrs: manager.options.ExcludeXattrs,
     NormalizeXattrs: manager.options.NormalizeXattrs,
+    IncludeFileFlags: manager.options.IncludeFileFlags,
     FileFlagsMask: manager.options.FileFlagsMask,
   }
   startTime := time.Now().Unix()
@@ -716,8 +732,16 @@ func (manager *BackupManager) Restore(top string, revision int, options *Restore
   go func() {
     // List local files
     defer CatchLogException()
-    localSnapshot.ListLocalFiles(top, manager.options.NoBackupFile, manager.options.FiltersFile,
-      manager.options.ExcludeByAttribute, localListingChannel, nil, nil)
+    localSnapshot.ListLocalFiles(top, localListingChannel, nil, nil,
+      &ListFilesOptions{
+        NoBackupFile: manager.options.NobackupFile,
+        FiltersFile: manager.options.FiltersFile,
+        ExcludeByAttribute: manager.options.ExcludeByAttribute,
+        ExcludeXattrs: manager.options.ExcludeXattrs,
+        NormalizeXattr: manager.options.NormalizeXattrs,
+        IncludeFileFlags: manager.options.IncludeFileFlags,
+        IncludeSpecials: manager.options.IncludeSpecials,
+      })
   }()
   remoteSnapshot := manager.SnapshotManager.DownloadSnapshot(manager.snapshotID, revision)
@@ -859,9 +883,11 @@ func (manager *BackupManager) Restore(top string, revision int, options *Restore
       return 0
     }
   }
-  err = remoteEntry.RestoreEarlyDirFlags(fullPath, manager.options.FileFlagsMask)
-  if err != nil {
-    LOG_WARN("DOWNLOAD_FLAGS", "Failed to set early file flags on %s: %v", fullPath, err)
+  if metadataOptions.IncludeFileFlags {
+    err = remoteEntry.RestoreEarlyDirFlags(fullPath, manager.options.FileFlagsMask)
+    if err != nil {
+      LOG_WARN("DOWNLOAD_FLAGS", "Failed to set early file flags on %s: %v", fullPath, err)
+    }
   }
   directoryEntries = append(directoryEntries, remoteEntry)
 } else if remoteEntry.IsSpecial() && manager.options.IncludeSpecials {
@@ -1005,9 +1031,13 @@ func (manager *BackupManager) Restore(top string, revision int, options *Restore
     continue
   }
+  fileFlagsMask := metadataOptions.FileFlagsMask
+  if !metadataOptions.IncludeFileFlags {
+    fileFlagsMask = math.MaxUint32
+  }
   downloaded, err := manager.RestoreFile(chunkDownloader, chunkMaker, file, top, options.InPlace, overwrite,
     options.ShowStatistics, totalFileSize, downloadedFileSize, startDownloadingTime, allowFailures,
-    metadataOptions.FileFlagsMask)
+    fileFlagsMask)
   if err != nil {
     // RestoreFile returned an error; if allowFailures is false RestoerFile would error out and not return so here
     // we just need to show a warning
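Restore uses math.MaxUint32 as a "leave file flags alone" sentinel, which is why the hunk above substitutes that value when IncludeFileFlags is off, and why the platform restore*FileFlags functions further down return early on math.MaxUint32. A tiny standalone illustration of that convention, with a hypothetical helper name (not the commit's code; exactly how the non-sentinel mask is applied lives in the platform files and is not shown here):

```go
package main

import (
	"fmt"
	"math"
)

// effectiveFlagsMask captures the convention used above: math.MaxUint32 means
// "don't touch file flags at all"; any other value limits which flags the
// restore code will apply.
func effectiveFlagsMask(include bool, mask uint32) uint32 {
	if !include {
		return math.MaxUint32
	}
	return mask
}

func main() {
	fmt.Printf("%#x\n", effectiveFlagsMask(false, 0x0000ffff)) // 0xffffffff
	fmt.Printf("%#x\n", effectiveFlagsMask(true, 0x0000ffff))  // 0xffff
}
```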


@@ -583,10 +583,11 @@ func (entry *Entry) String(maxSizeDigits int) string {
 }

 type RestoreMetadataOptions struct {
   SetOwner bool
   ExcludeXattrs bool
   NormalizeXattrs bool
+  IncludeFileFlags bool
   FileFlagsMask uint32
 }

 func (entry *Entry) RestoreMetadata(fullPath string, fileInfo os.FileInfo,
@@ -634,9 +635,11 @@ func (entry *Entry) RestoreMetadata(fullPath string, fileInfo os.FileInfo,
     }
   }
-  err := entry.RestoreLateFileFlags(fullPath, fileInfo, options.FileFlagsMask)
-  if err != nil {
-    LOG_WARN("RESTORE_FLAGS", "Failed to set file flags on %s: %v", entry.Path, err)
+  if options.IncludeFileFlags {
+    err := entry.RestoreLateFileFlags(fullPath, fileInfo, options.FileFlagsMask)
+    if err != nil {
+      LOG_WARN("RESTORE_FLAGS", "Failed to set file flags on %s: %v", entry.Path, err)
+    }
   }
   return true
@@ -769,22 +772,39 @@ func (files FileInfoCompare) Less(i, j int) bool {
   }
 }

-type ListingState struct {
+type EntryListerOptions struct {
+  Patterns []string
+  NoBackupFile string
+  ExcludeByAttribute bool
+  ExcludeXattrs bool
+  NormalizeXattr bool
+  IncludeFileFlags bool
+  IncludeSpecials bool
+}
+
+type EntryLister interface {
+  ListDir(top string, path string, listingChannel chan *Entry, options *EntryListerOptions) (directoryList []*Entry, skippedFiles []string, err error)
+}
+
+type LocalDirectoryLister struct {
   linkIndex int
   linkTable map[listEntryLinkKey]int // map unique inode details to initially found path
 }

-func NewListingState() *ListingState {
-  return &ListingState{
+func NewLocalDirectoryLister() *LocalDirectoryLister {
+  return &LocalDirectoryLister{
     linkTable: make(map[listEntryLinkKey]int),
   }
 }

-// ListEntries returns a list of entries representing file and subdirectories under the directory 'path'. Entry paths
-// are normalized as relative to 'top'. 'patterns' are used to exclude or include certain files.
-func ListEntries(top string, path string, patterns []string, nobackupFile string, excludeByAttribute bool,
-  listingState *ListingState,
-  listingChannel chan *Entry) (directoryList []*Entry, skippedFiles []string, err error) {
+// ListDir returns a list of entries representing file and subdirectories under the directory 'path'.
+// Entry paths are normalized as relative to 'top'.
+func (dl *LocalDirectoryLister) ListDir(top string, path string, listingChannel chan *Entry,
+  options *EntryListerOptions) (directoryList []*Entry, skippedFiles []string, err error) {
+
+  if options == nil {
+    options = &EntryListerOptions{}
+  }

   LOG_DEBUG("LIST_ENTRIES", "Listing %s", path)
@@ -797,10 +817,12 @@ func ListEntries(top string, path string, patterns []string, nobackupFile string
     return directoryList, nil, err
   }

+  patterns := options.Patterns
+
   // This binary search works because ioutil.ReadDir returns files sorted by Name() by default
-  if nobackupFile != "" {
-    ii := sort.Search(len(files), func(ii int) bool { return strings.Compare(files[ii].Name(), nobackupFile) >= 0 })
-    if ii < len(files) && files[ii].Name() == nobackupFile {
+  if options.NoBackupFile != "" {
+    ii := sort.Search(len(files), func(ii int) bool { return strings.Compare(files[ii].Name(), options.NoBackupFile) >= 0 })
+    if ii < len(files) && files[ii].Name() == options.NoBackupFile {
       LOG_DEBUG("LIST_NOBACKUP", "%s is excluded due to nobackup file", path)
       return directoryList, skippedFiles, nil
     }
@@ -830,7 +852,7 @@ func ListEntries(top string, path string, patterns []string, nobackupFile string
   linkKey, isHardLinked := entry.getHardLinkKey(f)
   if isHardLinked {
-    if linkIndex, seen := listingState.linkTable[linkKey]; seen {
+    if linkIndex, seen := dl.linkTable[linkKey]; seen {
       if linkIndex == -1 {
         LOG_DEBUG("LIST_EXCLUDE", "%s was excluded or skipped (hard link)", entry.Path)
         continue
@@ -851,7 +873,7 @@ func ListEntries(top string, path string, patterns []string, nobackupFile string
     } else {
       entry.EndChunk = entryHardLinkRootChunkMarker
     }
-    listingState.linkTable[linkKey] = -1
+    dl.linkTable[linkKey] = -1
   }
 }
@@ -887,7 +909,7 @@ func ListEntries(top string, path string, patterns []string, nobackupFile string
     }
     entry = newEntry
   }
- } else if entry.IsSpecial() {
+ } else if options.IncludeSpecials && entry.IsSpecial() {
   if err := entry.ReadSpecial(fullPath, f); err != nil {
     LOG_WARN("LIST_DEV", "Failed to save device node %s: %v", entry.Path, err)
     skippedFiles = append(skippedFiles, entry.Path)
@@ -895,22 +917,27 @@ func ListEntries(top string, path string, patterns []string, nobackupFile string
   }
 }

-  if err := entry.ReadAttributes(fullPath, f); err != nil {
-    LOG_WARN("LIST_ATTR", "Failed to read xattrs on %s: %v", entry.Path, err)
+  if !options.ExcludeXattrs {
+    if err := entry.ReadAttributes(f, fullPath, false); err != nil {
+      LOG_WARN("LIST_ATTR", "Failed to read xattrs on %s: %v", entry.Path, err)
+    }
   }

-  if err := entry.ReadFileFlags(fullPath, f); err != nil {
-    LOG_WARN("LIST_ATTR", "Failed to read file flags on %s: %v", entry.Path, err)
+  // if the flags are already in the FileInfo we can keep them
+  if !entry.GetFileFlags(f) && options.IncludeFileFlags {
+    if err := entry.ReadFileFlags(f, fullPath); err != nil {
+      LOG_WARN("LIST_ATTR", "Failed to read file flags on %s: %v", entry.Path, err)
+    }
   }

-  if excludeByAttribute && entry.Attributes != nil && excludedByAttribute(*entry.Attributes) {
+  if options.ExcludeByAttribute && entry.Attributes != nil && excludedByAttribute(*entry.Attributes) {
     LOG_DEBUG("LIST_EXCLUDE", "%s is excluded by attribute", entry.Path)
     continue
   }

   if isHardLinked {
-    listingState.linkTable[linkKey] = listingState.linkIndex
-    listingState.linkIndex++
+    dl.linkTable[linkKey] = dl.linkIndex
+    dl.linkIndex++
   }

   if entry.IsDir() {
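The EntryLister interface introduced above makes the directory walk pluggable. Purely as an illustration (none of this is in the commit), a test inside the duplicacy package could swap in a canned lister; the type and method names used below come from this diff, while fakeLister itself is hypothetical.

```go
// Hypothetical in-memory EntryLister for tests: it ignores the real
// filesystem and just replays a fixed set of entries per directory path.
type fakeLister struct {
	entries map[string][]*Entry // directory path -> entries to emit
}

func (fl *fakeLister) ListDir(top string, path string, listingChannel chan *Entry,
	options *EntryListerOptions) (directoryList []*Entry, skippedFiles []string, err error) {
	if options == nil {
		options = &EntryListerOptions{}
	}
	for _, entry := range fl.entries[path] {
		if entry.IsSpecial() && !options.IncludeSpecials {
			continue
		}
		listingChannel <- entry
		if entry.IsDir() {
			directoryList = append(directoryList, entry)
		}
	}
	return directoryList, nil, nil
}
```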


@@ -7,7 +7,6 @@ package duplicacy
 import (
   "bytes"
   "encoding/json"
-  "io/ioutil"
   "math/rand"
   "os"
   "path/filepath"
@@ -166,13 +165,13 @@ func TestEntryOrder(t *testing.T) {
     continue
   }
-  err := ioutil.WriteFile(fullPath, []byte(file), 0700)
+  err := os.WriteFile(fullPath, []byte(file), 0700)
   if err != nil {
     t.Errorf("WriteFile(%s) returned an error: %s", fullPath, err)
   }
 }

-  listingState := NewListingState()
+  lister := NewLocalDirectoryLister()
   directories := make([]*Entry, 0, 4)
   directories = append(directories, CreateEntry("", 0, 0, 0))
@@ -184,7 +183,7 @@ func TestEntryOrder(t *testing.T) {
 for len(directories) > 0 {
   directory := directories[len(directories)-1]
   directories = directories[:len(directories)-1]
-  subdirectories, _, err := ListEntries(testDir, directory.Path, nil, "", false, listingState, entryChannel)
+  subdirectories, _, err := lister.ListDir(testDir, directory.Path, entryChannel, nil)
   if err != nil {
     t.Errorf("ListEntries(%s, %s) returned an error: %s", testDir, directory.Path, err)
   }
@@ -277,7 +276,7 @@ func TestEntryExcludeByAttribute(t *testing.T) {
     continue
   }
-  err := ioutil.WriteFile(fullPath, []byte(file), 0700)
+  err := os.WriteFile(fullPath, []byte(file), 0700)
   if err != nil {
     t.Errorf("WriteFile(%s) returned an error: %s", fullPath, err)
   }
@@ -293,7 +292,7 @@ func TestEntryExcludeByAttribute(t *testing.T) {
 for _, excludeByAttribute := range [2]bool{true, false} {
   t.Logf("testing excludeByAttribute: %t", excludeByAttribute)
-  listingState := NewListingState()
+  lister := NewLocalDirectoryLister()
   directories := make([]*Entry, 0, 4)
   directories = append(directories, CreateEntry("", 0, 0, 0))
@@ -304,7 +303,11 @@ func TestEntryExcludeByAttribute(t *testing.T) {
 for len(directories) > 0 {
   directory := directories[len(directories)-1]
   directories = directories[:len(directories)-1]
-  subdirectories, _, err := ListEntries(testDir, directory.Path, nil, "", excludeByAttribute, listingState, entryChannel)
+  subdirectories, _, err := lister.ListDir(testDir, directory.Path, entryChannel,
+    &EntryListerOptions{
+      ExcludeByAttribute: excludeByAttribute,
+    })
   if err != nil {
     t.Errorf("ListEntries(%s, %s) returned an error: %s", testDir, directory.Path, err)
   }


@@ -54,6 +54,7 @@ type Preference struct {
   ExcludeByAttribute bool `json:"exclude_by_attribute"`
   ExcludeXattrs bool `json:"exclude_xattrs"`
   NormalizeXattrs bool `json:"normalize_xattrs"`
+  IncludeFileFlags bool `json:"include_file_flags"`
   IncludeSpecials bool `json:"include_specials"`
   FileFlagsMask flagsMask `json:"file_flags_mask"`
 }


@@ -9,15 +9,13 @@ import (
   "encoding/json"
   "fmt"
   "io"
-  "io/ioutil"
   "os"
   "path/filepath"
+  "sort"
   "strings"
   "time"
-  "sort"

   "github.com/vmihailenco/msgpack"
 )

 // Snapshot represents a backup of the repository.
@@ -60,20 +58,41 @@ func CreateEmptySnapshot(id string) (snapshto *Snapshot) {
 type DirectoryListing struct {
   directory string
   files *[]Entry
 }

-func (snapshot *Snapshot) ListLocalFiles(top string, nobackupFile string,
-  filtersFile string, excludeByAttribute bool, listingChannel chan *Entry,
-  skippedDirectories *[]string, skippedFiles *[]string) {
-
-  var patterns []string
-  listingState := NewListingState()
-
-  if filtersFile == "" {
-    filtersFile = joinPath(GetDuplicacyPreferencePath(), "filters")
-  }
-  patterns = ProcessFilters(filtersFile)
+type ListFilesOptions struct {
+  NoBackupFile string
+  FiltersFile string
+  ExcludeByAttribute bool
+  ExcludeXattrs bool
+  NormalizeXattr bool
+  IncludeFileFlags bool
+  IncludeSpecials bool
+}
+
+func NewListFilesOptions(p *Preference) *ListFilesOptions {
+  return &ListFilesOptions{
+    NoBackupFile: p.NobackupFile,
+    FiltersFile: p.FiltersFile,
+    ExcludeByAttribute: p.ExcludeByAttribute,
+    ExcludeXattrs: p.ExcludeXattrs,
+    NormalizeXattr: p.NormalizeXattrs,
+    IncludeFileFlags: p.IncludeFileFlags,
+    IncludeSpecials: p.IncludeSpecials,
+  }
+}
+
+func (snapshot *Snapshot) ListLocalFiles(top string,
+  listingChannel chan *Entry, skippedDirectories *[]string, skippedFiles *[]string,
+  options *ListFilesOptions) {
+
+  if options.FiltersFile == "" {
+    options.FiltersFile = joinPath(GetDuplicacyPreferencePath(), "filters")
+  }
+  patterns := ProcessFilters(options.FiltersFile)
+
+  lister := NewLocalDirectoryLister()

   directories := make([]*Entry, 0, 256)
   directories = append(directories, CreateEntry("", 0, 0, 0))
@@ -82,7 +101,16 @@ func (snapshot *Snapshot) ListLocalFiles(top string, nobackupFile string,
   directory := directories[len(directories)-1]
   directories = directories[:len(directories)-1]
-  subdirectories, skipped, err := ListEntries(top, directory.Path, patterns, nobackupFile, excludeByAttribute, listingState, listingChannel)
+  subdirectories, skipped, err := lister.ListDir(top, directory.Path, listingChannel,
+    &EntryListerOptions{
+      Patterns: patterns,
+      NoBackupFile: options.NoBackupFile,
+      ExcludeByAttribute: options.ExcludeByAttribute,
+      ExcludeXattrs: options.ExcludeXattrs,
+      NormalizeXattr: options.NormalizeXattr,
+      IncludeFileFlags: options.IncludeFileFlags,
+      IncludeSpecials: options.IncludeSpecials,
+    })
   if err != nil {
     if directory.Path == "" {
       LOG_ERROR("LIST_FAILURE", "Failed to list the repository root: %v", err)
@@ -105,7 +133,7 @@ func (snapshot *Snapshot) ListLocalFiles(top string, nobackupFile string,
   close(listingChannel)
 }

-func (snapshot *Snapshot)ListRemoteFiles(config *Config, chunkOperator *ChunkOperator, entryOut func(*Entry) bool) {
+func (snapshot *Snapshot) ListRemoteFiles(config *Config, chunkOperator *ChunkOperator, entryOut func(*Entry) bool) {
   var chunks []string
   for _, chunkHash := range snapshot.FileSequence {
@@ -125,12 +153,12 @@ func (snapshot *Snapshot)ListRemoteFiles(config *Config, chunkOperator *ChunkOpe
     if chunk != nil {
       config.PutChunk(chunk)
     }
-  } ()
+  }()

   // Normally if Version is 0 then the snapshot is created by CLI v2 but unfortunately CLI 3.0.1 does not set the
   // version bit correctly when copying old backups. So we need to check the first byte -- if it is '[' then it is
   // the old format. The new format starts with a string encoded in msgpack and the first byte can't be '['.
-  if snapshot.Version == 0 || reader.GetFirstByte() == '['{
+  if snapshot.Version == 0 || reader.GetFirstByte() == '[' {
     LOG_INFO("SNAPSHOT_VERSION", "snapshot %s at revision %d is encoded in an old version format", snapshot.ID, snapshot.Revision)
     files := make([]*Entry, 0)
     decoder := json.NewDecoder(reader)
@@ -201,7 +229,7 @@ func (snapshot *Snapshot)ListRemoteFiles(config *Config, chunkOperator *ChunkOpe
   } else {
     LOG_ERROR("SNAPSHOT_VERSION", "snapshot %s at revision %d is encoded in unsupported version %d format",
       snapshot.ID, snapshot.Revision, snapshot.Version)
     return
   }
@@ -244,7 +272,7 @@ func ProcessFilterFile(patternFile string, includedFiles []string) (patterns []s
   }
   includedFiles = append(includedFiles, patternFile)
   LOG_INFO("SNAPSHOT_FILTER", "Parsing filter file %s", patternFile)
-  patternFileContent, err := ioutil.ReadFile(patternFile)
+  patternFileContent, err := os.ReadFile(patternFile)
   if err == nil {
     patternFileLines := strings.Split(string(patternFileContent), "\n")
     patterns = ProcessFilterLines(patternFileLines, includedFiles)
@@ -264,7 +292,7 @@ func ProcessFilterLines(patternFileLines []string, includedFiles []string) (patt
   if patternIncludeFile == "" {
     continue
   }
-  if ! filepath.IsAbs(patternIncludeFile) {
+  if !filepath.IsAbs(patternIncludeFile) {
     basePath := ""
     if len(includedFiles) == 0 {
       basePath, _ = os.Getwd()
@@ -491,4 +519,3 @@ func encodeSequence(sequence []string) []string {
   return sequenceInHex
 }
-
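ListLocalFiles above copies ListFilesOptions field by field into EntryListerOptions on every directory walk. A small method could centralize that translation; this is a hypothetical sketch, not part of the commit, with field names taken from the two structs in this diff.

```go
// Hypothetical conversion helper (not in this commit): translate the
// snapshot-level ListFilesOptions into the per-walk EntryListerOptions,
// attaching the already-compiled filter patterns.
func (o *ListFilesOptions) toEntryListerOptions(patterns []string) *EntryListerOptions {
	return &EntryListerOptions{
		Patterns:           patterns,
		NoBackupFile:       o.NoBackupFile,
		ExcludeByAttribute: o.ExcludeByAttribute,
		ExcludeXattrs:      o.ExcludeXattrs,
		NormalizeXattr:     o.NormalizeXattr,
		IncludeFileFlags:   o.IncludeFileFlags,
		IncludeSpecials:    o.IncludeSpecials,
	}
}
```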


@@ -18,10 +18,10 @@ import (
   "sort"
   "strconv"
   "strings"
-  "text/tabwriter"
-  "time"
   "sync"
   "sync/atomic"
+  "text/tabwriter"
+  "time"

   "github.com/aryann/difflib"
 )
@@ -191,7 +191,7 @@ type SnapshotManager struct {
   fileChunk *Chunk
   snapshotCache *FileStorage
   chunkOperator *ChunkOperator
 }

 // CreateSnapshotManager creates a snapshot manager
@@ -738,7 +738,7 @@ func (manager *SnapshotManager) ListSnapshots(snapshotID string, revisionsToList
   totalFileSize := int64(0)
   lastChunk := 0
-  snapshot.ListRemoteFiles(manager.config, manager.chunkOperator, func(file *Entry)bool {
+  snapshot.ListRemoteFiles(manager.config, manager.chunkOperator, func(file *Entry) bool {
     if file.IsFile() {
       totalFiles++
       totalFileSize += file.Size
@@ -753,7 +753,7 @@
     return true
   })
-  snapshot.ListRemoteFiles(manager.config, manager.chunkOperator, func(file *Entry)bool {
+  snapshot.ListRemoteFiles(manager.config, manager.chunkOperator, func(file *Entry) bool {
     if file.IsFile() {
       LOG_INFO("SNAPSHOT_FILE", "%s", file.String(maxSizeDigits))
     }
@@ -908,7 +908,7 @@ func (manager *SnapshotManager) CheckSnapshots(snapshotID string, revisionsToChe
   _, exist, _, err := manager.storage.FindChunk(0, chunkID, false)
   if err != nil {
     LOG_WARN("SNAPSHOT_VALIDATE", "Failed to check the existence of chunk %s: %v",
       chunkID, err)
   } else if exist {
     LOG_INFO("SNAPSHOT_VALIDATE", "Chunk %s is confirmed to exist", chunkID)
     continue
@@ -1031,7 +1031,7 @@ func (manager *SnapshotManager) CheckSnapshots(snapshotID string, revisionsToChe
   if err != nil {
     LOG_WARN("SNAPSHOT_VERIFY", "Failed to save the verified chunks file: %v", err)
   } else {
-    LOG_INFO("SNAPSHOT_VERIFY", "Added %d chunks to the list of verified chunks", len(verifiedChunks) - numberOfVerifiedChunks)
+    LOG_INFO("SNAPSHOT_VERIFY", "Added %d chunks to the list of verified chunks", len(verifiedChunks)-numberOfVerifiedChunks)
   }
 }
@@ -1073,7 +1073,7 @@ func (manager *SnapshotManager) CheckSnapshots(snapshotID string, revisionsToChe
   defer CatchLogException()
   for {
-    chunkIndex, ok := <- chunkChannel
+    chunkIndex, ok := <-chunkChannel
     if !ok {
       wg.Done()
       return
@@ -1093,14 +1093,14 @@ func (manager *SnapshotManager) CheckSnapshots(snapshotID string, revisionsToChe
   elapsedTime := time.Now().Sub(startTime).Seconds()
   speed := int64(float64(downloadedChunkSize) / elapsedTime)
-  remainingTime := int64(float64(totalChunks - downloadedChunks) / float64(downloadedChunks) * elapsedTime)
+  remainingTime := int64(float64(totalChunks-downloadedChunks) / float64(downloadedChunks) * elapsedTime)
   percentage := float64(downloadedChunks) / float64(totalChunks) * 100.0
   LOG_INFO("VERIFY_PROGRESS", "Verified chunk %s (%d/%d), %sB/s %s %.1f%%",
     chunkID, downloadedChunks, totalChunks, PrettySize(speed), PrettyTime(remainingTime), percentage)
   manager.config.PutChunk(chunk)
   }
-  } ()
+  }()
 }

 for chunkIndex := range chunkHashes {
@@ -1289,10 +1289,10 @@ func (manager *SnapshotManager) PrintSnapshot(snapshot *Snapshot) bool {
   }
   // Don't print the ending bracket
-  fmt.Printf("%s", string(description[:len(description) - 2]))
+  fmt.Printf("%s", string(description[:len(description)-2]))
   fmt.Printf(",\n \"files\": [\n")
   isFirstFile := true
-  snapshot.ListRemoteFiles(manager.config, manager.chunkOperator, func (file *Entry) bool {
+  snapshot.ListRemoteFiles(manager.config, manager.chunkOperator, func(file *Entry) bool {
     fileDescription, _ := json.MarshalIndent(file.convertToObject(false), "", " ")
@@ -1322,7 +1322,7 @@ func (manager *SnapshotManager) VerifySnapshot(snapshot *Snapshot) bool {
   }
   files := make([]*Entry, 0)
-  snapshot.ListRemoteFiles(manager.config, manager.chunkOperator, func (file *Entry) bool {
+  snapshot.ListRemoteFiles(manager.config, manager.chunkOperator, func(file *Entry) bool {
     if file.IsFile() && file.Size != 0 {
       file.Attributes = nil
       files = append(files, file)
@@ -1426,7 +1426,7 @@ func (manager *SnapshotManager) RetrieveFile(snapshot *Snapshot, file *Entry, la
 func (manager *SnapshotManager) FindFile(snapshot *Snapshot, filePath string, suppressError bool) *Entry {
   var found *Entry
-  snapshot.ListRemoteFiles(manager.config, manager.chunkOperator, func (entry *Entry) bool {
+  snapshot.ListRemoteFiles(manager.config, manager.chunkOperator, func(entry *Entry) bool {
     if entry.Path == filePath {
       found = entry
       return false
@@ -1479,8 +1479,8 @@ func (manager *SnapshotManager) PrintFile(snapshotID string, revision int, path
   file := manager.FindFile(snapshot, path, false)
   if !manager.RetrieveFile(snapshot, file, nil, func(chunk []byte) {
     fmt.Printf("%s", chunk)
   }) {
     LOG_ERROR("SNAPSHOT_RETRIEVE", "File %s is corrupted in snapshot %s at revision %d",
       path, snapshot.ID, snapshot.Revision)
     return false
@@ -1491,7 +1491,8 @@
 // Diff compares two snapshots, or two revision of a file if the file argument is given.
 func (manager *SnapshotManager) Diff(top string, snapshotID string, revisions []int,
-  filePath string, compareByHash bool, nobackupFile string, filtersFile string, excludeByAttribute bool) bool {
+  filePath string, compareByHash bool,
+  options *ListFilesOptions) bool {

   LOG_DEBUG("DIFF_PARAMETERS", "top: %s, id: %s, revision: %v, path: %s, compareByHash: %t",
     top, snapshotID, revisions, filePath, compareByHash)
@@ -1500,7 +1501,7 @@
   defer func() {
     manager.chunkOperator.Stop()
     manager.chunkOperator = nil
-  } ()
+  }()

   var leftSnapshot *Snapshot
   var rightSnapshot *Snapshot
@@ -1516,11 +1517,11 @@
   localListingChannel := make(chan *Entry)
   go func() {
     defer CatchLogException()
-    rightSnapshot.ListLocalFiles(top, nobackupFile, filtersFile, excludeByAttribute, localListingChannel, nil, nil)
-  } ()
+    rightSnapshot.ListLocalFiles(top, localListingChannel, nil, nil, options)
+  }()

   for entry := range localListingChannel {
     entry.Attributes = nil // attributes are not compared
     rightSnapshotFiles = append(rightSnapshotFiles, entry)
   }
@@ -1725,7 +1726,7 @@ func (manager *SnapshotManager) ShowHistory(top string, snapshotID string, revis
   defer func() {
     manager.chunkOperator.Stop()
     manager.chunkOperator = nil
-  } ()
+  }()

   var err error
@@ -1821,15 +1822,16 @@ func (manager *SnapshotManager) resurrectChunk(fossilPath string, chunkID string
 // PruneSnapshots deletes snapshots by revisions, tags, or a retention policy. The main idea is two-step
 // fossil collection.
-// 1. Delete snapshots specified by revision, retention policy, with a tag. Find any resulting unreferenced
-// chunks, and mark them as fossils (by renaming). After that, create a fossil collection file containing
-// fossils collected during current run, and temporary files encountered. Also in the file is the latest
-// revision for each snapshot id. Save this file to a local directory.
 //
-// 2. On next run, check if there is any new revision for each snapshot. Or if the lastest revision is too
-// old, for instance, more than 7 days. This step is to identify snapshots that were being created while
-// step 1 is in progress. For each fossil reference by any of these snapshots, move them back to the
-// normal chunk directory.
+// 1. Delete snapshots specified by revision, retention policy, with a tag. Find any resulting unreferenced
+//    chunks, and mark them as fossils (by renaming). After that, create a fossil collection file containing
+//    fossils collected during current run, and temporary files encountered. Also in the file is the latest
+//    revision for each snapshot id. Save this file to a local directory.
+//
+// 2. On next run, check if there is any new revision for each snapshot. Or if the lastest revision is too
+//    old, for instance, more than 7 days. This step is to identify snapshots that were being created while
+//    step 1 is in progress. For each fossil reference by any of these snapshots, move them back to the
+//    normal chunk directory.
 //
 // Note that a snapshot being created when step 2 is in progress may reference a fossil. To avoid this
 // problem, never remove the lastest revision (unless exclusive is true), and only cache chunks referenced
@@ -1853,7 +1855,7 @@ func (manager *SnapshotManager) PruneSnapshots(selfID string, snapshotID string,
   defer func() {
     manager.chunkOperator.Stop()
     manager.chunkOperator = nil
-  } ()
+  }()

   prefPath := GetDuplicacyPreferencePath()
   logDir := path.Join(prefPath, "logs")
@@ -2544,7 +2546,7 @@ func (manager *SnapshotManager) CheckSnapshot(snapshot *Snapshot) (err error) {
     numberOfChunks, len(snapshot.ChunkLengths))
   }
-  snapshot.ListRemoteFiles(manager.config, manager.chunkOperator, func (entry *Entry) bool {
+  snapshot.ListRemoteFiles(manager.config, manager.chunkOperator, func(entry *Entry) bool {
     if lastEntry != nil && lastEntry.Compare(entry) >= 0 && !strings.Contains(lastEntry.Path, "\ufffd") {
       err = fmt.Errorf("The entry %s appears before the entry %s", lastEntry.Path, entry.Path)
@@ -2598,7 +2600,7 @@
   if entry.Size != fileSize {
     err = fmt.Errorf("The file %s has a size of %d but the total size of chunks is %d",
       entry.Path, entry.Size, fileSize)
     return false
   }
   return true
@@ -2647,7 +2649,7 @@ func (manager *SnapshotManager) DownloadFile(path string, derivationKey string)
   err = manager.storage.UploadFile(0, path, newChunk.GetBytes())
   if err != nil {
     LOG_WARN("DOWNLOAD_REWRITE", "Failed to re-uploaded the file %s: %v", path, err)
-  } else{
+  } else {
     LOG_INFO("DOWNLOAD_REWRITE", "The file %s has been re-uploaded", path)
   }
 }


@@ -143,30 +143,6 @@ func (entry *Entry) getHardLinkKey(f os.FileInfo) (key listEntryLinkKey, linked
   return
 }

-func (entry *Entry) ReadAttributes(fullPath string, fi os.FileInfo) error {
-  return nil
-}
-
-func (entry *Entry) ReadFileFlags(fullPath string, fileInfo os.FileInfo) error {
-  return nil
-}
-
-func (entry *Entry) SetAttributesToFile(fullPath string, normalize bool) error {
-  return nil
-}
-
-func (entry *Entry) RestoreEarlyDirFlags(fullPath string, mask uint32) error {
-  return nil
-}
-
-func (entry *Entry) RestoreEarlyFileFlags(f *os.File, mask uint32) error {
-  return nil
-}
-
-func (entry *Entry) RestoreLateFileFlags(fullPath string, fileInfo os.FileInfo, mask uint32) error {
-  return nil
-}
-
 func (entry *Entry) ReadSpecial(fullPath string, fileInfo os.FileInfo) error {
   return nil
 }


@@ -2,8 +2,8 @@
 // Free for personal use and commercial trial
 // Commercial use requires per-user licenses available from https://duplicacy.com

-//go:build freebsd || netbsd
-// +build freebsd netbsd
+//go:build freebsd
+// +build freebsd

 package duplicacy

src/duplicacy_xattr.go (new file, 35 lines)

@@ -0,0 +1,35 @@
+// Copyright (c) Acrosync LLC. All rights reserved.
+// Free for personal use and commercial trial
+// Commercial use requires per-user licenses available from https://duplicacy.com
+
+package duplicacy
+
+import "os"
+
+func (entry *Entry) ReadAttributes(fi os.FileInfo, fullPath string, normalize bool) error {
+  return entry.readAttributes(fi, fullPath, normalize)
+}
+
+func (entry *Entry) GetFileFlags(fileInfo os.FileInfo) bool {
+  return entry.getFileFlags(fileInfo)
+}
+
+func (entry *Entry) ReadFileFlags(fileInfo os.FileInfo, fullPath string) error {
+  return entry.readFileFlags(fileInfo, fullPath)
+}
+
+func (entry *Entry) RestoreEarlyDirFlags(fullPath string, mask uint32) error {
+  return entry.restoreEarlyDirFlags(fullPath, mask)
+}
+
+func (entry *Entry) RestoreEarlyFileFlags(f *os.File, mask uint32) error {
+  return entry.restoreEarlyFileFlags(f, mask)
+}
+
+func (entry *Entry) RestoreLateFileFlags(fullPath string, fileInfo os.FileInfo, mask uint32) error {
+  return entry.restoreLateFileFlags(fullPath, fileInfo, mask)
+}
+
+func (entry *Entry) SetAttributesToFile(fullPath string, normalize bool) error {
+  return entry.setAttributesToFile(fullPath, normalize)
+}
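The new src/duplicacy_xattr.go keeps the exported entry points (and their doc comments) in one place and delegates to lower-case, per-platform implementations selected by build tags. One way to catch a platform file that misses one of those lower-case methods is a compile-time interface assertion; the snippet below is a hypothetical addition to this file, not part of the commit, and only uses the method signatures shown in the diffs on this page.

```go
// Hypothetical compile-time check (not in this commit): every platform build
// must provide the full lower-case implementation set that the exported
// wrappers above delegate to, or compilation fails on that platform.
var _ interface {
	readAttributes(fi os.FileInfo, fullPath string, normalize bool) error
	getFileFlags(fileInfo os.FileInfo) bool
	readFileFlags(fileInfo os.FileInfo, fullPath string) error
	setAttributesToFile(fullPath string, normalize bool) error
	restoreEarlyDirFlags(fullPath string, mask uint32) error
	restoreEarlyFileFlags(f *os.File, mask uint32) error
	restoreLateFileFlags(fullPath string, fileInfo os.FileInfo, mask uint32) error
} = (*Entry)(nil)
```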


@@ -8,6 +8,7 @@ import (
   "bytes"
   "encoding/binary"
   "errors"
+  "math"
   "os"
   "syscall"
@@ -25,7 +26,7 @@ func init() {
   darwinIsSuperUser = unix.Geteuid() == 0
 }

-func (entry *Entry) ReadAttributes(fullPath string, fi os.FileInfo) error {
+func (entry *Entry) readAttributes(fi os.FileInfo, fullPath string, normalize bool) error {
   if entry.IsSpecial() {
     return nil
   }
@@ -51,7 +52,7 @@
   return allErrors
 }

-func (entry *Entry) ReadFileFlags(fullPath string, fileInfo os.FileInfo) error {
+func (entry *Entry) getFileFlags(fileInfo os.FileInfo) bool {
   stat := fileInfo.Sys().(*syscall.Stat_t)
   if stat.Flags != 0 {
     if entry.Attributes == nil {
@@ -61,10 +62,14 @@
     binary.LittleEndian.PutUint32(v, stat.Flags)
     (*entry.Attributes)[darwinFileFlagsKey] = v
   }
+  return true
+}
+
+func (entry *Entry) readFileFlags(fileInfo os.FileInfo, fullPath string) error {
   return nil
 }

-func (entry *Entry) SetAttributesToFile(fullPath string, normalize bool) error {
+func (entry *Entry) setAttributesToFile(fullPath string, normalize bool) error {
   if entry.Attributes == nil || len(*entry.Attributes) == 0 || entry.IsSpecial() {
     return nil
   }
@@ -100,16 +105,16 @@
   return err
 }

-func (entry *Entry) RestoreEarlyDirFlags(fullPath string, mask uint32) error {
+func (entry *Entry) restoreEarlyDirFlags(fullPath string, mask uint32) error {
   return nil
 }

-func (entry *Entry) RestoreEarlyFileFlags(f *os.File, mask uint32) error {
+func (entry *Entry) restoreEarlyFileFlags(f *os.File, mask uint32) error {
   return nil
 }

-func (entry *Entry) RestoreLateFileFlags(fullPath string, fileInfo os.FileInfo, mask uint32) error {
-  if mask == 0xffffffff {
+func (entry *Entry) restoreLateFileFlags(fullPath string, fileInfo os.FileInfo, mask uint32) error {
+  if mask == math.MaxUint32 {
     return nil
   }
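On darwin (and on BSD, which mirrors this code), getFileFlags above tucks the stat buffer's st_flags into the entry's attribute map as a little-endian uint32 under darwinFileFlagsKey. A restore-side sketch of reading that value back out is below; it is illustrative only, not the commit's code, and assumes it would live in this darwin file (which already imports encoding/binary per the hunk above).

```go
// Illustrative only: recover the darwin/BSD st_flags value that getFileFlags
// stored as a little-endian uint32 attribute; ok is false when the entry
// carries no flags.
func storedDarwinFlags(entry *Entry) (flags uint32, ok bool) {
	if entry.Attributes == nil {
		return 0, false
	}
	v, present := (*entry.Attributes)[darwinFileFlagsKey]
	if !present || len(v) < 4 {
		return 0, false
	}
	return binary.LittleEndian.Uint32(v), true
}
```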


@@ -9,6 +9,7 @@ import (
   "encoding/binary"
   "errors"
   "fmt"
+  "math"
   "os"
   "unsafe"
@@ -68,7 +69,7 @@ func ioctl(f *os.File, request uintptr, attrp *uint32) error {
   })
 }

-func (entry *Entry) ReadAttributes(fullPath string, fi os.FileInfo) error {
+func (entry *Entry) readAttributes(fi os.FileInfo, fullPath string, normalize bool) error {
   attributes, err := xattr.LList(fullPath)
   if err != nil {
     return err
@@ -90,14 +91,18 @@
   return allErrors
 }

-func (entry *Entry) ReadFileFlags(fullPath string, fileInfo os.FileInfo) error {
+func (entry *Entry) getFileFlags(fileInfo os.FileInfo) bool {
+  return false
+}
+
+func (entry *Entry) readFileFlags(fileInfo os.FileInfo, fullPath string) error {
   // the linux file flags interface is quite depressing. The half assed attempt at statx
-  // doesn't even cover the flags we're interested in
+  // doesn't even cover the flags we're usually interested in for btrfs
   if !(entry.IsFile() || entry.IsDir()) {
     return nil
   }
-  f, err := os.OpenFile(fullPath, os.O_RDONLY|unix.O_NOATIME|unix.O_NOFOLLOW, 0)
+  f, err := os.OpenFile(fullPath, os.O_RDONLY|unix.O_NONBLOCK|unix.O_NOFOLLOW|unix.O_NOATIME, 0)
   if err != nil {
     return err
   }
@@ -107,6 +112,7 @@
   err = ioctl(f, unix.FS_IOC_GETFLAGS, &flags)
   f.Close()
   if err != nil {
+    // inappropriate ioctl for device means flags aren't a thing on that FS
     if err == unix.ENOTTY {
       return nil
     }
@@ -124,7 +130,7 @@
   return nil
 }

-func (entry *Entry) SetAttributesToFile(fullPath string, normalize bool) error {
+func (entry *Entry) setAttributesToFile(fullPath string, normalize bool) error {
   if entry.Attributes == nil || len(*entry.Attributes) == 0 {
     return nil
   }
@@ -160,8 +166,8 @@
   return err
 }

-func (entry *Entry) RestoreEarlyDirFlags(fullPath string, mask uint32) error {
-  if entry.Attributes == nil || mask == 0xffffffff {
+func (entry *Entry) restoreEarlyDirFlags(fullPath string, mask uint32) error {
+  if entry.Attributes == nil || mask == math.MaxUint32 {
     return nil
   }
   var flags uint32
@@ -184,8 +190,8 @@
   return nil
 }

-func (entry *Entry) RestoreEarlyFileFlags(f *os.File, mask uint32) error {
-  if entry.Attributes == nil || mask == 0xffffffff {
+func (entry *Entry) restoreEarlyFileFlags(f *os.File, mask uint32) error {
+  if entry.Attributes == nil || mask == math.MaxUint32 {
     return nil
   }
   var flags uint32
@@ -203,8 +209,8 @@
   return nil
 }

-func (entry *Entry) RestoreLateFileFlags(fullPath string, fileInfo os.FileInfo, mask uint32) error {
-  if entry.IsLink() || entry.Attributes == nil || mask == 0xffffffff {
+func (entry *Entry) restoreLateFileFlags(fullPath string, fileInfo os.FileInfo, mask uint32) error {
+  if entry.IsLink() || entry.Attributes == nil || mask == math.MaxUint32 {
     return nil
   }
   var flags uint32
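The bodies of the Linux restore*FileFlags functions are not shown in these hunks; presumably the write side mirrors the read path with FS_IOC_SETFLAGS. The sketch below is an assumption, not the commit's code: it reuses the package's ioctl helper shown above, and how the mask is combined with the saved flags here is my guess, so the real FileFlagsMask semantics may differ.

```go
// Assumption-level sketch of the Linux write side, using the package's
// ioctl helper: read the current inode flags, merge in the saved bits
// permitted by the mask, and write the result back with FS_IOC_SETFLAGS.
func setInodeFlags(f *os.File, saved uint32, mask uint32) error {
	var current uint32
	if err := ioctl(f, unix.FS_IOC_GETFLAGS, &current); err != nil {
		return err
	}
	// Keep on-disk bits outside the mask as they are; take the rest from the
	// saved value. (The exact masking duplicacy applies is not shown here.)
	desired := (current &^ mask) | (saved & mask)
	if desired == current {
		return nil
	}
	return ioctl(f, unix.FS_IOC_SETFLAGS, &desired)
}
```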


@@ -0,0 +1,35 @@
+// Copyright (c) Acrosync LLC. All rights reserved.
+// Free for personal use and commercial trial
+// Commercial use requires per-user licenses available from https://duplicacy.com
+
+package duplicacy
+
+import "os"
+
+func (entry *Entry) readAttributes(fi os.FileInfo, fullPath string, normalize bool) error {
+  return nil
+}
+
+func (entry *Entry) getFileFlags(fileInfo os.FileInfo) bool {
+  return true
+}
+
+func (entry *Entry) readFileFlags(fileInfo os.FileInfo, fullPath string) error {
+  return nil
+}
+
+func (entry *Entry) setAttributesToFile(fullPath string, normalize bool) error {
+  return nil
+}
+
+func (entry *Entry) restoreEarlyDirFlags(fullPath string, mask uint32) error {
+  return nil
+}
+
+func (entry *Entry) restoreEarlyFileFlags(f *os.File, mask uint32) error {
+  return nil
+}
+
+func (entry *Entry) restoreLateFileFlags(fullPath string, fileInfo os.FileInfo, mask uint32) error {
+  return nil
+}


@@ -11,6 +11,7 @@ import (
   "bytes"
   "encoding/binary"
   "errors"
+  "math"
   "os"
   "syscall"
   "unsafe"
@@ -32,7 +33,7 @@ func init() {
   bsdIsSuperUser = syscall.Geteuid() == 0
 }

-func (entry *Entry) ReadAttributes(fullPath string, fi os.FileInfo) error {
+func (entry *Entry) readAttributes(fi os.FileInfo, fullPath string, normalize bool) error {
   if entry.IsSpecial() {
     return nil
   }
@@ -58,7 +59,7 @@
   return allErrors
 }

-func (entry *Entry) ReadFileFlags(fullPath string, fileInfo os.FileInfo) error {
+func (entry *Entry) getFileFlags(fileInfo os.FileInfo) bool {
   stat := fileInfo.Sys().(*syscall.Stat_t)
   if stat.Flags != 0 {
     if entry.Attributes == nil {
@@ -68,10 +69,14 @@
     binary.LittleEndian.PutUint32(v, stat.Flags)
     (*entry.Attributes)[bsdFileFlagsKey] = v
   }
+  return true
+}
+
+func (entry *Entry) readFileFlags(fileInfo os.FileInfo, fullPath string) error {
   return nil
 }

-func (entry *Entry) SetAttributesToFile(fullPath string, normalize bool) error {
+func (entry *Entry) setAttributesToFile(fullPath string, normalize bool) error {
   if entry.Attributes == nil || len(*entry.Attributes) == 0 || entry.IsSpecial() {
     return nil
   }
@@ -107,16 +112,16 @@
   return err
 }

-func (entry *Entry) RestoreEarlyDirFlags(fullPath string, mask uint32) error {
+func (entry *Entry) restoreEarlyDirFlags(fullPath string, mask uint32) error {
   return nil
 }

-func (entry *Entry) RestoreEarlyFileFlags(f *os.File, mask uint32) error {
+func (entry *Entry) restoreEarlyFileFlags(f *os.File, mask uint32) error {
   return nil
 }

-func (entry *Entry) RestoreLateFileFlags(fullPath string, fileInfo os.FileInfo, mask uint32) error {
-  if mask == 0xffffffff {
+func (entry *Entry) restoreLateFileFlags(fullPath string, fileInfo os.FileInfo, mask uint32) error {
+  if mask == math.MaxUint32 {
     return nil
   }