Compare commits

..

1 commit

Commit ad677702ba (2023-10-02 17:36:34 -05:00)

Initial implementation of file/inode flags (Linux, BSD, Darwin)

Basic support for BSD- and Darwin-style chflags (stat flags). These flags are
applied at the end of file restore.

Supports Linux-style ioctl_iflags(2) flags in a two-step process: flags that
must be applied before any data is written, such as compress and especially
no-COW, are applied immediately when the file is opened.

The flags format is backwards compatible. An attribute whose name starts with
a null byte is used to store the flags in the entry attributes table; an older
version of duplicacy should silently fail to restore this attribute
(effectively ignoring it).

Also fixes xattr restore to use O_NOFOLLOW so that attributes are applied to
the symlink itself.

TODO: add tests, and possibly an option to clear the immutable/append-only
flags before restoring over an existing file, similar to rsync. Attributes and
flags are not yet applied to the top-most directory.
12 changed files with 179 additions and 682 deletions
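The null-byte attribute scheme described in the commit message can be illustrated with a short standalone sketch: the flags word is packed as a 4-byte little-endian value under the "\x00bf" key that the new BSD/Darwin code in this diff uses, so an older duplicacy sees it as an ordinary extended attribute and simply fails to restore it. Only the key and the encoding come from the change itself; the program structure and the example flag value are illustrative assumptions.

package main

import (
	"encoding/binary"
	"fmt"
)

const bsdFileFlagsKey = "\x00bf" // null-byte prefix keeps the key out of the real xattr namespace

// storeFlags packs the stat flags into the entry's attribute map; an older
// duplicacy treats this as a normal extended attribute and silently fails to set it.
func storeFlags(attributes map[string][]byte, flags uint32) {
	v := make([]byte, 4)
	binary.LittleEndian.PutUint32(v, flags)
	attributes[bsdFileFlagsKey] = v
}

// loadFlags recovers the flags word during restore, if present.
func loadFlags(attributes map[string][]byte) (uint32, bool) {
	v, ok := attributes[bsdFileFlagsKey]
	if !ok || len(v) != 4 {
		return 0, false
	}
	return binary.LittleEndian.Uint32(v), true
}

func main() {
	attrs := map[string][]byte{}
	storeFlags(attrs, 0x1) // UF_NODUMP on BSD/macOS; value used only for illustration
	if flags, ok := loadFlags(attrs); ok {
		fmt.Printf("flags to restore: 0x%x\n", flags)
	}
}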

View File

@@ -21,7 +21,7 @@ import (
"sync/atomic" "sync/atomic"
"time" "time"
"github.com/vmihailenco/msgpack" "github.com/vmihailenco/msgpack"
) )
// BackupManager performs the two major operations, backup and restore, and passes other operations, mostly related to // BackupManager performs the two major operations, backup and restore, and passes other operations, mostly related to
@@ -36,9 +36,9 @@ type BackupManager struct {
config *Config // contains a number of options config *Config // contains a number of options
nobackupFile string // don't backup directory when this file name is found nobackupFile string // don't backup directory when this file name is found
filtersFile string // the path to the filters file filtersFile string // the path to the filters file
excludeByAttribute bool // don't backup file based on file attribute excludeByAttribute bool // don't backup file based on file attribute
cachePath string cachePath string
} }
@@ -146,7 +146,7 @@ func (manager *BackupManager) Backup(top string, quickMode bool, threads int, ta
if manager.config.DataShards != 0 && manager.config.ParityShards != 0 { if manager.config.DataShards != 0 && manager.config.ParityShards != 0 {
LOG_INFO("BACKUP_ERASURECODING", "Erasure coding is enabled with %d data shards and %d parity shards", LOG_INFO("BACKUP_ERASURECODING", "Erasure coding is enabled with %d data shards and %d parity shards",
manager.config.DataShards, manager.config.ParityShards) manager.config.DataShards, manager.config.ParityShards)
} }
if manager.config.rsaPublicKey != nil && len(manager.config.FileKey) > 0 { if manager.config.rsaPublicKey != nil && len(manager.config.FileKey) > 0 {
@@ -187,7 +187,7 @@ func (manager *BackupManager) Backup(top string, quickMode bool, threads int, ta
// If the listing operation is fast and this is an initial backup, list all chunks and // If the listing operation is fast and this is an initial backup, list all chunks and
// put them in the cache. // put them in the cache.
if manager.storage.IsFastListing() && remoteSnapshot.Revision == 0 { if (manager.storage.IsFastListing() && remoteSnapshot.Revision == 0) {
LOG_INFO("BACKUP_LIST", "Listing all chunks") LOG_INFO("BACKUP_LIST", "Listing all chunks")
allChunks, _ := manager.SnapshotManager.ListAllFiles(manager.storage, "chunks/") allChunks, _ := manager.SnapshotManager.ListAllFiles(manager.storage, "chunks/")
@@ -222,7 +222,7 @@ func (manager *BackupManager) Backup(top string, quickMode bool, threads int, ta
var totalModifiedFileSize int64 // total size of modified files var totalModifiedFileSize int64 // total size of modified files
var uploadedModifiedFileSize int64 // portions that have been uploaded (including cache hits) var uploadedModifiedFileSize int64 // portions that have been uploaded (including cache hits)
var preservedFileSize int64 // total size of unmodified files var preservedFileSize int64 // total size of unmodified files
localSnapshot := CreateEmptySnapshot(manager.snapshotID) localSnapshot := CreateEmptySnapshot(manager.snapshotID)
localSnapshot.Revision = remoteSnapshot.Revision + 1 localSnapshot.Revision = remoteSnapshot.Revision + 1
@@ -239,7 +239,7 @@ func (manager *BackupManager) Backup(top string, quickMode bool, threads int, ta
// List local files // List local files
defer CatchLogException() defer CatchLogException()
localSnapshot.ListLocalFiles(shadowTop, manager.nobackupFile, manager.filtersFile, manager.excludeByAttribute, localListingChannel, &skippedDirectories, &skippedFiles) localSnapshot.ListLocalFiles(shadowTop, manager.nobackupFile, manager.filtersFile, manager.excludeByAttribute, localListingChannel, &skippedDirectories, &skippedFiles)
}() } ()
go func() { go func() {
// List remote files // List remote files
@@ -261,7 +261,7 @@ func (manager *BackupManager) Backup(top string, quickMode bool, threads int, ta
}) })
} }
close(remoteListingChannel) close(remoteListingChannel)
}() } ()
// Create the local file list // Create the local file list
localEntryList, err := CreateEntryList(manager.snapshotID, manager.cachePath, maximumInMemoryEntries) localEntryList, err := CreateEntryList(manager.snapshotID, manager.cachePath, maximumInMemoryEntries)
@@ -275,7 +275,7 @@ func (manager *BackupManager) Backup(top string, quickMode bool, threads int, ta
var remoteEntry *Entry var remoteEntry *Entry
remoteListingOK := true remoteListingOK := true
for { for {
localEntry := <-localListingChannel localEntry := <- localListingChannel
if localEntry == nil { if localEntry == nil {
break break
} }
@@ -289,7 +289,7 @@ func (manager *BackupManager) Backup(top string, quickMode bool, threads int, ta
compareResult = localEntry.Compare(remoteEntry) compareResult = localEntry.Compare(remoteEntry)
} else { } else {
if remoteListingOK { if remoteListingOK {
remoteEntry, remoteListingOK = <-remoteListingChannel remoteEntry, remoteListingOK = <- remoteListingChannel
} }
if !remoteListingOK { if !remoteListingOK {
compareResult = -1 compareResult = -1
@@ -304,7 +304,7 @@ func (manager *BackupManager) Backup(top string, quickMode bool, threads int, ta
remoteEntry = nil remoteEntry = nil
} }
if compareResult == 0 { if compareResult == 0 {
// No need to check if it is in hash mode -- in that case remote listing is nil // No need to check if it is in hash mode -- in that case remote listing is nil
if localEntry.IsSameAs(remoteEntry) && localEntry.IsFile() { if localEntry.IsSameAs(remoteEntry) && localEntry.IsFile() {
if localEntry.Size > 0 { if localEntry.Size > 0 {
@@ -339,8 +339,8 @@ func (manager *BackupManager) Backup(top string, quickMode bool, threads int, ta
// compareResult must be < 0; the local file is new // compareResult must be < 0; the local file is new
totalModifiedFileSize += localEntry.Size totalModifiedFileSize += localEntry.Size
if localEntry.Size > 0 { if localEntry.Size > 0 {
// A size of -1 indicates this is a modified file that will be uploaded // A size of -1 indicates this is a modified file that will be uploaded
localEntry.Size = -1 localEntry.Size = -1
} }
} }
@@ -448,7 +448,7 @@ func (manager *BackupManager) Backup(top string, quickMode bool, threads int, ta
_, found := chunkCache[chunkID] _, found := chunkCache[chunkID]
if found { if found {
if time.Now().Unix()-lastUploadingTime > keepUploadAlive { if time.Now().Unix() - lastUploadingTime > keepUploadAlive {
LOG_INFO("UPLOAD_KEEPALIVE", "Skip chunk cache to keep connection alive") LOG_INFO("UPLOAD_KEEPALIVE", "Skip chunk cache to keep connection alive")
found = false found = false
} }
@@ -558,7 +558,7 @@ func (manager *BackupManager) Backup(top string, quickMode bool, threads int, ta
if showStatistics { if showStatistics {
LOG_INFO("BACKUP_STATS", "Files: %d total, %s bytes; %d new, %s bytes", LOG_INFO("BACKUP_STATS", "Files: %d total, %s bytes; %d new, %s bytes",
localEntryList.NumberOfEntries-int64(len(skippedFiles)), localEntryList.NumberOfEntries - int64(len(skippedFiles)),
PrettyNumber(preservedFileSize+uploadedFileSize), PrettyNumber(preservedFileSize+uploadedFileSize),
len(localEntryList.ModifiedEntries), PrettyNumber(uploadedFileSize)) len(localEntryList.ModifiedEntries), PrettyNumber(uploadedFileSize))
@@ -686,7 +686,7 @@ func (manager *BackupManager) Restore(top string, revision int, inPlace bool, qu
// List local files // List local files
defer CatchLogException() defer CatchLogException()
localSnapshot.ListLocalFiles(top, manager.nobackupFile, manager.filtersFile, manager.excludeByAttribute, localListingChannel, nil, nil) localSnapshot.ListLocalFiles(top, manager.nobackupFile, manager.filtersFile, manager.excludeByAttribute, localListingChannel, nil, nil)
}() } ()
remoteSnapshot := manager.SnapshotManager.DownloadSnapshot(manager.snapshotID, revision) remoteSnapshot := manager.SnapshotManager.DownloadSnapshot(manager.snapshotID, revision)
manager.SnapshotManager.DownloadSnapshotSequences(remoteSnapshot) manager.SnapshotManager.DownloadSnapshotSequences(remoteSnapshot)
@@ -698,47 +698,13 @@ func (manager *BackupManager) Restore(top string, revision int, inPlace bool, qu
return true return true
}) })
close(remoteListingChannel) close(remoteListingChannel)
}() } ()
var localEntry *Entry var localEntry *Entry
localListingOK := true localListingOK := true
type hardLinkEntry struct {
entry *Entry
willExist bool
}
var hardLinkTable []hardLinkEntry
var hardLinks []*Entry
restoreHardlink := func(entry *Entry, fullPath string) bool {
if entry.IsHardlinkRoot() {
hardLinkTable[len(hardLinkTable)-1].willExist = true
} else if entry.IsHardlinkedFrom() {
i, err := entry.GetHardlinkId()
if err != nil {
LOG_ERROR("RESTORE_HARDLINK", "Decode error for hardlinked entry %s, %v", entry.Path, err)
return false
}
if !hardLinkTable[i].willExist {
hardLinkTable[i] = hardLinkEntry{entry, true}
} else {
sourcePath := joinPath(top, hardLinkTable[i].entry.Path)
LOG_INFO("RESTORE_HARDLINK", "Hard linking %s to %s", fullPath, sourcePath)
if err := MakeHardlink(sourcePath, fullPath); err != nil {
LOG_ERROR("RESTORE_HARDLINK", "Failed to create hard link %s to %s %v", fullPath, sourcePath, err)
}
return true
}
}
return false
}
for remoteEntry := range remoteListingChannel { for remoteEntry := range remoteListingChannel {
if remoteEntry.IsHardlinkRoot() {
hardLinkTable = append(hardLinkTable, hardLinkEntry{remoteEntry, false})
}
if len(patterns) > 0 && !MatchPath(remoteEntry.Path, patterns) { if len(patterns) > 0 && !MatchPath(remoteEntry.Path, patterns) {
continue continue
} }
@@ -748,7 +714,7 @@ func (manager *BackupManager) Restore(top string, revision int, inPlace bool, qu
for { for {
if localEntry == nil && localListingOK { if localEntry == nil && localListingOK {
localEntry, localListingOK = <-localListingChannel localEntry, localListingOK = <- localListingChannel
} }
if localEntry == nil { if localEntry == nil {
compareResult = 1 compareResult = 1
@@ -775,39 +741,26 @@ func (manager *BackupManager) Restore(top string, revision int, inPlace bool, qu
} }
fullPath := joinPath(top, remoteEntry.Path) fullPath := joinPath(top, remoteEntry.Path)
if remoteEntry.IsLink() { if remoteEntry.IsLink() {
-				if stat, _ := os.Lstat(fullPath); stat != nil {
+				stat, err := os.Lstat(fullPath)
+				if stat != nil {
 					if stat.Mode()&os.ModeSymlink != 0 {
 						isRegular, link, err := Readlink(fullPath)
 						if err == nil && link == remoteEntry.Link && !isRegular {
 							remoteEntry.RestoreMetadata(fullPath, nil, setOwner)
-							if remoteEntry.IsHardlinkRoot() {
-								hardLinkTable[len(hardLinkTable)-1].willExist = true
-							}
 							continue
 						}
 					}
-					if !overwrite {
-						LOG_WERROR(allowFailures, "DOWNLOAD_OVERWRITE",
-							"File %s already exists. Please specify the -overwrite option to overwrite", remoteEntry.Path)
-						continue
-					}
 					os.Remove(fullPath)
 				}
-				if restoreHardlink(remoteEntry, fullPath) {
-					continue
-				}
-				if err := os.Symlink(remoteEntry.Link, fullPath); err != nil {
+				err = os.Symlink(remoteEntry.Link, fullPath)
+				if err != nil {
LOG_ERROR("RESTORE_SYMLINK", "Can't create symlink %s: %v", remoteEntry.Path, err) LOG_ERROR("RESTORE_SYMLINK", "Can't create symlink %s: %v", remoteEntry.Path, err)
return 0 return 0
} }
remoteEntry.RestoreMetadata(fullPath, nil, setOwner) remoteEntry.RestoreMetadata(fullPath, nil, setOwner)
LOG_TRACE("DOWNLOAD_DONE", "Symlink %s updated", remoteEntry.Path) LOG_TRACE("DOWNLOAD_DONE", "Symlink %s updated", remoteEntry.Path)
} else if remoteEntry.IsDir() { } else if remoteEntry.IsDir() {
@@ -829,48 +782,7 @@ func (manager *BackupManager) Restore(top string, revision int, inPlace bool, qu
} }
remoteEntry.RestoreEarlyDirFlags(fullPath) remoteEntry.RestoreEarlyDirFlags(fullPath)
directoryEntries = append(directoryEntries, remoteEntry) directoryEntries = append(directoryEntries, remoteEntry)
} else if remoteEntry.IsSpecial() {
if stat, _ := os.Lstat(fullPath); stat != nil {
if remoteEntry.IsSameSpecial(stat) {
remoteEntry.RestoreMetadata(fullPath, nil, setOwner)
if remoteEntry.IsHardlinkRoot() {
hardLinkTable[len(hardLinkTable)-1].willExist = true
}
}
if !overwrite {
LOG_WERROR(allowFailures, "DOWNLOAD_OVERWRITE",
"File %s already exists. Please specify the -overwrite option to overwrite", remoteEntry.Path)
continue
}
os.Remove(fullPath)
}
if restoreHardlink(remoteEntry, fullPath) {
continue
}
if err := remoteEntry.RestoreSpecial(fullPath); err != nil {
LOG_ERROR("RESTORE_SPECIAL", "Unable to restore special file %s: %v", remoteEntry.Path, err)
return 0
}
remoteEntry.RestoreMetadata(fullPath, nil, setOwner)
} else { } else {
if remoteEntry.IsHardlinkRoot() {
hardLinkTable[len(hardLinkTable)-1].willExist = true
} else if remoteEntry.IsHardlinkedFrom() {
i, err := remoteEntry.GetHardlinkId()
if err != nil {
LOG_ERROR("RESTORE_HARDLINK", "Decode error for hardlinked entry %s, %v", remoteEntry.Path, err)
return 0
}
if !hardLinkTable[i].willExist {
hardLinkTable[i] = hardLinkEntry{remoteEntry, true}
} else {
hardLinks = append(hardLinks, remoteEntry)
continue
}
}
// We can't download files here since fileEntries needs to be sorted // We can't download files here since fileEntries needs to be sorted
fileEntries = append(fileEntries, remoteEntry) fileEntries = append(fileEntries, remoteEntry)
totalFileSize += remoteEntry.Size totalFileSize += remoteEntry.Size
@@ -882,7 +794,7 @@ func (manager *BackupManager) Restore(top string, revision int, inPlace bool, qu
} }
for localListingOK { for localListingOK {
localEntry, localListingOK = <-localListingChannel localEntry, localListingOK = <- localListingChannel
if localEntry != nil { if localEntry != nil {
extraFiles = append(extraFiles, localEntry.Path) extraFiles = append(extraFiles, localEntry.Path)
} }
@@ -990,38 +902,6 @@ func (manager *BackupManager) Restore(top string, revision int, inPlace bool, qu
file.RestoreMetadata(fullPath, nil, setOwner) file.RestoreMetadata(fullPath, nil, setOwner)
} }
for _, linkEntry := range hardLinks {
i, _ := linkEntry.GetHardlinkId()
sourcePath := joinPath(top, hardLinkTable[i].entry.Path)
fullPath := joinPath(top, linkEntry.Path)
if stat, _ := os.Lstat(fullPath); stat != nil {
sourceStat, _ := os.Lstat(sourcePath)
if os.SameFile(stat, sourceStat) {
continue
}
if sourceStat == nil {
LOG_WERROR(allowFailures, "RESTORE_HARDLINK",
"Target %s for hardlink %s is missing", sourcePath, linkEntry.Path)
continue
}
if !overwrite {
LOG_WERROR(allowFailures, "DOWNLOAD_OVERWRITE",
"File %s already exists. Please specify the -overwrite option to overwrite", linkEntry.Path)
continue
}
os.Remove(fullPath)
}
LOG_DEBUG("RESTORE_HARDLINK", "Hard linking %s to %s", fullPath, sourcePath)
if err := MakeHardlink(sourcePath, fullPath); err != nil {
LOG_ERROR("RESTORE_HARDLINK", "Failed to create hard link %s to %s", fullPath, sourcePath)
return 0
}
}
if deleteMode && len(patterns) == 0 { if deleteMode && len(patterns) == 0 {
// Reverse the order to make sure directories are empty before being deleted // Reverse the order to make sure directories are empty before being deleted
for i := range extraFiles { for i := range extraFiles {
@@ -1167,19 +1047,14 @@ func (manager *BackupManager) UploadSnapshot(chunkOperator *ChunkOperator, top s
encoder := msgpack.NewEncoder(buffer) encoder := msgpack.NewEncoder(buffer)
metadataChunkMaker := CreateMetaDataChunkMaker(manager.config, metadataChunkSize) metadataChunkMaker := CreateMetaDataChunkMaker(manager.config, metadataChunkSize)
var chunkHashes []string var chunkHashes []string
var chunkLengths []int var chunkLengths []int
lastChunk := -1 lastChunk := -1
lastEndChunk := 0 lastEndChunk := 0
type hardLinkEntry struct {
entry *Entry
startChunk int
}
var hardLinkTable []hardLinkEntry
uploadEntryInfoFunc := func(entry *Entry) error { uploadEntryInfoFunc := func(entry *Entry) error {
if entry.IsFile() && entry.Size > 0 { if entry.IsFile() && entry.Size > 0 {
delta := entry.StartChunk - len(chunkHashes) + 1 delta := entry.StartChunk - len(chunkHashes) + 1
if entry.StartChunk != lastChunk { if entry.StartChunk != lastChunk {
@@ -1197,38 +1072,10 @@ func (manager *BackupManager) UploadSnapshot(chunkOperator *ChunkOperator, top s
entry.StartChunk -= delta entry.StartChunk -= delta
entry.EndChunk -= delta entry.EndChunk -= delta
if entry.IsHardlinkRoot() {
LOG_DEBUG("SNAPSHOT_UPLOAD", "Hard link root %s %v %v", entry.Path, entry.StartChunk, entry.EndChunk)
hardLinkTable = append(hardLinkTable, hardLinkEntry{entry, entry.StartChunk})
}
delta = entry.EndChunk - entry.StartChunk delta = entry.EndChunk - entry.StartChunk
entry.StartChunk -= lastEndChunk entry.StartChunk -= lastEndChunk
lastEndChunk = entry.EndChunk lastEndChunk = entry.EndChunk
entry.EndChunk = delta entry.EndChunk = delta
} else if entry.IsHardlinkedFrom() && !entry.IsLink() {
i, err := entry.GetHardlinkId()
if err != nil {
LOG_ERROR("SNAPSHOT_UPLOAD", "Decode error for hardlinked entry %s, %v", entry.Link, err)
return err
}
targetEntry := hardLinkTable[i].entry
var startChunk, endChunk int
if targetEntry.Size > 0 {
startChunk = hardLinkTable[i].startChunk - lastEndChunk
endChunk = targetEntry.EndChunk
}
entry = entry.HardLinkTo(targetEntry, startChunk, endChunk)
if targetEntry.Size > 0 {
lastEndChunk = hardLinkTable[i].startChunk + endChunk
}
LOG_DEBUG("SNAPSHOT_UPLOAD", "Uploading cloned hardlink for %s to %s (%v %v)", entry.Path, targetEntry.Path, startChunk, endChunk)
} else if entry.IsHardlinkRoot() {
hardLinkTable = append(hardLinkTable, hardLinkEntry{entry, 0})
} }
buffer.Reset() buffer.Reset()
@@ -1297,10 +1144,9 @@ func (manager *BackupManager) UploadSnapshot(chunkOperator *ChunkOperator, top s
// Restore downloads a file from the storage. If 'inPlace' is false, the download file is saved first to a temporary // Restore downloads a file from the storage. If 'inPlace' is false, the download file is saved first to a temporary
// file under the .duplicacy directory and then replaces the existing one. Otherwise, the existing file will be // file under the .duplicacy directory and then replaces the existing one. Otherwise, the existing file will be
// overwritten directly. // overwritten directly.
// Return: true, nil: Restored file; // Return: true, nil: Restored file;
// // false, nil: Skipped file;
// false, nil: Skipped file; // false, error: Failure to restore file (only if allowFailures == true)
// false, error: Failure to restore file (only if allowFailures == true)
func (manager *BackupManager) RestoreFile(chunkDownloader *ChunkDownloader, chunkMaker *ChunkMaker, entry *Entry, top string, inPlace bool, overwrite bool, func (manager *BackupManager) RestoreFile(chunkDownloader *ChunkDownloader, chunkMaker *ChunkMaker, entry *Entry, top string, inPlace bool, overwrite bool,
showStatistics bool, totalFileSize int64, downloadedFileSize int64, startTime int64, allowFailures bool) (bool, error) { showStatistics bool, totalFileSize int64, downloadedFileSize int64, startTime int64, allowFailures bool) (bool, error) {
@@ -1476,7 +1322,7 @@ func (manager *BackupManager) RestoreFile(chunkDownloader *ChunkDownloader, chun
// fileHash != entry.Hash, warn/error depending on -overwrite option // fileHash != entry.Hash, warn/error depending on -overwrite option
if !overwrite && !isNewFile { if !overwrite && !isNewFile {
LOG_WERROR(allowFailures, "DOWNLOAD_OVERWRITE", LOG_WERROR(allowFailures, "DOWNLOAD_OVERWRITE",
"File %s already exists. Please specify the -overwrite option to overwrite", entry.Path) "File %s already exists. Please specify the -overwrite option to overwrite", entry.Path)
return false, fmt.Errorf("file exists") return false, fmt.Errorf("file exists")
} }
@@ -1723,7 +1569,7 @@ func (manager *BackupManager) CopySnapshots(otherManager *BackupManager, snapsho
if otherManager.config.DataShards != 0 && otherManager.config.ParityShards != 0 { if otherManager.config.DataShards != 0 && otherManager.config.ParityShards != 0 {
LOG_INFO("BACKUP_ERASURECODING", "Erasure coding is enabled for the destination storage with %d data shards and %d parity shards", LOG_INFO("BACKUP_ERASURECODING", "Erasure coding is enabled for the destination storage with %d data shards and %d parity shards",
otherManager.config.DataShards, otherManager.config.ParityShards) otherManager.config.DataShards, otherManager.config.ParityShards)
} }
if otherManager.config.rsaPublicKey != nil && len(otherManager.config.FileKey) > 0 { if otherManager.config.rsaPublicKey != nil && len(otherManager.config.FileKey) > 0 {
@@ -1824,15 +1670,15 @@ func (manager *BackupManager) CopySnapshots(otherManager *BackupManager, snapsho
LOG_TRACE("SNAPSHOT_COPY", "Copying snapshot %s at revision %d", snapshot.ID, snapshot.Revision) LOG_TRACE("SNAPSHOT_COPY", "Copying snapshot %s at revision %d", snapshot.ID, snapshot.Revision)
for _, chunkHash := range snapshot.FileSequence { for _, chunkHash := range snapshot.FileSequence {
chunks[chunkHash] = true // The chunk is a snapshot chunk chunks[chunkHash] = true // The chunk is a snapshot chunk
} }
for _, chunkHash := range snapshot.ChunkSequence { for _, chunkHash := range snapshot.ChunkSequence {
chunks[chunkHash] = true // The chunk is a snapshot chunk chunks[chunkHash] = true // The chunk is a snapshot chunk
} }
for _, chunkHash := range snapshot.LengthSequence { for _, chunkHash := range snapshot.LengthSequence {
chunks[chunkHash] = true // The chunk is a snapshot chunk chunks[chunkHash] = true // The chunk is a snapshot chunk
} }
description := manager.SnapshotManager.DownloadSequence(snapshot.ChunkSequence) description := manager.SnapshotManager.DownloadSequence(snapshot.ChunkSequence)
@@ -1845,7 +1691,7 @@ func (manager *BackupManager) CopySnapshots(otherManager *BackupManager, snapsho
for _, chunkHash := range snapshot.ChunkHashes { for _, chunkHash := range snapshot.ChunkHashes {
if _, found := chunks[chunkHash]; !found { if _, found := chunks[chunkHash]; !found {
chunks[chunkHash] = false // The chunk is a file chunk chunks[chunkHash] = false // The chunk is a file chunk
} }
} }
@@ -1877,7 +1723,7 @@ func (manager *BackupManager) CopySnapshots(otherManager *BackupManager, snapsho
} }
} }
LOG_INFO("SNAPSHOT_COPY", "Chunks to copy: %d, to skip: %d, total: %d", len(chunksToCopy), len(chunks)-len(chunksToCopy), len(chunks)) LOG_INFO("SNAPSHOT_COPY", "Chunks to copy: %d, to skip: %d, total: %d", len(chunksToCopy), len(chunks) - len(chunksToCopy), len(chunks))
chunkDownloader := CreateChunkOperator(manager.config, manager.storage, nil, false, false, downloadingThreads, false) chunkDownloader := CreateChunkOperator(manager.config, manager.storage, nil, false, false, downloadingThreads, false)
@@ -1886,7 +1732,7 @@ func (manager *BackupManager) CopySnapshots(otherManager *BackupManager, snapsho
copiedChunks := 0 copiedChunks := 0
chunkUploader := CreateChunkOperator(otherManager.config, otherManager.storage, nil, false, false, uploadingThreads, false) chunkUploader := CreateChunkOperator(otherManager.config, otherManager.storage, nil, false, false, uploadingThreads, false)
chunkUploader.UploadCompletionFunc = func(chunk *Chunk, chunkIndex int, skipped bool, chunkSize int, uploadSize int) { chunkUploader.UploadCompletionFunc = func(chunk *Chunk, chunkIndex int, skipped bool, chunkSize int, uploadSize int) {
action := "Skipped" action := "Skipped"
if !skipped { if !skipped {
copiedChunks++ copiedChunks++
@@ -1897,11 +1743,11 @@ func (manager *BackupManager) CopySnapshots(otherManager *BackupManager, snapsho
elapsedTime := time.Now().Sub(startTime).Seconds() elapsedTime := time.Now().Sub(startTime).Seconds()
speed := int64(float64(atomic.LoadInt64(&uploadedBytes)) / elapsedTime) speed := int64(float64(atomic.LoadInt64(&uploadedBytes)) / elapsedTime)
remainingTime := int64(float64(len(chunksToCopy)-chunkIndex-1) / float64(chunkIndex+1) * elapsedTime) remainingTime := int64(float64(len(chunksToCopy) - chunkIndex - 1) / float64(chunkIndex + 1) * elapsedTime)
percentage := float64(chunkIndex+1) / float64(len(chunksToCopy)) * 100.0 percentage := float64(chunkIndex + 1) / float64(len(chunksToCopy)) * 100.0
LOG_INFO("COPY_PROGRESS", "%s chunk %s (%d/%d) %sB/s %s %.1f%%", LOG_INFO("COPY_PROGRESS", "%s chunk %s (%d/%d) %sB/s %s %.1f%%",
action, chunk.GetID(), chunkIndex+1, len(chunksToCopy), action, chunk.GetID(), chunkIndex + 1, len(chunksToCopy),
PrettySize(speed), PrettyTime(remainingTime), percentage) PrettySize(speed), PrettyTime(remainingTime), percentage)
otherManager.config.PutChunk(chunk) otherManager.config.PutChunk(chunk)
} }
@@ -1924,7 +1770,7 @@ func (manager *BackupManager) CopySnapshots(otherManager *BackupManager, snapsho
chunkDownloader.Stop() chunkDownloader.Stop()
chunkUploader.Stop() chunkUploader.Stop()
LOG_INFO("SNAPSHOT_COPY", "Copied %d new chunks and skipped %d existing chunks", copiedChunks, len(chunks)-copiedChunks) LOG_INFO("SNAPSHOT_COPY", "Copied %d new chunks and skipped %d existing chunks", copiedChunks, len(chunks) - copiedChunks)
for _, snapshot := range snapshots { for _, snapshot := range snapshots {
if revisionMap[snapshot.ID][snapshot.Revision] == false { if revisionMap[snapshot.ID][snapshot.Revision] == false {

View File

@@ -4,11 +4,8 @@
package duplicacy package duplicacy
import ( import (
"bytes"
"crypto/sha256"
"encoding/base64" "encoding/base64"
"encoding/json" "encoding/json"
"errors"
"fmt" "fmt"
"io/ioutil" "io/ioutil"
"os" "os"
@@ -18,15 +15,12 @@ import (
"sort" "sort"
"strconv" "strconv"
"strings" "strings"
"syscall"
"time" "time"
"bytes"
"crypto/sha256"
"github.com/vmihailenco/msgpack" "github.com/vmihailenco/msgpack"
)
const (
entrySymHardLinkRootChunkMarker = -72
entrySymHardLinkTargetChunkMarker = -73
) )
// This is the hidden directory in the repository for storing various files. // This is the hidden directory in the repository for storing various files.
@@ -116,36 +110,15 @@ func (entry *Entry) Copy() *Entry {
UID: entry.UID, UID: entry.UID,
GID: entry.GID, GID: entry.GID,
StartChunk: entry.StartChunk, StartChunk: entry.StartChunk,
StartOffset: entry.StartOffset, StartOffset: entry.StartOffset,
EndChunk: entry.EndChunk, EndChunk: entry.EndChunk,
EndOffset: entry.EndOffset, EndOffset: entry.EndOffset,
Attributes: entry.Attributes, Attributes: entry.Attributes,
} }
} }
func (entry *Entry) HardLinkTo(target *Entry, startChunk int, endChunk int) *Entry {
return &Entry{
Path: entry.Path,
Size: target.Size,
Time: target.Time,
Mode: target.Mode,
Link: entry.Link,
Hash: target.Hash,
UID: target.UID,
GID: target.GID,
StartChunk: startChunk,
StartOffset: target.StartOffset,
EndChunk: endChunk,
EndOffset: target.EndOffset,
Attributes: target.Attributes,
}
}
// CreateEntryFromJSON creates an entry from a json description. // CreateEntryFromJSON creates an entry from a json description.
func (entry *Entry) UnmarshalJSON(description []byte) (err error) { func (entry *Entry) UnmarshalJSON(description []byte) (err error) {
@@ -389,12 +362,12 @@ func (entry *Entry) EncodeMsgpack(encoder *msgpack.Encoder) error {
if entry.Attributes != nil { if entry.Attributes != nil {
attributes := make([]string, numberOfAttributes) attributes := make([]string, numberOfAttributes)
i := 0 i := 0
for attribute := range *entry.Attributes { for attribute := range *entry.Attributes {
attributes[i] = attribute attributes[i] = attribute
i++ i++
} }
sort.Strings(attributes) sort.Strings(attributes)
for _, attribute := range attributes { for _, attribute := range attributes {
err = encoder.EncodeString(attribute) err = encoder.EncodeString(attribute)
if err != nil { if err != nil {
@@ -407,7 +380,7 @@ func (entry *Entry) EncodeMsgpack(encoder *msgpack.Encoder) error {
} }
} }
return nil return nil
} }
func (entry *Entry) DecodeMsgpack(decoder *msgpack.Decoder) error { func (entry *Entry) DecodeMsgpack(decoder *msgpack.Decoder) error {
@@ -515,46 +488,18 @@ func (entry *Entry) IsLink() bool {
return entry.Mode&uint32(os.ModeSymlink) != 0 return entry.Mode&uint32(os.ModeSymlink) != 0
} }
func (entry *Entry) IsSpecial() bool {
return entry.Mode&uint32(os.ModeNamedPipe|os.ModeDevice|os.ModeCharDevice) != 0
}
func (entry *Entry) IsFileOrSpecial() bool {
return entry.Mode&uint32(os.ModeDir|os.ModeSymlink|os.ModeIrregular) == 0
}
func (entry *Entry) IsComplete() bool { func (entry *Entry) IsComplete() bool {
return entry.Size >= 0 return entry.Size >= 0
} }
func (entry *Entry) IsHardlinkedFrom() bool {
return (entry.IsFileOrSpecial() && len(entry.Link) > 0 && entry.Link != "/") || (entry.IsLink() && entry.StartChunk == entrySymHardLinkTargetChunkMarker)
}
func (entry *Entry) IsHardlinkRoot() bool {
return (entry.IsFileOrSpecial() && entry.Link == "/") || (entry.IsLink() && entry.StartChunk == entrySymHardLinkRootChunkMarker)
}
func (entry *Entry) GetHardlinkId() (int, error) {
if entry.IsLink() {
if entry.StartChunk != entrySymHardLinkTargetChunkMarker {
return 0, errors.New("Symlink entry not marked as hardlinked")
}
return entry.StartOffset, nil
} else {
i, err := strconv.ParseUint(entry.Link, 16, 64)
return int(i), err
}
}
func (entry *Entry) GetPermissions() os.FileMode { func (entry *Entry) GetPermissions() os.FileMode {
return os.FileMode(entry.Mode) & fileModeMask return os.FileMode(entry.Mode) & fileModeMask
} }
func (entry *Entry) GetParent() string { func (entry *Entry) GetParent() string {
path := entry.Path path := entry.Path
if path != "" && path[len(path)-1] == '/' { if path != "" && path[len(path) - 1] == '/' {
path = path[:len(path)-1] path = path[:len(path) - 1]
} }
i := strings.LastIndex(path, "/") i := strings.LastIndex(path, "/")
if i == -1 { if i == -1 {
@@ -605,10 +550,6 @@ func (entry *Entry) RestoreMetadata(fullPath string, fileInfo *os.FileInfo, setO
} }
} }
if entry.Attributes != nil && len(*entry.Attributes) > 0 {
entry.SetAttributesToFile(fullPath)
}
// Only set the time if the file is not a symlink // Only set the time if the file is not a symlink
if !entry.IsLink() && (*fileInfo).ModTime().Unix() != entry.Time { if !entry.IsLink() && (*fileInfo).ModTime().Unix() != entry.Time {
modifiedTime := time.Unix(entry.Time, 0) modifiedTime := time.Unix(entry.Time, 0)
@@ -619,6 +560,10 @@ func (entry *Entry) RestoreMetadata(fullPath string, fileInfo *os.FileInfo, setO
} }
} }
if entry.Attributes != nil && len(*entry.Attributes) > 0 {
entry.SetAttributesToFile(fullPath)
}
return true return true
} }
@@ -651,7 +596,7 @@ func ComparePaths(left string, right string) int {
for i := p; i < len(left); i++ { for i := p; i < len(left); i++ {
c3 = left[i] c3 = left[i]
if c3 == '/' { if c3 == '/' {
last1 = i == len(left)-1 last1 = i == len(left) - 1
break break
} }
} }
@@ -661,7 +606,7 @@ func ComparePaths(left string, right string) int {
for i := p; i < len(right); i++ { for i := p; i < len(right); i++ {
c4 = right[i] c4 = right[i]
if c4 == '/' { if c4 == '/' {
last2 = i == len(right)-1 last2 = i == len(right) - 1
break break
} }
} }
@@ -749,27 +694,10 @@ func (files FileInfoCompare) Less(i, j int) bool {
} }
} }
type listEntryLinkKey struct {
dev uint64
ino uint64
}
type ListingState struct {
linkIndex int
linkTable map[listEntryLinkKey]int // map unique inode details to initially found path
}
func NewListingState() *ListingState {
return &ListingState{
linkTable: make(map[listEntryLinkKey]int),
}
}
// ListEntries returns a list of entries representing file and subdirectories under the directory 'path'. Entry paths // ListEntries returns a list of entries representing file and subdirectories under the directory 'path'. Entry paths
// are normalized as relative to 'top'. 'patterns' are used to exclude or include certain files. // are normalized as relative to 'top'. 'patterns' are used to exclude or include certain files.
-func ListEntries(top string, path string, patterns []string, nobackupFile string, excludeByAttribute bool,
-	listingState *ListingState,
-	listingChannel chan *Entry) (directoryList []*Entry, skippedFiles []string, err error) {
+func ListEntries(top string, path string, patterns []string, nobackupFile string, excludeByAttribute bool, listingChannel chan *Entry) (directoryList []*Entry,
+	skippedFiles []string, err error) {
LOG_DEBUG("LIST_ENTRIES", "Listing %s", path) LOG_DEBUG("LIST_ENTRIES", "Listing %s", path)
@@ -807,44 +735,10 @@ func ListEntries(top string, path string, patterns []string, nobackupFile string
if f.Name() == DUPLICACY_DIRECTORY { if f.Name() == DUPLICACY_DIRECTORY {
continue continue
} }
if f.Mode()&os.ModeSocket != 0 {
continue
}
entry := CreateEntryFromFileInfo(f, normalizedPath) entry := CreateEntryFromFileInfo(f, normalizedPath)
if len(patterns) > 0 && !MatchPath(entry.Path, patterns) { if len(patterns) > 0 && !MatchPath(entry.Path, patterns) {
continue continue
} }
var linkKey *listEntryLinkKey
if runtime.GOOS != "windows" && !entry.IsDir() {
if stat := f.Sys().(*syscall.Stat_t); stat != nil && stat.Nlink > 1 {
k := listEntryLinkKey{dev: uint64(stat.Dev), ino: uint64(stat.Ino)}
if linkIndex, seen := listingState.linkTable[k]; seen {
if linkIndex == -1 {
LOG_DEBUG("LIST_EXCLUDE", "%s is excluded by attribute (hardlink)", entry.Path)
continue
}
entry.Size = 0
if entry.IsLink() {
entry.StartChunk = entrySymHardLinkTargetChunkMarker
entry.StartOffset = linkIndex
} else {
entry.Link = strconv.FormatInt(int64(linkIndex), 16)
}
} else {
if entry.IsLink() {
entry.StartChunk = entrySymHardLinkRootChunkMarker
} else {
entry.Link = "/"
}
listingState.linkTable[k] = -1
linkKey = &k
}
}
}
if entry.IsLink() { if entry.IsLink() {
isRegular := false isRegular := false
isRegular, entry.Link, err = Readlink(joinPath(top, entry.Path)) isRegular, entry.Link, err = Readlink(joinPath(top, entry.Path))
@@ -875,12 +769,6 @@ func ListEntries(top string, path string, patterns []string, nobackupFile string
} }
entry = newEntry entry = newEntry
} }
} else if entry.IsSpecial() {
if !entry.ReadSpecial(f) {
LOG_WARN("LIST_DEV", "Failed to save device node %s", entry.Path)
skippedFiles = append(skippedFiles, entry.Path)
continue
}
} }
entry.ReadAttributes(top) entry.ReadAttributes(top)
@@ -890,9 +778,10 @@ func ListEntries(top string, path string, patterns []string, nobackupFile string
continue continue
} }
-		if linkKey != nil {
-			listingState.linkTable[*linkKey] = listingState.linkIndex
-			listingState.linkIndex++
+		if f.Mode()&(os.ModeNamedPipe|os.ModeSocket|os.ModeDevice) != 0 {
+			LOG_WARN("LIST_SKIP", "Skipped non-regular file %s", entry.Path)
+			skippedFiles = append(skippedFiles, entry.Path)
+			continue
 		}
if entry.IsDir() { if entry.IsDir() {

View File

@@ -111,12 +111,12 @@ func (entryList *EntryList)createOnDiskFile() error {
// Add an entry to the entry list // Add an entry to the entry list
func (entryList *EntryList)AddEntry(entry *Entry) error { func (entryList *EntryList)AddEntry(entry *Entry) error {
-	if entry.IsFile() {
+	if !entry.IsDir() && !entry.IsLink() {
entryList.NumberOfEntries++ entryList.NumberOfEntries++
} }
if !entry.IsComplete() { if !entry.IsComplete() {
-		if !entry.IsFile() {
+		if entry.IsDir() || entry.IsLink() {
entry.Size = 0 entry.Size = 0
} else { } else {
modifiedEntry := ModifiedEntry { modifiedEntry := ModifiedEntry {

View File

@@ -68,7 +68,6 @@ func (snapshot *Snapshot) ListLocalFiles(top string, nobackupFile string,
skippedDirectories *[]string, skippedFiles *[]string) { skippedDirectories *[]string, skippedFiles *[]string) {
var patterns []string var patterns []string
listingState := NewListingState()
if filtersFile == "" { if filtersFile == "" {
filtersFile = joinPath(GetDuplicacyPreferencePath(), "filters") filtersFile = joinPath(GetDuplicacyPreferencePath(), "filters")
@@ -82,7 +81,7 @@ func (snapshot *Snapshot) ListLocalFiles(top string, nobackupFile string,
directory := directories[len(directories)-1] directory := directories[len(directories)-1]
directories = directories[:len(directories)-1] directories = directories[:len(directories)-1]
subdirectories, skipped, err := ListEntries(top, directory.Path, patterns, nobackupFile, excludeByAttribute, listingState, listingChannel) subdirectories, skipped, err := ListEntries(top, directory.Path, patterns, nobackupFile, excludeByAttribute, listingChannel)
if err != nil { if err != nil {
if directory.Path == "" { if directory.Path == "" {
LOG_ERROR("LIST_FAILURE", "Failed to list the repository root: %v", err) LOG_ERROR("LIST_FAILURE", "Failed to list the repository root: %v", err)

View File

@@ -7,15 +7,14 @@ package duplicacy
import ( import (
"bufio" "bufio"
"crypto/sha256" "crypto/sha256"
"encoding/json"
"fmt" "fmt"
"io" "io"
"os" "os"
"regexp" "regexp"
"runtime"
"strconv" "strconv"
"strings" "strings"
"time" "time"
"runtime"
"github.com/gilbertchen/gopass" "github.com/gilbertchen/gopass"
"golang.org/x/crypto/pbkdf2" "golang.org/x/crypto/pbkdf2"
@@ -57,7 +56,7 @@ func IsEmptyFilter(pattern string) bool {
} }
func IsUnspecifiedFilter(pattern string) bool { func IsUnspecifiedFilter(pattern string) bool {
if pattern[0] != '+' && pattern[0] != '-' && !strings.HasPrefix(pattern, "i:") && !strings.HasPrefix(pattern, "e:") { if pattern[0] != '+' && pattern[0] != '-' && !strings.HasPrefix(pattern, "i:") && !strings.HasPrefix(pattern, "e:") {
return true return true
} else { } else {
return false return false
@@ -276,6 +275,7 @@ func SavePassword(preference Preference, passwordType string, password string) {
// The following code was modified from the online article 'Matching Wildcards: An Algorithm', by Kirk J. Krauss, // The following code was modified from the online article 'Matching Wildcards: An Algorithm', by Kirk J. Krauss,
// Dr. Dobb's, August 26, 2008. However, the version in the article doesn't handle cases like matching 'abcccd' // Dr. Dobb's, August 26, 2008. However, the version in the article doesn't handle cases like matching 'abcccd'
// against '*ccd', and the version here fixed that issue. // against '*ccd', and the version here fixed that issue.
//
func matchPattern(text string, pattern string) bool { func matchPattern(text string, pattern string) bool {
textLength := len(text) textLength := len(text)
@@ -469,39 +469,8 @@ func PrintMemoryUsage() {
runtime.ReadMemStats(&m) runtime.ReadMemStats(&m)
LOG_INFO("MEMORY_STATS", "Currently allocated: %s, total allocated: %s, system memory: %s, number of GCs: %d", LOG_INFO("MEMORY_STATS", "Currently allocated: %s, total allocated: %s, system memory: %s, number of GCs: %d",
PrettySize(int64(m.Alloc)), PrettySize(int64(m.TotalAlloc)), PrettySize(int64(m.Sys)), m.NumGC) PrettySize(int64(m.Alloc)), PrettySize(int64(m.TotalAlloc)), PrettySize(int64(m.Sys)), m.NumGC)
time.Sleep(time.Second) time.Sleep(time.Second)
} }
} }
func (entry *Entry) dump() map[string]interface{} {
object := make(map[string]interface{})
object["path"] = entry.Path
object["size"] = entry.Size
object["time"] = entry.Time
object["mode"] = entry.Mode
object["hash"] = entry.Hash
object["link"] = entry.Link
object["content"] = fmt.Sprintf("%d:%d:%d:%d",
entry.StartChunk, entry.StartOffset, entry.EndChunk, entry.EndOffset)
if entry.UID != -1 && entry.GID != -1 {
object["uid"] = entry.UID
object["gid"] = entry.GID
}
if entry.Attributes != nil && len(*entry.Attributes) > 0 {
object["attributes"] = entry.Attributes
}
return object
}
func (entry *Entry) dumpString() string {
data, _ := json.Marshal(entry.dump())
return string(data)
}

View File

@@ -0,0 +1,53 @@
// Copyright (c) Acrosync LLC. All rights reserved.
// Free for personal use and commercial trial
// Commercial use requires per-user licenses available from https://duplicacy.com
//go:build freebsd || netbsd || darwin
// +build freebsd netbsd darwin
package duplicacy
import (
"encoding/binary"
"os"
"syscall"
)
const bsdFileFlagsKey = "\x00bf"
func (entry *Entry) ReadFileFlags(f *os.File) error {
fileInfo, err := f.Stat()
if err != nil {
return err
}
stat, ok := fileInfo.Sys().(*syscall.Stat_t)
if ok && stat.Flags != 0 {
if entry.Attributes == nil {
entry.Attributes = &map[string][]byte{}
}
v := make([]byte, 4)
binary.LittleEndian.PutUint32(v, stat.Flags)
(*entry.Attributes)[bsdFileFlagsKey] = v
LOG_DEBUG("ATTR_READ", "Read flags 0x%x for %s", stat.Flags, entry.Path)
}
return nil
}
func (entry *Entry) RestoreEarlyDirFlags(path string) error {
return nil
}
func (entry *Entry) RestoreEarlyFileFlags(f *os.File) error {
return nil
}
func (entry *Entry) RestoreLateFileFlags(f *os.File) error {
if entry.Attributes == nil {
return nil
}
if v, have := (*entry.Attributes)[bsdFileFlagsKey]; have {
LOG_DEBUG("ATTR_RESTORE", "Restore flags 0x%x for %s", binary.LittleEndian.Uint32(v), entry.Path)
return syscall.Fchflags(int(f.Fd()), int(binary.LittleEndian.Uint32(v)))
}
return nil
}

View File

@@ -1,94 +0,0 @@
// Copyright (c) Acrosync LLC. All rights reserved.
// Free for personal use and commercial trial
// Commercial use requires per-user licenses available from https://duplicacy.com
//go:build freebsd || netbsd || darwin
// +build freebsd netbsd darwin
package duplicacy
import (
"bytes"
"encoding/binary"
"os"
"path/filepath"
"syscall"
"github.com/pkg/xattr"
)
const bsdFileFlagsKey = "\x00bf"
func (entry *Entry) ReadAttributes(top string) {
fullPath := filepath.Join(top, entry.Path)
fileInfo, err := os.Lstat(fullPath)
if err != nil {
return
}
if !entry.IsSpecial() {
attributes, _ := xattr.LList(fullPath)
if len(attributes) > 0 {
entry.Attributes = &map[string][]byte{}
for _, name := range attributes {
attribute, err := xattr.LGet(fullPath, name)
if err == nil {
(*entry.Attributes)[name] = attribute
}
}
}
}
if err := entry.readFileFlags(fileInfo); err != nil {
LOG_INFO("ATTR_BACKUP", "Could not backup flags for file %s: %v", fullPath, err)
}
}
func (entry *Entry) SetAttributesToFile(fullPath string) {
if !entry.IsSpecial() {
names, _ := xattr.LList(fullPath)
for _, name := range names {
newAttribute, found := (*entry.Attributes)[name]
if found {
oldAttribute, _ := xattr.LGet(fullPath, name)
if !bytes.Equal(oldAttribute, newAttribute) {
xattr.LSet(fullPath, name, newAttribute)
}
delete(*entry.Attributes, name)
} else {
xattr.LRemove(fullPath, name)
}
}
for name, attribute := range *entry.Attributes {
if len(name) > 0 && name[0] == '\x00' {
continue
}
xattr.LSet(fullPath, name, attribute)
}
}
if err := entry.restoreLateFileFlags(fullPath); err != nil {
LOG_DEBUG("ATTR_RESTORE", "Could not restore flags for file %s: %v", fullPath, err)
}
}
func (entry *Entry) readFileFlags(fileInfo os.FileInfo) error {
stat, ok := fileInfo.Sys().(*syscall.Stat_t)
if ok && stat.Flags != 0 {
if entry.Attributes == nil {
entry.Attributes = &map[string][]byte{}
}
v := make([]byte, 4)
binary.LittleEndian.PutUint32(v, stat.Flags)
(*entry.Attributes)[bsdFileFlagsKey] = v
LOG_DEBUG("ATTR_READ", "Read flags 0x%x for %s", stat.Flags, entry.Path)
}
return nil
}
func (entry *Entry) RestoreEarlyDirFlags(path string) error {
return nil
}
func (entry *Entry) RestoreEarlyFileFlags(f *os.File) error {
return nil
}

View File

@@ -5,29 +5,10 @@
package duplicacy package duplicacy
import ( import (
"encoding/binary"
"os"
"strings" "strings"
"syscall"
) )
func excludedByAttribute(attributes map[string][]byte) bool { func excludedByAttribute(attributes map[string][]byte) bool {
value, ok := attributes["com.apple.metadata:com_apple_backup_excludeItem"] value, ok := attributes["com.apple.metadata:com_apple_backup_excludeItem"]
return ok && strings.Contains(string(value), "com.apple.backupd") return ok && strings.Contains(string(value), "com.apple.backupd")
} }
func (entry *Entry) restoreLateFileFlags(path string) error {
if entry.Attributes == nil {
return nil
}
if v, have := (*entry.Attributes)[bsdFileFlagsKey]; have {
f, err := os.OpenFile(path, os.O_RDONLY|syscall.O_SYMLINK, 0)
if err != nil {
return err
}
err = syscall.Fchflags(int(f.Fd()), int(binary.LittleEndian.Uint32(v)))
f.Close()
return err
}
return nil
}

View File

@@ -5,14 +5,10 @@
package duplicacy package duplicacy
import ( import (
"bytes"
"encoding/binary" "encoding/binary"
"os" "os"
"path/filepath"
"syscall" "syscall"
"unsafe" "unsafe"
"github.com/pkg/xattr"
) )
const ( const (
@@ -51,116 +47,7 @@ func ioctl(f *os.File, request uintptr, attrp *uint32) error {
return nil return nil
} }
type xattrHandle struct { func (entry *Entry) ReadFileFlags(f *os.File) error {
f *os.File
fullPath string
}
func (x xattrHandle) list() ([]string, error) {
if x.f != nil {
return xattr.FList(x.f)
} else {
return xattr.LList(x.fullPath)
}
}
func (x xattrHandle) get(name string) ([]byte, error) {
if x.f != nil {
return xattr.FGet(x.f, name)
} else {
return xattr.LGet(x.fullPath, name)
}
}
func (x xattrHandle) set(name string, value []byte) error {
if x.f != nil {
return xattr.FSet(x.f, name, value)
} else {
return xattr.LSet(x.fullPath, name, value)
}
}
func (x xattrHandle) remove(name string) error {
if x.f != nil {
return xattr.FRemove(x.f, name)
} else {
return xattr.LRemove(x.fullPath, name)
}
}
func (entry *Entry) ReadAttributes(top string) {
fullPath := filepath.Join(top, entry.Path)
x := xattrHandle{nil, fullPath}
if !entry.IsLink() {
var err error
x.f, err = os.OpenFile(fullPath, os.O_RDONLY|syscall.O_NOFOLLOW|syscall.O_NONBLOCK, 0)
if err != nil {
// FIXME: We really should return errors for failure to read
return
}
}
attributes, _ := x.list()
if len(attributes) > 0 {
entry.Attributes = &map[string][]byte{}
}
for _, name := range attributes {
attribute, err := x.get(name)
if err == nil {
(*entry.Attributes)[name] = attribute
}
}
if entry.IsFile() || entry.IsDir() {
if err := entry.readFileFlags(x.f); err != nil {
LOG_INFO("ATTR_BACKUP", "Could not backup flags for file %s: %v", fullPath, err)
}
}
x.f.Close()
}
func (entry *Entry) SetAttributesToFile(fullPath string) {
x := xattrHandle{nil, fullPath}
if !entry.IsLink() {
var err error
x.f, err = os.OpenFile(fullPath, os.O_RDONLY|syscall.O_NOFOLLOW, 0)
if err != nil {
return
}
}
names, _ := x.list()
for _, name := range names {
newAttribute, found := (*entry.Attributes)[name]
if found {
oldAttribute, _ := x.get(name)
if !bytes.Equal(oldAttribute, newAttribute) {
x.set(name, newAttribute)
}
delete(*entry.Attributes, name)
} else {
x.remove(name)
}
}
for name, attribute := range *entry.Attributes {
if len(name) > 0 && name[0] == '\x00' {
continue
}
x.set(name, attribute)
}
if entry.IsFile() || entry.IsDir() {
if err := entry.restoreLateFileFlags(x.f); err != nil {
LOG_DEBUG("ATTR_RESTORE", "Could not restore flags for file %s: %v", fullPath, err)
}
}
x.f.Close()
}
func (entry *Entry) readFileFlags(f *os.File) error {
var flags uint32 var flags uint32
if err := ioctl(f, linux_FS_IOC_GETFLAGS, &flags); err != nil { if err := ioctl(f, linux_FS_IOC_GETFLAGS, &flags); err != nil {
return err return err
@@ -207,7 +94,7 @@ func (entry *Entry) RestoreEarlyFileFlags(f *os.File) error {
return nil return nil
} }
-func (entry *Entry) restoreLateFileFlags(f *os.File) error {
+func (entry *Entry) RestoreLateFileFlags(f *os.File) error {
if entry.Attributes == nil { if entry.Attributes == nil {
return nil return nil
} }
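The two-step Linux restore described in the commit message (no-COW and compression flags must be in place before the file's data is written, while flags such as immutable or append-only would block the writes and so are applied last) can be sketched with golang.org/x/sys/unix instead of the package's own ioctl helper. The split into earlyFlags and lateFlags, the helper names, and the restore function below are assumptions for illustration, not duplicacy's code.

package main

import (
	"os"

	"golang.org/x/sys/unix"
)

// Flags that must be set while the file is still empty, before any writes.
const earlyFlags = unix.FS_NOCOW_FL | unix.FS_COMPR_FL

// Flags that would block the restore writes themselves, so they go last.
const lateFlags = unix.FS_IMMUTABLE_FL | unix.FS_APPEND_FL

func setFlags(f *os.File, mask int) error {
	if mask == 0 {
		return nil
	}
	current, err := unix.IoctlGetInt(int(f.Fd()), unix.FS_IOC_GETFLAGS)
	if err != nil {
		return err
	}
	return unix.IoctlSetPointerInt(int(f.Fd()), unix.FS_IOC_SETFLAGS, current|mask)
}

func restoreWithFlags(path string, data []byte, wanted int) error {
	f, err := os.OpenFile(path, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644)
	if err != nil {
		return err
	}
	defer f.Close()
	if err := setFlags(f, wanted&earlyFlags); err != nil { // step 1: before the data
		return err
	}
	if _, err := f.Write(data); err != nil {
		return err
	}
	return setFlags(f, wanted&lateFlags) // step 2: after the data
}

func main() {
	// Restore a file that should end up compressed and immutable (illustrative values).
	if err := restoreWithFlags("/tmp/restored-example", []byte("example"), unix.FS_COMPR_FL|unix.FS_IMMUTABLE_FL); err != nil {
		println("restore failed:", err.Error())
	}
}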

View File

@@ -2,17 +2,18 @@
// Free for personal use and commercial trial // Free for personal use and commercial trial
// Commercial use requires per-user licenses available from https://duplicacy.com // Commercial use requires per-user licenses available from https://duplicacy.com
//go:build !windows
// +build !windows // +build !windows
package duplicacy package duplicacy
import ( import (
"bytes"
"os" "os"
"path" "path"
"path/filepath"
"syscall" "syscall"
"golang.org/x/sys/unix" "github.com/pkg/xattr"
) )
func Readlink(path string) (isRegular bool, s string, err error) { func Readlink(path string) (isRegular bool, s string, err error) {
@@ -46,50 +47,58 @@ func SetOwner(fullPath string, entry *Entry, fileInfo *os.FileInfo) bool {
return true return true
} }
-func (entry *Entry) ReadSpecial(fileInfo os.FileInfo) bool {
-	if fileInfo.Mode()&(os.ModeDevice|os.ModeCharDevice) == 0 {
-		return true
-	}
-	stat := fileInfo.Sys().(*syscall.Stat_t)
-	if stat == nil {
-		return false
-	}
-	entry.Size = 0
-	rdev := uint64(stat.Rdev)
-	entry.StartChunk = int(rdev & 0xFFFFFFFF)
-	entry.StartOffset = int(rdev >> 32)
-	return true
-}
-
-func (entry *Entry) GetRdev() uint64 {
-	return uint64(entry.StartChunk) | uint64(entry.StartOffset)<<32
-}
-
-func (entry *Entry) RestoreSpecial(fullPath string) error {
-	mode := entry.Mode & uint32(fileModeMask)
-	if entry.Mode&uint32(os.ModeNamedPipe) != 0 {
-		mode |= syscall.S_IFIFO
-	} else if entry.Mode&uint32(os.ModeCharDevice) != 0 {
-		mode |= syscall.S_IFCHR
-	} else if entry.Mode&uint32(os.ModeDevice) != 0 {
-		mode |= syscall.S_IFBLK
-	} else {
-		return nil
-	}
-	return syscall.Mknod(fullPath, mode, int(entry.GetRdev()))
-}
-
-func (entry *Entry) IsSameSpecial(fileInfo os.FileInfo) bool {
-	stat := fileInfo.Sys().(*syscall.Stat_t)
-	if stat == nil {
-		return false
-	}
-	return (uint32(fileInfo.Mode()) == entry.Mode) && (uint64(stat.Rdev) == entry.GetRdev())
-}
-
-func MakeHardlink(source string, target string) error {
-	return unix.Linkat(unix.AT_FDCWD, source, unix.AT_FDCWD, target, 0)
-}
+func (entry *Entry) ReadAttributes(top string) {
+	fullPath := filepath.Join(top, entry.Path)
+	f, err := os.OpenFile(fullPath, os.O_RDONLY|syscall.O_NOFOLLOW, 0)
+	if err != nil {
+		return
+	}
+	attributes, _ := xattr.FList(f)
+	if len(attributes) > 0 {
+		entry.Attributes = &map[string][]byte{}
+		for _, name := range attributes {
+			attribute, err := xattr.Get(fullPath, name)
+			if err == nil {
+				(*entry.Attributes)[name] = attribute
+			}
+		}
+	}
+	if err := entry.ReadFileFlags(f); err != nil {
+		LOG_INFO("ATTR_BACKUP", "Could not backup flags for file %s: %v", fullPath, err)
+	}
+	f.Close()
+}
+
+func (entry *Entry) SetAttributesToFile(fullPath string) {
+	f, err := os.OpenFile(fullPath, os.O_RDONLY|syscall.O_NOFOLLOW, 0)
+	if err != nil {
+		return
+	}
+	names, _ := xattr.FList(f)
+	for _, name := range names {
+		newAttribute, found := (*entry.Attributes)[name]
+		if found {
+			oldAttribute, _ := xattr.FGet(f, name)
+			if !bytes.Equal(oldAttribute, newAttribute) {
+				xattr.FSet(f, name, newAttribute)
+			}
+			delete(*entry.Attributes, name)
+		} else {
+			xattr.FRemove(f, name)
+		}
+	}
+	for name, attribute := range *entry.Attributes {
+		if len(name) > 0 && name[0] == '\x00' {
+			continue
+		}
+		xattr.FSet(f, name, attribute)
+	}
+	if err := entry.RestoreLateFileFlags(f); err != nil {
+		LOG_DEBUG("ATTR_RESTORE", "Could not restore flags for file %s: %v", fullPath, err)
+	}
+	f.Close()
+}
 func joinPath(components ...string) string {
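The O_NOFOLLOW fix called out in the commit message is about not following symlinks while reading or writing extended attributes. With github.com/pkg/xattr (the library used throughout this diff), the path-based L* variants operate on the link itself, which gives a simple way to demonstrate the behaviour outside duplicacy. The paths and attribute name below are assumptions, and note that Linux only permits certain xattr namespaces on symlinks.

package main

import (
	"fmt"
	"os"

	"github.com/pkg/xattr"
)

func main() {
	target := "/tmp/xattr-target"
	link := "/tmp/xattr-link"
	os.WriteFile(target, []byte("data"), 0644)
	os.Symlink(target, link)

	// LSet/LList/LGet act on the symlink itself instead of following it.
	// (On Linux the kernel rejects user.* attributes on symlinks, so this
	// particular call is mainly illustrative for macOS/BSD.)
	if err := xattr.LSet(link, "user.example", []byte("value")); err != nil {
		fmt.Println("set on link failed:", err)
	}
	names, _ := xattr.LList(link)
	for _, name := range names {
		value, _ := xattr.LGet(link, name)
		fmt.Printf("link attribute %s = %q\n", name, value)
	}
	// The target's attributes are untouched by the L* calls above.
	targetNames, _ := xattr.LList(target)
	fmt.Println("target attributes:", targetNames)
}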

View File

@@ -117,18 +117,6 @@ func (entry *Entry) SetAttributesToFile(fullPath string) {
} }
func (entry *Entry) ReadDeviceNode(fileInfo os.FileInfo) bool {
return nil
}
func (entry *Entry) RestoreSpecial(fullPath string) error {
return nil
}
func MakeHardlink(source string, target string) error {
return os.Link(source, target)
}
func joinPath(components ...string) string { func joinPath(components ...string) string {
combinedPath := `\\?\` + filepath.Join(components...) combinedPath := `\\?\` + filepath.Join(components...)

View File

@@ -1,30 +0,0 @@
// Copyright (c) Acrosync LLC. All rights reserved.
// Free for personal use and commercial trial
// Commercial use requires per-user licenses available from https://duplicacy.com
//go:build freebsd || netbsd
// +build freebsd netbsd
package duplicacy
import (
"bytes"
"encoding/binary"
"os"
"path/filepath"
"syscall"
"github.com/pkg/xattr"
)
func (entry *Entry) restoreLateFileFlags(path string) error {
if entry.Attributes == nil {
return nil
}
if v, have := (*entry.Attributes)[bsdFileFlagsKey]; have {
if _, _, errno := syscall.Syscall(syscall.SYS_LCHFLAGS, uintptr(unsafe.Pointer(syscall.StringBytePtr(path))), uintptr(v), 0); errno != 0 {
return os.NewSyscallError("lchflags", errno)
}
}
return nil
}