Compare commits

..

7 Commits

Author SHA1 Message Date
96e7c93a2c Support backup and restore of special files on POSIX style systems
Special files are device nodes and named pipes. The necessity of the
former is clear; that of the latter is debatable.
In order to preserve backward compatibility, the device number is
encoded in the StartChunk/StartOffset fields of the entry.
2023-10-03 16:26:19 -05:00
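
The encoding mentioned above can be illustrated with a short, self-contained
sketch (the entry type below is a hypothetical stand-in, not duplicacy's
actual Entry struct): a 64-bit device number is split across the two existing
integer fields and recombined on restore, mirroring the GetRdev() helper
visible later in this diff.

package main

import "fmt"

type entry struct {
    StartChunk  int // low 32 bits of the device number
    StartOffset int // high 32 bits of the device number
}

func (e *entry) setRdev(rdev uint64) {
    e.StartChunk = int(uint32(rdev))
    e.StartOffset = int(uint32(rdev >> 32))
}

func (e *entry) getRdev() uint64 {
    return uint64(e.StartChunk) | uint64(e.StartOffset)<<32
}

func main() {
    var e entry
    e.setRdev(0x0000000800000003) // placeholder major/minor packing
    fmt.Printf("0x%016x\n", e.getRdev())
}
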
f06779659e Don't overwrite symlinks if file already exists 2023-10-03 15:08:47 -05:00
16885eaa61 Support backup and restore of hardlinks
This tracks the inode/device numbers from the stat info and creates
backward-compatible snapshots that preserve hardlinks. Backward
compatibility is achieved by saving a virtual inode index in the
Link field of the file entry. Since this field was previously used only
for symlinks, this won't break old versions. Additionally, the entry
data is cloned so restoration with an old version works.

Current limitations are primarily with restore. They include:
- no command line option to prevent hard link restore
- if a file has the immutable or append-only flag, it will be set before
hardlinks are restored, so hardlinking will fail.
- if a partial restore includes a hardlink but not the parent
directories, the hardlink will fail.

These will be solved by grouping the restore of hardlinks together
with files, prior to applying final metadata.

- if a file has changed and is being rewritten by a restore, hardlinks are
not preserved.
2023-10-03 12:21:46 -05:00
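
A simplified, self-contained sketch of the virtual inode index described
above (the linkKey and record types and the sample data are illustrative,
not duplicacy's actual structures): the first entry seen for a (device,
inode) pair becomes the hardlink root and stores "/" in Link, while later
entries store the root's index as a hex string, matching the
FormatInt/ParseUint calls in the diff below.

package main

import (
    "fmt"
    "strconv"
)

type linkKey struct{ dev, ino uint64 }

type record struct {
    path string
    link string // "/" for the hardlink root, hex index for subsequent links
}

func main() {
    files := []struct {
        path     string
        dev, ino uint64
    }{
        {"a.txt", 1, 100},
        {"b.txt", 1, 100}, // hardlinked to a.txt
        {"c.txt", 1, 200},
    }

    table := map[linkKey]int{} // (dev, ino) -> index of the root record
    var records []record

    for _, f := range files {
        k := linkKey{f.dev, f.ino}
        if rootIndex, seen := table[k]; seen {
            // Encode the root's index as hex, as FormatInt(..., 16) does in the diff.
            records = append(records, record{f.path, strconv.FormatInt(int64(rootIndex), 16)})
            continue
        }
        table[k] = len(records)
        records = append(records, record{f.path, "/"})
    }

    for _, r := range records {
        fmt.Printf("%s -> %q\n", r.path, r.link)
    }

    // On restore, a hex Link is decoded back into an index.
    i, _ := strconv.ParseUint(records[1].link, 16, 64)
    fmt.Println("b.txt is hardlinked to", records[i].path)
}
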
bf2565b5c3 Initial implementation of file/inode flags (Linux, BSD, darwin)
Basic support for BSD- and Darwin-style chflags (stat flags). Applies
these flags at the end of file restore.
Supports Linux-style ioctl_iflags(2) in a two-step process. Flags that
need to be applied prior to writes, such as compress and especially no-COW,
are applied immediately upon file open.

The flags format is backward compatible. An attribute whose name starts
with a null byte is used to store the flags in the entry attributes table.
With an old version of duplicacy, restoring this attribute should silently
fail (effectively, it is ignored).

Fixes xattr restore to use O_NOFOLLOW so attributes are applied to the symlink itself.

TODO: Tests; possible option to clear the immutable/append-only flags prior to
restoring an existing file, similar to rsync. Does not apply attributes
or flags to the topmost directory.
2023-10-03 12:15:54 -05:00
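
A minimal sketch of the flags round trip described above, assuming only the
"\x00bf" key and the 4-byte little-endian layout that appear in the diff
below (the flag value is a placeholder): the null-byte prefix keeps the
entry out of any real xattr namespace, so an old version that tries to
restore it as an xattr simply fails and the flags are ignored.

package main

import (
    "encoding/binary"
    "fmt"
)

const bsdFileFlagsKey = "\x00bf" // attribute key starting with a null byte

func storeFlags(attributes map[string][]byte, flags uint32) {
    v := make([]byte, 4)
    binary.LittleEndian.PutUint32(v, flags)
    attributes[bsdFileFlagsKey] = v
}

func loadFlags(attributes map[string][]byte) (uint32, bool) {
    v, ok := attributes[bsdFileFlagsKey]
    if !ok || len(v) != 4 {
        return 0, false
    }
    return binary.LittleEndian.Uint32(v), true
}

func main() {
    attrs := map[string][]byte{}
    storeFlags(attrs, 0x00020000) // placeholder chflags-style value
    if flags, ok := loadFlags(attrs); ok {
        fmt.Printf("restored flags 0x%x\n", flags)
    }
}
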
c07eef5063 Increase b2 client max file listing count to 10000
Considerable speed improvement when listing a large storage.
2023-10-02 12:46:02 -05:00
2fdedcb9dd Fix exclude_by_attribute feature on POSIX
The exclude-by-attribute feature is broken on non-Darwin POSIX systems (Linux and FreeBSD)
because those xattrs must be prefixed with a legal namespace. The old xattr
library implicitly prepended the user namespace to the xattr name, but the current
official Go package does not (which is just as well).

Also fixes the tests to remove the conflicting old xattr dependency and provides
test cases for both Darwin and non-Darwin POSIX.
2023-10-02 12:41:50 -05:00
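
A small sketch of the namespace issue this fixes, using only the attribute
keys that appear in the diff below (build tags omitted; in the real code the
two checks live in platform-specific files): on Linux and FreeBSD the
exclusion xattr must carry an explicit "user." namespace, while Darwin
checks the Time Machine exclusion metadata key.

package main

import (
    "fmt"
    "strings"
)

func excludedByAttributePOSIX(attributes map[string][]byte) bool {
    // The "user." namespace prefix is now written explicitly.
    _, ok := attributes["user.duplicacy_exclude"]
    return ok
}

func excludedByAttributeDarwin(attributes map[string][]byte) bool {
    value, ok := attributes["com.apple.metadata:com_apple_backup_excludeItem"]
    return ok && strings.Contains(string(value), "com.apple.backupd")
}

func main() {
    attrs := map[string][]byte{"user.duplicacy_exclude": nil}
    fmt.Println(excludedByAttributePOSIX(attrs))  // true
    fmt.Println(excludedByAttributeDarwin(attrs)) // false
}
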
7bdd1cabd3 Use S3 ListObjectsV2 for listing files
ListObjects has been deprecated since 2016, and ListObjectsV2, with its
explicit pagination tokens, is also more performant for large listings.

This also mitigates an issue with iDrive E2 where the StartAfter/Marker
is included in the output, leading to duplicate entries. Currently this
causes an exhaustive prune to delete chunks erroneously flagged as
duplicates, destroying the storage.
2023-10-02 12:41:50 -05:00
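
A hedged sketch of V2 listing with explicit continuation tokens, written
against the aws-sdk-go (v1) API rather than duplicacy's own S3 storage code;
the bucket, prefix, and MaxKeys values are placeholders. The point is that
NextContinuationToken is an opaque cursor, so, unlike Marker/StartAfter, it
cannot be echoed back as a listed entry and produce duplicates.

package main

import (
    "fmt"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/session"
    "github.com/aws/aws-sdk-go/service/s3"
)

func listAllKeys(svc *s3.S3, bucket, prefix string) ([]string, error) {
    var keys []string
    input := &s3.ListObjectsV2Input{
        Bucket:  aws.String(bucket),
        Prefix:  aws.String(prefix),
        MaxKeys: aws.Int64(1000), // placeholder page size
    }
    for {
        output, err := svc.ListObjectsV2(input)
        if err != nil {
            return nil, err
        }
        for _, object := range output.Contents {
            keys = append(keys, *object.Key)
        }
        if !aws.BoolValue(output.IsTruncated) || output.NextContinuationToken == nil {
            break
        }
        // Pass the opaque token back to fetch the next page.
        input.ContinuationToken = output.NextContinuationToken
    }
    return keys, nil
}

func main() {
    sess := session.Must(session.NewSession())
    keys, err := listAllKeys(s3.New(sess), "example-bucket", "chunks/")
    if err != nil {
        fmt.Println("list failed:", err)
        return
    }
    fmt.Println(len(keys), "objects")
}
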
12 changed files with 216 additions and 527 deletions

View File

@@ -2262,7 +2262,7 @@ func main() {
app.Name = "duplicacy"
app.HelpName = "duplicacy"
app.Usage = "A new generation cloud backup tool based on lock-free deduplication"
app.Version = "3.2.2" + " (" + GitCommit + ")"
app.Version = "3.2.1" + " (" + GitCommit + ")"
// Exit with code 2 if an invalid command is provided
app.CommandNotFound = func(context *cli.Context, command string) {

View File

@@ -21,7 +21,7 @@ import (
"sync/atomic"
"time"
"github.com/vmihailenco/msgpack"
"github.com/vmihailenco/msgpack"
)
// BackupManager performs the two major operations, backup and restore, and passes other operations, mostly related to
@@ -36,9 +36,9 @@ type BackupManager struct {
config *Config // contains a number of options
nobackupFile string // don't backup directory when this file name is found
filtersFile string // the path to the filters file
excludeByAttribute bool // don't backup file based on file attribute
nobackupFile string // don't backup directory when this file name is found
filtersFile string // the path to the filters file
excludeByAttribute bool // don't backup file based on file attribute
cachePath string
}
@@ -146,7 +146,7 @@ func (manager *BackupManager) Backup(top string, quickMode bool, threads int, ta
if manager.config.DataShards != 0 && manager.config.ParityShards != 0 {
LOG_INFO("BACKUP_ERASURECODING", "Erasure coding is enabled with %d data shards and %d parity shards",
manager.config.DataShards, manager.config.ParityShards)
manager.config.DataShards, manager.config.ParityShards)
}
if manager.config.rsaPublicKey != nil && len(manager.config.FileKey) > 0 {
@@ -187,7 +187,7 @@ func (manager *BackupManager) Backup(top string, quickMode bool, threads int, ta
// If the listing operation is fast and this is an initial backup, list all chunks and
// put them in the cache.
if manager.storage.IsFastListing() && remoteSnapshot.Revision == 0 {
if (manager.storage.IsFastListing() && remoteSnapshot.Revision == 0) {
LOG_INFO("BACKUP_LIST", "Listing all chunks")
allChunks, _ := manager.SnapshotManager.ListAllFiles(manager.storage, "chunks/")
@@ -222,7 +222,7 @@ func (manager *BackupManager) Backup(top string, quickMode bool, threads int, ta
var totalModifiedFileSize int64 // total size of modified files
var uploadedModifiedFileSize int64 // portions that have been uploaded (including cache hits)
var preservedFileSize int64 // total size of unmodified files
var preservedFileSize int64 // total size of unmodified files
localSnapshot := CreateEmptySnapshot(manager.snapshotID)
localSnapshot.Revision = remoteSnapshot.Revision + 1
@@ -239,7 +239,7 @@ func (manager *BackupManager) Backup(top string, quickMode bool, threads int, ta
// List local files
defer CatchLogException()
localSnapshot.ListLocalFiles(shadowTop, manager.nobackupFile, manager.filtersFile, manager.excludeByAttribute, localListingChannel, &skippedDirectories, &skippedFiles)
}()
} ()
go func() {
// List remote files
@@ -261,7 +261,7 @@ func (manager *BackupManager) Backup(top string, quickMode bool, threads int, ta
})
}
close(remoteListingChannel)
}()
} ()
// Create the local file list
localEntryList, err := CreateEntryList(manager.snapshotID, manager.cachePath, maximumInMemoryEntries)
@@ -275,7 +275,7 @@ func (manager *BackupManager) Backup(top string, quickMode bool, threads int, ta
var remoteEntry *Entry
remoteListingOK := true
for {
localEntry := <-localListingChannel
localEntry := <- localListingChannel
if localEntry == nil {
break
}
@@ -289,7 +289,7 @@ func (manager *BackupManager) Backup(top string, quickMode bool, threads int, ta
compareResult = localEntry.Compare(remoteEntry)
} else {
if remoteListingOK {
remoteEntry, remoteListingOK = <-remoteListingChannel
remoteEntry, remoteListingOK = <- remoteListingChannel
}
if !remoteListingOK {
compareResult = -1
@@ -304,7 +304,7 @@ func (manager *BackupManager) Backup(top string, quickMode bool, threads int, ta
remoteEntry = nil
}
if compareResult == 0 {
if compareResult == 0 {
// No need to check if it is in hash mode -- in that case remote listing is nil
if localEntry.IsSameAs(remoteEntry) && localEntry.IsFile() {
if localEntry.Size > 0 {
@@ -339,8 +339,8 @@ func (manager *BackupManager) Backup(top string, quickMode bool, threads int, ta
// compareResult must be < 0; the local file is new
totalModifiedFileSize += localEntry.Size
if localEntry.Size > 0 {
// A size of -1 indicates this is a modified file that will be uploaded
localEntry.Size = -1
// A size of -1 indicates this is a modified file that will be uploaded
localEntry.Size = -1
}
}
@@ -448,7 +448,7 @@ func (manager *BackupManager) Backup(top string, quickMode bool, threads int, ta
_, found := chunkCache[chunkID]
if found {
if time.Now().Unix()-lastUploadingTime > keepUploadAlive {
if time.Now().Unix() - lastUploadingTime > keepUploadAlive {
LOG_INFO("UPLOAD_KEEPALIVE", "Skip chunk cache to keep connection alive")
found = false
}
@@ -558,7 +558,7 @@ func (manager *BackupManager) Backup(top string, quickMode bool, threads int, ta
if showStatistics {
LOG_INFO("BACKUP_STATS", "Files: %d total, %s bytes; %d new, %s bytes",
localEntryList.NumberOfEntries-int64(len(skippedFiles)),
localEntryList.NumberOfEntries - int64(len(skippedFiles)),
PrettyNumber(preservedFileSize+uploadedFileSize),
len(localEntryList.ModifiedEntries), PrettyNumber(uploadedFileSize))
@@ -686,7 +686,7 @@ func (manager *BackupManager) Restore(top string, revision int, inPlace bool, qu
// List local files
defer CatchLogException()
localSnapshot.ListLocalFiles(top, manager.nobackupFile, manager.filtersFile, manager.excludeByAttribute, localListingChannel, nil, nil)
}()
} ()
remoteSnapshot := manager.SnapshotManager.DownloadSnapshot(manager.snapshotID, revision)
manager.SnapshotManager.DownloadSnapshotSequences(remoteSnapshot)
@@ -698,41 +698,18 @@ func (manager *BackupManager) Restore(top string, revision int, inPlace bool, qu
return true
})
close(remoteListingChannel)
}()
} ()
var localEntry *Entry
localListingOK := true
type hardLinkEntry struct {
entry *Entry
willExist bool
entry *Entry
willDownload bool
}
var hardLinkTable []hardLinkEntry
var hardLinks []*Entry
restoreHardlink := func(entry *Entry, fullPath string) bool {
if entry.IsHardlinkRoot() {
hardLinkTable[len(hardLinkTable)-1].willExist = true
} else if entry.IsHardlinkedFrom() {
i, err := entry.GetHardlinkId()
if err != nil {
LOG_ERROR("RESTORE_HARDLINK", "Decode error for hardlinked entry %s, %v", entry.Path, err)
return false
}
if !hardLinkTable[i].willExist {
hardLinkTable[i] = hardLinkEntry{entry, true}
} else {
sourcePath := joinPath(top, hardLinkTable[i].entry.Path)
LOG_INFO("RESTORE_HARDLINK", "Hard linking %s to %s", fullPath, sourcePath)
if err := MakeHardlink(sourcePath, fullPath); err != nil {
LOG_ERROR("RESTORE_HARDLINK", "Failed to create hard link %s to %s %v", fullPath, sourcePath, err)
}
return true
}
}
return false
}
for remoteEntry := range remoteListingChannel {
if remoteEntry.IsHardlinkRoot() {
@@ -748,7 +725,7 @@ func (manager *BackupManager) Restore(top string, revision int, inPlace bool, qu
for {
if localEntry == nil && localListingOK {
localEntry, localListingOK = <-localListingChannel
localEntry, localListingOK = <- localListingChannel
}
if localEntry == nil {
compareResult = 1
@@ -775,16 +752,12 @@ func (manager *BackupManager) Restore(top string, revision int, inPlace bool, qu
}
fullPath := joinPath(top, remoteEntry.Path)
if remoteEntry.IsLink() {
if stat, _ := os.Lstat(fullPath); stat != nil {
if stat.Mode()&os.ModeSymlink != 0 {
isRegular, link, err := Readlink(fullPath)
if err == nil && link == remoteEntry.Link && !isRegular {
remoteEntry.RestoreMetadata(fullPath, nil, setOwner)
if remoteEntry.IsHardlinkRoot() {
hardLinkTable[len(hardLinkTable)-1].willExist = true
}
continue
}
}
@@ -798,16 +771,11 @@ func (manager *BackupManager) Restore(top string, revision int, inPlace bool, qu
os.Remove(fullPath)
}
if restoreHardlink(remoteEntry, fullPath) {
continue
}
if err := os.Symlink(remoteEntry.Link, fullPath); err != nil {
LOG_ERROR("RESTORE_SYMLINK", "Can't create symlink %s: %v", remoteEntry.Path, err)
return 0
}
remoteEntry.RestoreMetadata(fullPath, nil, setOwner)
LOG_TRACE("DOWNLOAD_DONE", "Symlink %s updated", remoteEntry.Path)
} else if remoteEntry.IsDir() {
@@ -831,12 +799,6 @@ func (manager *BackupManager) Restore(top string, revision int, inPlace bool, qu
directoryEntries = append(directoryEntries, remoteEntry)
} else if remoteEntry.IsSpecial() {
if stat, _ := os.Lstat(fullPath); stat != nil {
if remoteEntry.IsSameSpecial(stat) {
remoteEntry.RestoreMetadata(fullPath, nil, setOwner)
if remoteEntry.IsHardlinkRoot() {
hardLinkTable[len(hardLinkTable)-1].willExist = true
}
}
if !overwrite {
LOG_WERROR(allowFailures, "DOWNLOAD_OVERWRITE",
"File %s already exists. Please specify the -overwrite option to overwrite", remoteEntry.Path)
@@ -845,10 +807,6 @@ func (manager *BackupManager) Restore(top string, revision int, inPlace bool, qu
os.Remove(fullPath)
}
if restoreHardlink(remoteEntry, fullPath) {
continue
}
if err := remoteEntry.RestoreSpecial(fullPath); err != nil {
LOG_ERROR("RESTORE_SPECIAL", "Unable to restore special file %s: %v", remoteEntry.Path, err)
return 0
@@ -856,21 +814,20 @@ func (manager *BackupManager) Restore(top string, revision int, inPlace bool, qu
remoteEntry.RestoreMetadata(fullPath, nil, setOwner)
} else {
if remoteEntry.IsHardlinkRoot() {
hardLinkTable[len(hardLinkTable)-1].willExist = true
hardLinkTable[len(hardLinkTable)-1] = hardLinkEntry{remoteEntry, true}
} else if remoteEntry.IsHardlinkedFrom() {
i, err := remoteEntry.GetHardlinkId()
if err != nil {
LOG_ERROR("RESTORE_HARDLINK", "Decode error for hardlinked entry %s, %v", remoteEntry.Path, err)
i, err := strconv.ParseUint(remoteEntry.Link, 16, 64)
if err != nil {
LOG_ERROR("RESTORE_HARDLINK", "Decode error in hardlink entry, expected hex int, got %s", remoteEntry.Link)
return 0
}
if !hardLinkTable[i].willExist {
if !hardLinkTable[i].willDownload {
hardLinkTable[i] = hardLinkEntry{remoteEntry, true}
} else {
hardLinks = append(hardLinks, remoteEntry)
continue
}
}
// We can't download files here since fileEntries needs to be sorted
fileEntries = append(fileEntries, remoteEntry)
totalFileSize += remoteEntry.Size
@@ -882,7 +839,7 @@ func (manager *BackupManager) Restore(top string, revision int, inPlace bool, qu
}
for localListingOK {
localEntry, localListingOK = <-localListingChannel
localEntry, localListingOK = <- localListingChannel
if localEntry != nil {
extraFiles = append(extraFiles, localEntry.Path)
}
@@ -991,32 +948,11 @@ func (manager *BackupManager) Restore(top string, revision int, inPlace bool, qu
}
for _, linkEntry := range hardLinks {
i, _ := linkEntry.GetHardlinkId()
i, _ := strconv.ParseUint(linkEntry.Link, 16, 64)
sourcePath := joinPath(top, hardLinkTable[i].entry.Path)
fullPath := joinPath(top, linkEntry.Path)
if stat, _ := os.Lstat(fullPath); stat != nil {
sourceStat, _ := os.Lstat(sourcePath)
if os.SameFile(stat, sourceStat) {
continue
}
if sourceStat == nil {
LOG_WERROR(allowFailures, "RESTORE_HARDLINK",
"Target %s for hardlink %s is missing", sourcePath, linkEntry.Path)
continue
}
if !overwrite {
LOG_WERROR(allowFailures, "DOWNLOAD_OVERWRITE",
"File %s already exists. Please specify the -overwrite option to overwrite", linkEntry.Path)
continue
}
os.Remove(fullPath)
}
LOG_DEBUG("RESTORE_HARDLINK", "Hard linking %s to %s", fullPath, sourcePath)
if err := MakeHardlink(sourcePath, fullPath); err != nil {
LOG_INFO("RESTORE_HARDLINK", "Hard linking %s to %s", fullPath, sourcePath)
if err := os.Link(sourcePath, fullPath); err != nil {
LOG_ERROR("RESTORE_HARDLINK", "Failed to create hard link %s to %s", fullPath, sourcePath)
return 0
}
@@ -1167,14 +1103,14 @@ func (manager *BackupManager) UploadSnapshot(chunkOperator *ChunkOperator, top s
encoder := msgpack.NewEncoder(buffer)
metadataChunkMaker := CreateMetaDataChunkMaker(manager.config, metadataChunkSize)
var chunkHashes []string
var chunkHashes []string
var chunkLengths []int
lastChunk := -1
lastEndChunk := 0
type hardLinkEntry struct {
entry *Entry
entry *Entry
startChunk int
}
var hardLinkTable []hardLinkEntry
@@ -1206,10 +1142,10 @@ func (manager *BackupManager) UploadSnapshot(chunkOperator *ChunkOperator, top s
entry.StartChunk -= lastEndChunk
lastEndChunk = entry.EndChunk
entry.EndChunk = delta
} else if entry.IsHardlinkedFrom() && !entry.IsLink() {
i, err := entry.GetHardlinkId()
if err != nil {
LOG_ERROR("SNAPSHOT_UPLOAD", "Decode error for hardlinked entry %s, %v", entry.Link, err)
} else if entry.IsHardlinkedFrom() {
i, err := strconv.ParseUint(entry.Link, 16, 64)
if err != nil {
LOG_ERROR("SNAPSHOT_UPLOAD", "Decode error in hardlink entry, expected hex int, got %s", entry.Link)
return err
}
@@ -1297,10 +1233,9 @@ func (manager *BackupManager) UploadSnapshot(chunkOperator *ChunkOperator, top s
// Restore downloads a file from the storage. If 'inPlace' is false, the download file is saved first to a temporary
// file under the .duplicacy directory and then replaces the existing one. Otherwise, the existing file will be
// overwritten directly.
// Return: true, nil: Restored file;
//
// false, nil: Skipped file;
// false, error: Failure to restore file (only if allowFailures == true)
// Return: true, nil: Restored file;
// false, nil: Skipped file;
// false, error: Failure to restore file (only if allowFailures == true)
func (manager *BackupManager) RestoreFile(chunkDownloader *ChunkDownloader, chunkMaker *ChunkMaker, entry *Entry, top string, inPlace bool, overwrite bool,
showStatistics bool, totalFileSize int64, downloadedFileSize int64, startTime int64, allowFailures bool) (bool, error) {
@@ -1476,7 +1411,7 @@ func (manager *BackupManager) RestoreFile(chunkDownloader *ChunkDownloader, chun
// fileHash != entry.Hash, warn/error depending on -overwrite option
if !overwrite && !isNewFile {
LOG_WERROR(allowFailures, "DOWNLOAD_OVERWRITE",
"File %s already exists. Please specify the -overwrite option to overwrite", entry.Path)
"File %s already exists. Please specify the -overwrite option to overwrite", entry.Path)
return false, fmt.Errorf("file exists")
}
@@ -1723,7 +1658,7 @@ func (manager *BackupManager) CopySnapshots(otherManager *BackupManager, snapsho
if otherManager.config.DataShards != 0 && otherManager.config.ParityShards != 0 {
LOG_INFO("BACKUP_ERASURECODING", "Erasure coding is enabled for the destination storage with %d data shards and %d parity shards",
otherManager.config.DataShards, otherManager.config.ParityShards)
otherManager.config.DataShards, otherManager.config.ParityShards)
}
if otherManager.config.rsaPublicKey != nil && len(otherManager.config.FileKey) > 0 {
@@ -1824,15 +1759,15 @@ func (manager *BackupManager) CopySnapshots(otherManager *BackupManager, snapsho
LOG_TRACE("SNAPSHOT_COPY", "Copying snapshot %s at revision %d", snapshot.ID, snapshot.Revision)
for _, chunkHash := range snapshot.FileSequence {
chunks[chunkHash] = true // The chunk is a snapshot chunk
chunks[chunkHash] = true // The chunk is a snapshot chunk
}
for _, chunkHash := range snapshot.ChunkSequence {
chunks[chunkHash] = true // The chunk is a snapshot chunk
chunks[chunkHash] = true // The chunk is a snapshot chunk
}
for _, chunkHash := range snapshot.LengthSequence {
chunks[chunkHash] = true // The chunk is a snapshot chunk
chunks[chunkHash] = true // The chunk is a snapshot chunk
}
description := manager.SnapshotManager.DownloadSequence(snapshot.ChunkSequence)
@@ -1845,7 +1780,7 @@ func (manager *BackupManager) CopySnapshots(otherManager *BackupManager, snapsho
for _, chunkHash := range snapshot.ChunkHashes {
if _, found := chunks[chunkHash]; !found {
chunks[chunkHash] = false // The chunk is a file chunk
chunks[chunkHash] = false // The chunk is a file chunk
}
}
@@ -1877,7 +1812,7 @@ func (manager *BackupManager) CopySnapshots(otherManager *BackupManager, snapsho
}
}
LOG_INFO("SNAPSHOT_COPY", "Chunks to copy: %d, to skip: %d, total: %d", len(chunksToCopy), len(chunks)-len(chunksToCopy), len(chunks))
LOG_INFO("SNAPSHOT_COPY", "Chunks to copy: %d, to skip: %d, total: %d", len(chunksToCopy), len(chunks) - len(chunksToCopy), len(chunks))
chunkDownloader := CreateChunkOperator(manager.config, manager.storage, nil, false, false, downloadingThreads, false)
@@ -1886,7 +1821,7 @@ func (manager *BackupManager) CopySnapshots(otherManager *BackupManager, snapsho
copiedChunks := 0
chunkUploader := CreateChunkOperator(otherManager.config, otherManager.storage, nil, false, false, uploadingThreads, false)
chunkUploader.UploadCompletionFunc = func(chunk *Chunk, chunkIndex int, skipped bool, chunkSize int, uploadSize int) {
chunkUploader.UploadCompletionFunc = func(chunk *Chunk, chunkIndex int, skipped bool, chunkSize int, uploadSize int) {
action := "Skipped"
if !skipped {
copiedChunks++
@@ -1897,11 +1832,11 @@ func (manager *BackupManager) CopySnapshots(otherManager *BackupManager, snapsho
elapsedTime := time.Now().Sub(startTime).Seconds()
speed := int64(float64(atomic.LoadInt64(&uploadedBytes)) / elapsedTime)
remainingTime := int64(float64(len(chunksToCopy)-chunkIndex-1) / float64(chunkIndex+1) * elapsedTime)
percentage := float64(chunkIndex+1) / float64(len(chunksToCopy)) * 100.0
remainingTime := int64(float64(len(chunksToCopy) - chunkIndex - 1) / float64(chunkIndex + 1) * elapsedTime)
percentage := float64(chunkIndex + 1) / float64(len(chunksToCopy)) * 100.0
LOG_INFO("COPY_PROGRESS", "%s chunk %s (%d/%d) %sB/s %s %.1f%%",
action, chunk.GetID(), chunkIndex+1, len(chunksToCopy),
PrettySize(speed), PrettyTime(remainingTime), percentage)
action, chunk.GetID(), chunkIndex + 1, len(chunksToCopy),
PrettySize(speed), PrettyTime(remainingTime), percentage)
otherManager.config.PutChunk(chunk)
}
@@ -1924,7 +1859,7 @@ func (manager *BackupManager) CopySnapshots(otherManager *BackupManager, snapsho
chunkDownloader.Stop()
chunkUploader.Stop()
LOG_INFO("SNAPSHOT_COPY", "Copied %d new chunks and skipped %d existing chunks", copiedChunks, len(chunks)-copiedChunks)
LOG_INFO("SNAPSHOT_COPY", "Copied %d new chunks and skipped %d existing chunks", copiedChunks, len(chunks) - copiedChunks)
for _, snapshot := range snapshots {
if revisionMap[snapshot.ID][snapshot.Revision] == false {

View File

@@ -8,7 +8,6 @@ import (
"crypto/sha256"
"encoding/base64"
"encoding/json"
"errors"
"fmt"
"io/ioutil"
"os"
@@ -24,11 +23,6 @@ import (
"github.com/vmihailenco/msgpack"
)
const (
entrySymHardLinkRootChunkMarker = -72
entrySymHardLinkTargetChunkMarker = -73
)
// This is the hidden directory in the repository for storing various files.
var DUPLICACY_DIRECTORY = ".duplicacy"
var DUPLICACY_FILE = ".duplicacy"
@@ -516,11 +510,7 @@ func (entry *Entry) IsLink() bool {
}
func (entry *Entry) IsSpecial() bool {
return entry.Mode&uint32(os.ModeNamedPipe|os.ModeDevice|os.ModeCharDevice|os.ModeSocket) != 0
}
func (entry *Entry) IsFileOrSpecial() bool {
return entry.Mode&uint32(os.ModeDir|os.ModeSymlink|os.ModeIrregular) == 0
return entry.Mode&uint32(os.ModeNamedPipe|os.ModeDevice|os.ModeCharDevice) != 0
}
func (entry *Entry) IsComplete() bool {
@@ -528,23 +518,11 @@ func (entry *Entry) IsComplete() bool {
}
func (entry *Entry) IsHardlinkedFrom() bool {
return (entry.IsFileOrSpecial() && len(entry.Link) > 0 && entry.Link != "/") || (entry.IsLink() && entry.StartChunk == entrySymHardLinkTargetChunkMarker)
return entry.IsFile() && len(entry.Link) > 0 && entry.Link != "/"
}
func (entry *Entry) IsHardlinkRoot() bool {
return (entry.IsFileOrSpecial() && entry.Link == "/") || (entry.IsLink() && entry.StartChunk == entrySymHardLinkRootChunkMarker)
}
func (entry *Entry) GetHardlinkId() (int, error) {
if entry.IsLink() {
if entry.StartChunk != entrySymHardLinkTargetChunkMarker {
return 0, errors.New("Symlink entry not marked as hardlinked")
}
return entry.StartOffset, nil
} else {
i, err := strconv.ParseUint(entry.Link, 16, 64)
return int(i), err
}
return entry.IsFile() && entry.Link == "/"
}
func (entry *Entry) GetPermissions() os.FileMode {
@@ -605,10 +583,6 @@ func (entry *Entry) RestoreMetadata(fullPath string, fileInfo *os.FileInfo, setO
}
}
if entry.Attributes != nil && len(*entry.Attributes) > 0 {
entry.SetAttributesToFile(fullPath)
}
// Only set the time if the file is not a symlink
if !entry.IsLink() && (*fileInfo).ModTime().Unix() != entry.Time {
modifiedTime := time.Unix(entry.Time, 0)
@@ -619,6 +593,10 @@ func (entry *Entry) RestoreMetadata(fullPath string, fileInfo *os.FileInfo, setO
}
}
if entry.Attributes != nil && len(*entry.Attributes) > 0 {
entry.SetAttributesToFile(fullPath)
}
return true
}
@@ -807,41 +785,10 @@ func ListEntries(top string, path string, patterns []string, nobackupFile string
if f.Name() == DUPLICACY_DIRECTORY {
continue
}
entry := CreateEntryFromFileInfo(f, normalizedPath)
if len(patterns) > 0 && !MatchPath(entry.Path, patterns) {
continue
}
var linkKey *listEntryLinkKey
if runtime.GOOS != "windows" && !entry.IsDir() {
if stat := f.Sys().(*syscall.Stat_t); stat != nil && stat.Nlink > 1 {
k := listEntryLinkKey{dev: uint64(stat.Dev), ino: uint64(stat.Ino)}
if linkIndex, seen := listingState.linkTable[k]; seen {
if linkIndex == -1 {
LOG_DEBUG("LIST_EXCLUDE", "%s is excluded by attribute (hardlink)", entry.Path)
continue
}
entry.Size = 0
if entry.IsLink() {
entry.StartChunk = entrySymHardLinkTargetChunkMarker
entry.StartOffset = linkIndex
} else {
entry.Link = strconv.FormatInt(int64(linkIndex), 16)
}
} else {
if entry.IsLink() {
entry.StartChunk = entrySymHardLinkRootChunkMarker
} else {
entry.Link = "/"
}
listingState.linkTable[k] = -1
linkKey = &k
}
}
}
if entry.IsLink() {
isRegular := false
isRegular, entry.Link, err = Readlink(joinPath(top, entry.Path))
@@ -872,6 +819,9 @@ func ListEntries(top string, path string, patterns []string, nobackupFile string
}
entry = newEntry
}
} else if entry.Mode & uint32(os.ModeSocket) != 0 {
// no reason to issue a warning for what should always be a transient file anyways
continue
} else if entry.IsSpecial() {
if !entry.ReadSpecial(f) {
LOG_WARN("LIST_DEV", "Failed to save device node %s", entry.Path)
@@ -880,6 +830,24 @@ func ListEntries(top string, path string, patterns []string, nobackupFile string
}
}
var linkKey *listEntryLinkKey
if stat, ok := f.Sys().(*syscall.Stat_t); entry.IsFile() && ok && stat != nil && stat.Nlink > 1 {
k := listEntryLinkKey{dev: uint64(stat.Dev), ino: uint64(stat.Ino)}
if linkIndex, seen := listingState.linkTable[k]; seen {
if linkIndex == -1 {
LOG_DEBUG("LIST_EXCLUDE", "%s is excluded by attribute (hardlink)", entry.Path)
continue
}
entry.Size = 0
entry.Link = strconv.FormatInt(int64(linkIndex), 16)
} else {
entry.Link = "/"
listingState.linkTable[k] = -1
linkKey = &k
}
}
entry.ReadAttributes(top)
if excludeByAttribute && entry.Attributes != nil && excludedByAttribute(*entry.Attributes) {

View File

@@ -756,8 +756,6 @@ func CreateStorage(preference Preference, resetPassword bool, threads int) (stor
LOG_ERROR("STORAGE_CREATE", "Failed to load the Storj storage at %s: %v", storageURL, err)
return nil
}
SavePassword(preference, "storj_key", apiKey)
SavePassword(preference, "storj_passphrase", passphrase)
return storjStorage
} else if matched[1] == "smb" {
server := matched[3]

View File

@@ -7,15 +7,14 @@ package duplicacy
import (
"bufio"
"crypto/sha256"
"encoding/json"
"fmt"
"io"
"os"
"regexp"
"runtime"
"strconv"
"strings"
"time"
"runtime"
"github.com/gilbertchen/gopass"
"golang.org/x/crypto/pbkdf2"
@@ -57,7 +56,7 @@ func IsEmptyFilter(pattern string) bool {
}
func IsUnspecifiedFilter(pattern string) bool {
if pattern[0] != '+' && pattern[0] != '-' && !strings.HasPrefix(pattern, "i:") && !strings.HasPrefix(pattern, "e:") {
if pattern[0] != '+' && pattern[0] != '-' && !strings.HasPrefix(pattern, "i:") && !strings.HasPrefix(pattern, "e:") {
return true
} else {
return false
@@ -276,6 +275,7 @@ func SavePassword(preference Preference, passwordType string, password string) {
// The following code was modified from the online article 'Matching Wildcards: An Algorithm', by Kirk J. Krauss,
// Dr. Dobb's, August 26, 2008. However, the version in the article doesn't handle cases like matching 'abcccd'
// against '*ccd', and the version here fixed that issue.
//
func matchPattern(text string, pattern string) bool {
textLength := len(text)
@@ -469,39 +469,8 @@ func PrintMemoryUsage() {
runtime.ReadMemStats(&m)
LOG_INFO("MEMORY_STATS", "Currently allocated: %s, total allocated: %s, system memory: %s, number of GCs: %d",
PrettySize(int64(m.Alloc)), PrettySize(int64(m.TotalAlloc)), PrettySize(int64(m.Sys)), m.NumGC)
PrettySize(int64(m.Alloc)), PrettySize(int64(m.TotalAlloc)), PrettySize(int64(m.Sys)), m.NumGC)
time.Sleep(time.Second)
}
}
func (entry *Entry) dump() map[string]interface{} {
object := make(map[string]interface{})
object["path"] = entry.Path
object["size"] = entry.Size
object["time"] = entry.Time
object["mode"] = entry.Mode
object["hash"] = entry.Hash
object["link"] = entry.Link
object["content"] = fmt.Sprintf("%d:%d:%d:%d",
entry.StartChunk, entry.StartOffset, entry.EndChunk, entry.EndOffset)
if entry.UID != -1 && entry.GID != -1 {
object["uid"] = entry.UID
object["gid"] = entry.GID
}
if entry.Attributes != nil && len(*entry.Attributes) > 0 {
object["attributes"] = entry.Attributes
}
return object
}
func (entry *Entry) dumpString() string {
data, _ := json.Marshal(entry.dump())
return string(data)
}
}

View File

@@ -0,0 +1,53 @@
// Copyright (c) Acrosync LLC. All rights reserved.
// Free for personal use and commercial trial
// Commercial use requires per-user licenses available from https://duplicacy.com
//go:build freebsd || netbsd || darwin
// +build freebsd netbsd darwin
package duplicacy
import (
"encoding/binary"
"os"
"syscall"
)
const bsdFileFlagsKey = "\x00bf"
func (entry *Entry) ReadFileFlags(f *os.File) error {
fileInfo, err := f.Stat()
if err != nil {
return err
}
stat, ok := fileInfo.Sys().(*syscall.Stat_t)
if ok && stat.Flags != 0 {
if entry.Attributes == nil {
entry.Attributes = &map[string][]byte{}
}
v := make([]byte, 4)
binary.LittleEndian.PutUint32(v, stat.Flags)
(*entry.Attributes)[bsdFileFlagsKey] = v
LOG_DEBUG("ATTR_READ", "Read flags 0x%x for %s", stat.Flags, entry.Path)
}
return nil
}
func (entry *Entry) RestoreEarlyDirFlags(path string) error {
return nil
}
func (entry *Entry) RestoreEarlyFileFlags(f *os.File) error {
return nil
}
func (entry *Entry) RestoreLateFileFlags(f *os.File) error {
if entry.Attributes == nil {
return nil
}
if v, have := (*entry.Attributes)[bsdFileFlagsKey]; have {
LOG_DEBUG("ATTR_RESTORE", "Restore flags 0x%x for %s", binary.LittleEndian.Uint32(v), entry.Path)
return syscall.Fchflags(int(f.Fd()), int(binary.LittleEndian.Uint32(v)))
}
return nil
}

View File

@@ -1,109 +0,0 @@
// Copyright (c) Acrosync LLC. All rights reserved.
// Free for personal use and commercial trial
// Commercial use requires per-user licenses available from https://duplicacy.com
//go:build freebsd || netbsd || darwin
// +build freebsd netbsd darwin
package duplicacy
import (
"bytes"
"encoding/binary"
"os"
"path/filepath"
"syscall"
"github.com/pkg/xattr"
)
const bsdFileFlagsKey = "\x00bf"
func (entry *Entry) ReadAttributes(top string) {
fullPath := filepath.Join(top, entry.Path)
fileInfo, err := os.Lstat(fullPath)
if err != nil {
return
}
if !entry.IsSpecial() {
attributes, _ := xattr.LList(fullPath)
if len(attributes) > 0 {
entry.Attributes = &map[string][]byte{}
for _, name := range attributes {
attribute, err := xattr.LGet(fullPath, name)
if err == nil {
(*entry.Attributes)[name] = attribute
}
}
}
}
if err := entry.readFileFlags(fileInfo); err != nil {
LOG_INFO("ATTR_BACKUP", "Could not backup flags for file %s: %v", fullPath, err)
}
}
func (entry *Entry) SetAttributesToFile(fullPath string) {
if !entry.IsSpecial() {
names, _ := xattr.LList(fullPath)
for _, name := range names {
newAttribute, found := (*entry.Attributes)[name]
if found {
oldAttribute, _ := xattr.LGet(fullPath, name)
if !bytes.Equal(oldAttribute, newAttribute) {
xattr.LSet(fullPath, name, newAttribute)
}
delete(*entry.Attributes, name)
} else {
xattr.LRemove(fullPath, name)
}
}
for name, attribute := range *entry.Attributes {
if len(name) > 0 && name[0] == '\x00' {
continue
}
xattr.LSet(fullPath, name, attribute)
}
}
if err := entry.restoreLateFileFlags(fullPath); err != nil {
LOG_DEBUG("ATTR_RESTORE", "Could not restore flags for file %s: %v", fullPath, err)
}
}
func (entry *Entry) readFileFlags(fileInfo os.FileInfo) error {
stat, ok := fileInfo.Sys().(*syscall.Stat_t)
if ok && stat.Flags != 0 {
if entry.Attributes == nil {
entry.Attributes = &map[string][]byte{}
}
v := make([]byte, 4)
binary.LittleEndian.PutUint32(v, stat.Flags)
(*entry.Attributes)[bsdFileFlagsKey] = v
LOG_DEBUG("ATTR_READ", "Read flags 0x%x for %s", stat.Flags, entry.Path)
}
return nil
}
func (entry *Entry) RestoreEarlyDirFlags(path string) error {
return nil
}
func (entry *Entry) RestoreEarlyFileFlags(f *os.File) error {
return nil
}
func (entry *Entry) RestoreSpecial(fullPath string) error {
mode := entry.Mode & uint32(fileModeMask)
if entry.Mode&uint32(os.ModeNamedPipe) != 0 {
mode |= syscall.S_IFIFO
} else if entry.Mode&uint32(os.ModeCharDevice) != 0 {
mode |= syscall.S_IFCHR
} else if entry.Mode&uint32(os.ModeDevice) != 0 {
mode |= syscall.S_IFBLK
} else {
return nil
}
return syscall.Mknod(fullPath, mode, int(entry.GetRdev()))
}

View File

@@ -5,29 +5,10 @@
package duplicacy
import (
"encoding/binary"
"os"
"strings"
"syscall"
)
func excludedByAttribute(attributes map[string][]byte) bool {
value, ok := attributes["com.apple.metadata:com_apple_backup_excludeItem"]
return ok && strings.Contains(string(value), "com.apple.backupd")
}
func (entry *Entry) restoreLateFileFlags(path string) error {
if entry.Attributes == nil {
return nil
}
if v, have := (*entry.Attributes)[bsdFileFlagsKey]; have {
f, err := os.OpenFile(path, os.O_RDONLY|syscall.O_SYMLINK, 0)
if err != nil {
return err
}
err = syscall.Fchflags(int(f.Fd()), int(binary.LittleEndian.Uint32(v)))
f.Close()
return err
}
return nil
}

View File

@@ -5,14 +5,10 @@
package duplicacy
import (
"bytes"
"encoding/binary"
"os"
"path/filepath"
"syscall"
"unsafe"
"github.com/pkg/xattr"
)
const (
@@ -51,116 +47,10 @@ func ioctl(f *os.File, request uintptr, attrp *uint32) error {
return nil
}
type xattrHandle struct {
f *os.File
fullPath string
}
func (x xattrHandle) list() ([]string, error) {
if x.f != nil {
return xattr.FList(x.f)
} else {
return xattr.LList(x.fullPath)
func (entry *Entry) ReadFileFlags(f *os.File) error {
if entry.IsSpecial() {
return nil
}
}
func (x xattrHandle) get(name string) ([]byte, error) {
if x.f != nil {
return xattr.FGet(x.f, name)
} else {
return xattr.LGet(x.fullPath, name)
}
}
func (x xattrHandle) set(name string, value []byte) error {
if x.f != nil {
return xattr.FSet(x.f, name, value)
} else {
return xattr.LSet(x.fullPath, name, value)
}
}
func (x xattrHandle) remove(name string) error {
if x.f != nil {
return xattr.FRemove(x.f, name)
} else {
return xattr.LRemove(x.fullPath, name)
}
}
func (entry *Entry) ReadAttributes(top string) {
fullPath := filepath.Join(top, entry.Path)
x := xattrHandle{nil, fullPath}
if !entry.IsLink() {
var err error
x.f, err = os.OpenFile(fullPath, os.O_RDONLY|syscall.O_NOFOLLOW|syscall.O_NONBLOCK, 0)
if err != nil {
// FIXME: We really should return errors for failure to read
return
}
}
attributes, _ := x.list()
if len(attributes) > 0 {
entry.Attributes = &map[string][]byte{}
}
for _, name := range attributes {
attribute, err := x.get(name)
if err == nil {
(*entry.Attributes)[name] = attribute
}
}
if entry.IsFile() || entry.IsDir() {
if err := entry.readFileFlags(x.f); err != nil {
LOG_INFO("ATTR_BACKUP", "Could not backup flags for file %s: %v", fullPath, err)
}
}
x.f.Close()
}
func (entry *Entry) SetAttributesToFile(fullPath string) {
x := xattrHandle{nil, fullPath}
if !entry.IsLink() {
var err error
x.f, err = os.OpenFile(fullPath, os.O_RDONLY|syscall.O_NOFOLLOW, 0)
if err != nil {
return
}
}
names, _ := x.list()
for _, name := range names {
newAttribute, found := (*entry.Attributes)[name]
if found {
oldAttribute, _ := x.get(name)
if !bytes.Equal(oldAttribute, newAttribute) {
x.set(name, newAttribute)
}
delete(*entry.Attributes, name)
} else {
x.remove(name)
}
}
for name, attribute := range *entry.Attributes {
if len(name) > 0 && name[0] == '\x00' {
continue
}
x.set(name, attribute)
}
if entry.IsFile() || entry.IsDir() {
if err := entry.restoreLateFileFlags(x.f); err != nil {
LOG_DEBUG("ATTR_RESTORE", "Could not restore flags for file %s: %v", fullPath, err)
}
}
x.f.Close()
}
func (entry *Entry) readFileFlags(f *os.File) error {
var flags uint32
if err := ioctl(f, linux_FS_IOC_GETFLAGS, &flags); err != nil {
return err
@@ -207,7 +97,7 @@ func (entry *Entry) RestoreEarlyFileFlags(f *os.File) error {
return nil
}
func (entry *Entry) restoreLateFileFlags(f *os.File) error {
func (entry *Entry) RestoreLateFileFlags(f *os.File) error {
if entry.Attributes == nil {
return nil
}
@@ -219,23 +109,6 @@ func (entry *Entry) restoreLateFileFlags(f *os.File) error {
return nil
}
func (entry *Entry) RestoreSpecial(fullPath string) error {
mode := entry.Mode & uint32(fileModeMask)
if entry.Mode&uint32(os.ModeNamedPipe) != 0 {
mode |= syscall.S_IFIFO
} else if entry.Mode&uint32(os.ModeCharDevice) != 0 {
mode |= syscall.S_IFCHR
} else if entry.Mode&uint32(os.ModeDevice) != 0 {
mode |= syscall.S_IFBLK
} else if entry.Mode&uint32(os.ModeSocket) != 0 {
mode |= syscall.S_IFSOCK
} else {
return nil
}
return syscall.Mknod(fullPath, mode, int(entry.GetRdev()))
}
func excludedByAttribute(attributes map[string][]byte) bool {
_, ok := attributes["user.duplicacy_exclude"]
return ok

View File

@@ -2,17 +2,18 @@
// Free for personal use and commercial trial
// Commercial use requires per-user licenses available from https://duplicacy.com
//go:build !windows
// +build !windows
package duplicacy
import (
"bytes"
"os"
"path"
"path/filepath"
"syscall"
"golang.org/x/sys/unix"
"github.com/pkg/xattr"
)
func Readlink(path string) (isRegular bool, s string, err error) {
@@ -46,12 +47,66 @@ func SetOwner(fullPath string, entry *Entry, fileInfo *os.FileInfo) bool {
return true
}
func (entry *Entry) ReadAttributes(top string) {
fullPath := filepath.Join(top, entry.Path)
f, err := os.OpenFile(fullPath, os.O_RDONLY|syscall.O_NOFOLLOW|syscall.O_NONBLOCK, 0)
if err != nil {
return
}
attributes, _ := xattr.FList(f)
if len(attributes) > 0 {
entry.Attributes = &map[string][]byte{}
for _, name := range attributes {
attribute, err := xattr.Get(fullPath, name)
if err == nil {
(*entry.Attributes)[name] = attribute
}
}
}
if err := entry.ReadFileFlags(f); err != nil {
LOG_INFO("ATTR_BACKUP", "Could not backup flags for file %s: %v", fullPath, err)
}
f.Close()
}
func (entry *Entry) SetAttributesToFile(fullPath string) {
f, err := os.OpenFile(fullPath, os.O_RDONLY|syscall.O_NOFOLLOW, 0)
if err != nil {
return
}
names, _ := xattr.FList(f)
for _, name := range names {
newAttribute, found := (*entry.Attributes)[name]
if found {
oldAttribute, _ := xattr.FGet(f, name)
if !bytes.Equal(oldAttribute, newAttribute) {
xattr.FSet(f, name, newAttribute)
}
delete(*entry.Attributes, name)
} else {
xattr.FRemove(f, name)
}
}
for name, attribute := range *entry.Attributes {
if len(name) > 0 && name[0] == '\x00' {
continue
}
xattr.FSet(f, name, attribute)
}
if err := entry.RestoreLateFileFlags(f); err != nil {
LOG_DEBUG("ATTR_RESTORE", "Could not restore flags for file %s: %v", fullPath, err)
}
f.Close()
}
func (entry *Entry) ReadSpecial(fileInfo os.FileInfo) bool {
if fileInfo.Mode()&(os.ModeDevice|os.ModeCharDevice) == 0 {
if fileInfo.Mode() & (os.ModeDevice | os.ModeCharDevice) == 0 {
return true
}
stat := fileInfo.Sys().(*syscall.Stat_t)
if stat == nil {
stat, ok := fileInfo.Sys().(*syscall.Stat_t)
if !ok || stat == nil {
return false
}
entry.Size = 0
@@ -61,20 +116,20 @@ func (entry *Entry) ReadSpecial(fileInfo os.FileInfo) bool {
return true
}
func (entry *Entry) GetRdev() uint64 {
return uint64(entry.StartChunk) | uint64(entry.StartOffset)<<32
}
func (entry *Entry) IsSameSpecial(fileInfo os.FileInfo) bool {
stat := fileInfo.Sys().(*syscall.Stat_t)
if stat == nil {
return false
func (entry *Entry) RestoreSpecial(fullPath string) error {
if entry.Mode & uint32(os.ModeDevice | os.ModeCharDevice) != 0 {
mode := entry.Mode & uint32(fileModeMask)
if entry.Mode & uint32(os.ModeCharDevice) != 0 {
mode |= syscall.S_IFCHR
} else {
mode |= syscall.S_IFBLK
}
rdev := uint64(entry.StartChunk) | uint64(entry.StartOffset) << 32
return syscall.Mknod(fullPath, mode, int(rdev))
} else if entry.Mode & uint32(os.ModeNamedPipe) != 0 {
return syscall.Mkfifo(fullPath, uint32(entry.Mode))
}
return (uint32(fileInfo.Mode()) == entry.Mode) && (uint64(stat.Rdev) == entry.GetRdev())
}
func MakeHardlink(source string, target string) error {
return unix.Linkat(unix.AT_FDCWD, source, unix.AT_FDCWD, target, 0)
return nil
}
func joinPath(components ...string) string {

View File

@@ -125,10 +125,6 @@ func (entry *Entry) RestoreSpecial(fullPath string) error {
return nil
}
func MakeHardlink(source string, target string) error {
return os.Link(source, target)
}
func joinPath(components ...string) string {
combinedPath := `\\?\` + filepath.Join(components...)

View File

@@ -1,30 +0,0 @@
// Copyright (c) Acrosync LLC. All rights reserved.
// Free for personal use and commercial trial
// Commercial use requires per-user licenses available from https://duplicacy.com
//go:build freebsd || netbsd
// +build freebsd netbsd
package duplicacy
import (
"bytes"
"encoding/binary"
"os"
"path/filepath"
"syscall"
"github.com/pkg/xattr"
)
func (entry *Entry) restoreLateFileFlags(path string) error {
if entry.Attributes == nil {
return nil
}
if v, have := (*entry.Attributes)[bsdFileFlagsKey]; have {
if _, _, errno := syscall.Syscall(syscall.SYS_LCHFLAGS, uintptr(unsafe.Pointer(syscall.StringBytePtr(path))), uintptr(v), 0); errno != 0 {
return os.NewSyscallError("lchflags", errno)
}
}
return nil
}