Mirror of https://github.com/jkl1337/duplicacy.git (synced 2026-01-04 20:54:44 -06:00)

Compare commits: wip-hardli...xattr-excl

5 Commits
| Author | SHA1 | Date |
|---|---|---|
|  | e30bf3b9bc |  |
|  | bd2849183c |  |
|  | 50120146df |  |
|  | 7bfc0e7d51 |  |
|  | fd3bceae19 |  |
@@ -2262,7 +2262,7 @@ func main() {
     app.Name = "duplicacy"
     app.HelpName = "duplicacy"
     app.Usage = "A new generation cloud backup tool based on lock-free deduplication"
-    app.Version = "3.2.0" + " (" + GitCommit + ")"
+    app.Version = "3.2.2" + " (" + GitCommit + ")"

     // Exit with code 2 if an invalid command is provided
     app.CommandNotFound = func(context *cli.Context, command string) {
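For context, the CommandNotFound hook above follows the func(*cli.Context, string) shape of the urfave/cli v1 API. A minimal sketch of wiring it to exit with status 2 (duplicacy vendors its own cli fork, so the import path and app name below are assumptions):

```go
package main

import (
	"fmt"
	"os"

	"github.com/urfave/cli" // illustrative; duplicacy uses a vendored fork
)

func main() {
	app := cli.NewApp()
	app.Name = "demo"
	// Mirror the diff: exit with code 2 when an unknown subcommand is given.
	app.CommandNotFound = func(context *cli.Context, command string) {
		fmt.Fprintf(os.Stderr, "Invalid command: %s\n", command)
		os.Exit(2)
	}
	if err := app.Run(os.Args); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
}
```

Running `demo bogus` prints the message, and `echo $?` afterwards reports 2.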
@@ -622,11 +622,6 @@ func (manager *BackupManager) Backup(top string, quickMode bool, threads int, ta
         return true
     }

-type hardLinkEntry struct {
-    entry        *Entry
-    willDownload bool
-}
-
 // Restore downloads the specified snapshot, compares it with what's on the repository, and then downloads
 // files that are different. 'base' is a directory that contains files at a different revision which can
 // serve as a local cache to avoid download chunks available locally. It is perfectly ok for 'base' to be
@@ -708,16 +703,8 @@ func (manager *BackupManager) Restore(top string, revision int, inPlace bool, qu
     var localEntry *Entry
     localListingOK := true

-    hardLinkTable := make(map[string]hardLinkEntry)
-    hardLinks := make([]*Entry, 0)
-
     for remoteEntry := range remoteListingChannel {

-        if remoteEntry.IsFile() && remoteEntry.Link == "/" {
-            LOG_INFO("RESTORE_LINK", "Noting hardlinked source file %s", remoteEntry.Path)
-            hardLinkTable[remoteEntry.Path] = hardLinkEntry{remoteEntry, false}
-        }
-
         if len(patterns) > 0 && !MatchPath(remoteEntry.Path, patterns) {
             continue
         }
@@ -726,8 +713,6 @@ func (manager *BackupManager) Restore(top string, revision int, inPlace bool, qu
         var compareResult int

         for {
-            // TODO: We likely need to check if a local listing file exists in the hardLinkTable for the case where one is restoring a hardlink
-            // to an existing disk file. Right now, we'll just end up downloading the file new.
             if localEntry == nil && localListingOK {
                 localEntry, localListingOK = <- localListingChannel
             }
@@ -745,22 +730,12 @@ func (manager *BackupManager) Restore(top string, revision int, inPlace bool, qu
             }

             if compareResult == 0 {
-                if quickMode && localEntry.IsFile() {
-                    checkEntry := remoteEntry
-                    if len(remoteEntry.Link) > 0 && remoteEntry.Link != "/" {
-                        if e, ok := hardLinkTable[remoteEntry.Link]; !ok {
-                            LOG_ERROR("RESTORE_LINK", "Source file %s for hardlink %s missing", remoteEntry.Link, remoteEntry.Path)
-                        } else {
-                            checkEntry = e.entry
-                        }
-                    }
-                    if localEntry.IsSameAs(checkEntry) {
-                        LOG_TRACE("RESTORE_SKIP", "File %s unchanged (by size and timestamp)", localEntry.Path)
-                        skippedFileSize += localEntry.Size
-                        skippedFileCount++
-                        localEntry = nil
-                        continue
-                    }
+                if quickMode && localEntry.IsFile() && localEntry.IsSameAs(remoteEntry) {
+                    LOG_TRACE("RESTORE_SKIP", "File %s unchanged (by size and timestamp)", localEntry.Path)
+                    skippedFileSize += localEntry.Size
+                    skippedFileCount++
+                    localEntry = nil
+                    continue
                 }
                 localEntry = nil
             }
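The quick-mode skip in both the old and new branches hinges on Entry.IsSameAs, which per the RESTORE_SKIP message compares size and timestamp only; content is not hashed. A sketch of that kind of check, where the entry type is a stand-in and not duplicacy's real Entry:

```go
package main

import (
	"fmt"
	"os"
)

// entry models just the two fields a size-and-timestamp comparison needs.
type entry struct {
	Size int64
	Time int64 // modification time, Unix seconds
}

// isSameAs reports whether the on-disk file matches by size and mtime.
func (e entry) isSameAs(info os.FileInfo) bool {
	return info.Size() == e.Size && info.ModTime().Unix() == e.Time
}

func main() {
	info, err := os.Stat("/etc/hosts") // any existing file works
	if err != nil {
		return
	}
	e := entry{Size: info.Size(), Time: info.ModTime().Unix()}
	fmt.Println(e.isSameAs(info)) // true by construction
}
```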
@@ -807,21 +782,6 @@ func (manager *BackupManager) Restore(top string, revision int, inPlace bool, qu
                 }
                 directoryEntries = append(directoryEntries, remoteEntry)
             } else {
-                if remoteEntry.Link == "/" {
-                    hardLinkTable[remoteEntry.Path] = hardLinkEntry{remoteEntry, true}
-                } else if len(remoteEntry.Link) > 0 {
-                    if e, ok := hardLinkTable[remoteEntry.Link]; !ok {
-                        LOG_ERROR("RESTORE_LINK", "Source file %s for hardlink %s missing", remoteEntry.Link, remoteEntry.Path)
-                    } else if !e.willDownload {
-                        origSourcePath := e.entry.Path
-                        e.entry.Path = remoteEntry.Path
-                        remoteEntry = e.entry
-                        hardLinkTable[origSourcePath] = hardLinkEntry{remoteEntry, true}
-                    } else {
-                        hardLinks = append(hardLinks, remoteEntry)
-                        continue
-                    }
-                }
                 // We can't download files here since fileEntries needs to be sorted
                 fileEntries = append(fileEntries, remoteEntry)
                 totalFileSize += remoteEntry.Size
@@ -877,11 +837,7 @@ func (manager *BackupManager) Restore(top string, revision int, inPlace bool, qu
             stat, _ := os.Stat(fullPath)
             if stat != nil {
                 if quickMode {
-                    cmpFile := file
-                    if file.IsFile() && len(file.Link) > 0 && file.Link != "/" {
-                        cmpFile = hardLinkTable[file.Link].entry
-                    }
-                    if cmpFile.IsSameAsFileInfo(stat) {
+                    if file.IsSameAsFileInfo(stat) {
                         LOG_TRACE("RESTORE_SKIP", "File %s unchanged (by size and timestamp)", file.Path)
                         skippedFileSize += file.Size
                         skippedFileCount++
@@ -918,6 +874,7 @@ func (manager *BackupManager) Restore(top string, revision int, inPlace bool, qu
                 downloadedFileSize += file.Size
                 downloadedFiles = append(downloadedFiles, file)
             }
+
             continue
         }

@@ -944,15 +901,6 @@ func (manager *BackupManager) Restore(top string, revision int, inPlace bool, qu
             file.RestoreMetadata(fullPath, nil, setOwner)
         }

-        for _, linkEntry := range hardLinks {
-            sourcePath := joinPath(top, hardLinkTable[linkEntry.Link].entry.Path)
-            fullPath := joinPath(top, linkEntry.Path)
-            LOG_INFO("DOWNLOAD_LINK", "Hard linking %s -> %s", fullPath, sourcePath)
-            if err := os.Link(sourcePath, fullPath); err != nil {
-                LOG_ERROR("DOWNLOAD_LINK", "Failed to create hard link %s -> %s", fullPath, sourcePath)
-            }
-        }
-
         if deleteMode && len(patterns) == 0 {
             // Reverse the order to make sure directories are empty before being deleted
             for i := range extraFiles {
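The deleted loop above was the final restore pass: once all regular files existed on disk, each pending hardlink was recreated with os.Link from its source path. A standalone sketch of that step (paths are hypothetical):

```go
package main

import (
	"log"
	"os"
)

func main() {
	source := "/tmp/restore/original.bin" // already-restored file
	link := "/tmp/restore/alias.bin"      // hardlink to recreate

	// os.Link fails if the source is missing or the link name already
	// exists; the removed code logged such failures and moved on.
	if err := os.Link(source, link); err != nil {
		log.Fatalf("Failed to create hard link %s -> %s: %v", link, source, err)
	}
}
```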
@@ -18,7 +18,6 @@ import (
     "time"
     "bytes"
     "crypto/sha256"
-    "syscall"

     "github.com/vmihailenco/msgpack"

@@ -695,26 +694,10 @@ func (files FileInfoCompare) Less(i, j int) bool {
     }
 }

-type listEntryLinkKey struct {
-    dev uint64
-    ino uint64
-}
-
-type ListingState struct {
-    linkTable map[listEntryLinkKey]string // map unique inode details to initially found path
-}
-
-func NewListingState() *ListingState {
-    return &ListingState{
-        linkTable: make(map[listEntryLinkKey]string),
-    }
-}
-
 // ListEntries returns a list of entries representing file and subdirectories under the directory 'path'. Entry paths
 // are normalized as relative to 'top'. 'patterns' are used to exclude or include certain files.
-func ListEntries(top string, path string, patterns []string, nobackupFile string, excludeByAttribute bool,
-    listingState *ListingState,
-    listingChannel chan *Entry) (directoryList []*Entry, skippedFiles []string, err error) {
+func ListEntries(top string, path string, patterns []string, nobackupFile string, excludeByAttribute bool, listingChannel chan *Entry) (directoryList []*Entry,
+    skippedFiles []string, err error) {

     LOG_DEBUG("LIST_ENTRIES", "Listing %s", path)

@@ -801,21 +784,6 @@ func ListEntries(top string, path string, patterns []string, nobackupFile string
             continue
         }

-        if entry.IsFile() {
-            stat, ok := f.Sys().(*syscall.Stat_t)
-            if ok && stat != nil && stat.Nlink > 1 {
-                k := listEntryLinkKey{dev: uint64(stat.Dev), ino: uint64(stat.Ino)}
-                if path, ok := listingState.linkTable[k]; ok {
-                    LOG_WARN("LIST_LINK", "Linking %s to %s", entry.Path, path)
-                    entry.Link = path
-                    entry.Size = 0
-                } else {
-                    entry.Link = "/"
-                    listingState.linkTable[k] = entry.Path
-                }
-            }
-        }
-
         if entry.IsDir() {
             directoryList = append(directoryList, entry)
         } else {
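The deleted block implemented hardlink detection during listing: any regular file whose stat link count exceeds one is keyed by its (device, inode) pair; the first occurrence is tagged Link = "/" as the source, and later occurrences point back to it with Size = 0. A self-contained Unix sketch of the same detection:

```go
package main

import (
	"fmt"
	"os"
	"syscall"
)

func main() {
	seen := map[[2]uint64]string{} // (dev, ino) -> first path observed

	for _, path := range os.Args[1:] {
		info, err := os.Lstat(path)
		if err != nil || !info.Mode().IsRegular() {
			continue
		}
		stat, ok := info.Sys().(*syscall.Stat_t)
		if !ok || stat.Nlink <= 1 {
			continue // not hardlinked
		}
		key := [2]uint64{uint64(stat.Dev), uint64(stat.Ino)}
		if first, found := seen[key]; found {
			fmt.Printf("%s is a hardlink of %s\n", path, first)
		} else {
			seen[key] = path // first sighting becomes the source
		}
	}
}
```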
@@ -240,10 +240,12 @@ func TestEntryExcludeByAttribute(t *testing.T) {
     if runtime.GOOS == "darwin" {
         excludeAttrName = "com.apple.metadata:com_apple_backup_excludeItem"
         excludeAttrValue = []byte("com.apple.backupd")
-    } else if runtime.GOOS == "linux" || runtime.GOOS == "freebsd" || runtime.GOOS == "netbsd" || runtime.GOOS == "solaris" {
+    } else if runtime.GOOS == "linux" {
         excludeAttrName = "user.duplicacy_exclude"
+    } else if runtime.GOOS == "freebsd" || runtime.GOOS == "netbsd" {
+        excludeAttrName = "duplicacy_exclude"
     } else {
-        t.Skip("skipping test, not darwin, linux, freebsd, netbsd, or solaris")
+        t.Skip("skipping test, not darwin, linux, freebsd, or netbsd")
     }

     testDir := filepath.Join(os.TempDir(), "duplicacy_test")
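The test drives excludedByAttribute by setting an extended attribute on the file; note the Linux name keeps the user. namespace while the BSD name does not, matching the two excludedByAttribute implementations later in the diff. A sketch of setting that attribute with github.com/pkg/xattr (assuming that is the library in play; the path is hypothetical):

```go
package main

import (
	"log"
	"runtime"

	"github.com/pkg/xattr"
)

func main() {
	name := "user.duplicacy_exclude"
	if runtime.GOOS == "freebsd" || runtime.GOOS == "netbsd" {
		name = "duplicacy_exclude" // BSD extattr names carry no namespace prefix
	}
	// Presence alone is what excludedByAttribute checks, so an empty value suffices.
	if err := xattr.Set("/tmp/exclude-me", name, []byte{}); err != nil {
		log.Fatal(err)
	}
}
```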
@@ -90,40 +90,48 @@ func (storage *S3Storage) ListFiles(threadIndex int, dir string) (files []string

     if dir == "snapshots/" {
         dir = storage.storageDir + dir
-        input := s3.ListObjectsV2Input{
+        input := s3.ListObjectsInput{
             Bucket:    aws.String(storage.bucket),
             Prefix:    aws.String(dir),
             Delimiter: aws.String("/"),
             MaxKeys:   aws.Int64(1000),
         }

-        err := storage.client.ListObjectsV2Pages(&input, func(page *s3.ListObjectsV2Output, lastPage bool) bool {
-            for _, subDir := range page.CommonPrefixes {
-                files = append(files, (*subDir.Prefix)[len(dir):])
-            }
-            return true
-        })
+        output, err := storage.client.ListObjects(&input)
         if err != nil {
             return nil, nil, err
         }

+        for _, subDir := range output.CommonPrefixes {
+            files = append(files, (*subDir.Prefix)[len(dir):])
+        }
         return files, nil, nil
     } else {
         dir = storage.storageDir + dir
-        input := s3.ListObjectsV2Input{
-            Bucket:  aws.String(storage.bucket),
-            Prefix:  aws.String(dir),
-            MaxKeys: aws.Int64(1000),
-        }
+        marker := ""
+        for {
+            input := s3.ListObjectsInput{
+                Bucket:  aws.String(storage.bucket),
+                Prefix:  aws.String(dir),
+                MaxKeys: aws.Int64(1000),
+                Marker:  aws.String(marker),
+            }

-        err := storage.client.ListObjectsV2Pages(&input, func(page *s3.ListObjectsV2Output, lastPage bool) bool {
-            for _, object := range page.Contents {
+            output, err := storage.client.ListObjects(&input)
+            if err != nil {
+                return nil, nil, err
+            }
+
+            for _, object := range output.Contents {
                 files = append(files, (*object.Key)[len(dir):])
                 sizes = append(sizes, *object.Size)
             }
-            return true
-        })
-        if err != nil {
-            return nil, nil, err
+
+            if !*output.IsTruncated {
+                break
+            }
+
+            marker = *output.Contents[len(output.Contents)-1].Key
         }
         return files, sizes, nil
     }
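This hunk swaps the AWS SDK's ListObjectsV2 pagination helper for the original ListObjects call with a manual Marker loop; a plausible motivation is compatibility with S3-compatible services that never implemented V2 listing, though the diff itself does not say. For reference, the removed style looks like this as a standalone aws-sdk-go v1 sketch (bucket and prefix are placeholders):

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	sess := session.Must(session.NewSession())
	client := s3.New(sess)

	input := s3.ListObjectsV2Input{
		Bucket:  aws.String("my-bucket"),
		Prefix:  aws.String("chunks/"),
		MaxKeys: aws.Int64(1000),
	}
	// The callback runs once per page; returning true requests the next page.
	err := client.ListObjectsV2Pages(&input, func(page *s3.ListObjectsV2Output, lastPage bool) bool {
		for _, object := range page.Contents {
			fmt.Println(*object.Key, *object.Size)
		}
		return true
	})
	if err != nil {
		log.Fatal(err)
	}
}
```

The manual loop in the new code achieves the same thing by re-issuing ListObjects with Marker set to the last returned key until IsTruncated comes back false.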
@@ -51,7 +51,7 @@ type Snapshot struct {
 // CreateEmptySnapshot creates an empty snapshot.
 func CreateEmptySnapshot(id string) (snapshto *Snapshot) {
     return &Snapshot{
-        Version:   0x6a6c01,
+        Version:   1,
         ID:        id,
         Revision:  0,
         StartTime: time.Now().Unix(),
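The reverted magic number decodes as ASCII: 0x6a is 'j', 0x6c is 'l', followed by 0x01, so it reads as a fork-private "jl, version 1" snapshot tag (an assumption; the diff does not explain it), while upstream snapshots use Version 1. A one-liner confirming the decomposition:

```go
package main

import "fmt"

func main() {
	v := 0x6a6c01
	fmt.Printf("%c%c v%d\n", byte(v>>16), byte(v>>8), byte(v)) // prints: jl v1
}
```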
@@ -68,7 +68,6 @@ func (snapshot *Snapshot) ListLocalFiles(top string, nobackupFile string,
     skippedDirectories *[]string, skippedFiles *[]string) {

     var patterns []string
-    var listingState = NewListingState()

     if filtersFile == "" {
         filtersFile = joinPath(GetDuplicacyPreferencePath(), "filters")
@@ -82,7 +81,7 @@ func (snapshot *Snapshot) ListLocalFiles(top string, nobackupFile string,

         directory := directories[len(directories)-1]
         directories = directories[:len(directories)-1]
-        subdirectories, skipped, err := ListEntries(top, directory.Path, patterns, nobackupFile, excludeByAttribute, listingState, listingChannel)
+        subdirectories, skipped, err := ListEntries(top, directory.Path, patterns, nobackupFile, excludeByAttribute, listingChannel)
         if err != nil {
             if directory.Path == "" {
                 LOG_ERROR("LIST_FAILURE", "Failed to list the repository root: %v", err)
@@ -161,7 +160,7 @@ func (snapshot *Snapshot)ListRemoteFiles(config *Config, chunkOperator *ChunkOpe
                 return
             }
         }
-    } else if snapshot.Version == 1 || snapshot.Version == 0x6a6c01 {
+    } else if snapshot.Version == 1 {
         decoder := msgpack.NewDecoder(reader)

         lastEndChunk := 0
@@ -756,6 +756,8 @@ func CreateStorage(preference Preference, resetPassword bool, threads int) (stor
             LOG_ERROR("STORAGE_CREATE", "Failed to load the Storj storage at %s: %v", storageURL, err)
             return nil
         }
+        SavePassword(preference, "storj_key", apiKey)
+        SavePassword(preference, "storj_passphrase", passphrase)
         return storjStorage
     } else if matched[1] == "smb" {
         server := matched[3]
@@ -2,13 +2,8 @@
 // Free for personal use and commercial trial
 // Commercial use requires per-user licenses available from https://duplicacy.com

-// +build freebsd netbsd linux solaris
-
 package duplicacy

-import (
-)
-
 func excludedByAttribute(attributes map[string][]byte) bool {
     _, ok := attributes["user.duplicacy_exclude"]
     return ok
src/duplicacy_utils_xbsd.go (new file, +13)
@@ -0,0 +1,13 @@
+// Copyright (c) Acrosync LLC. All rights reserved.
+// Free for personal use and commercial trial
+// Commercial use requires per-user licenses available from https://duplicacy.com
+
+//go:build freebsd || netbsd
+// +build freebsd netbsd
+
+package duplicacy
+
+func excludedByAttribute(attributes map[string][]byte) bool {
+    _, ok := attributes["duplicacy_exclude"]
+    return ok
+}
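The new file carries both constraint forms: //go:build (Go 1.17 and later) and the legacy // +build line for older toolchains. In the legacy syntax a space means OR and a comma means AND, so the two lines here agree; a slightly richer illustration of the correspondence:

```go
// The two constraints below are equivalent: gofmt derives the legacy line
// from the //go:build expression automatically.

//go:build (freebsd || netbsd) && cgo
// +build freebsd,cgo netbsd,cgo

package example
```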