Mirror of https://github.com/jkl1337/duplicacy.git (synced 2026-01-02 11:44:45 -06:00)

Compare commits: b2749b6e20...xattr-excl (4 commits)

| Author | SHA1 | Date |
|---|---|---|
| | e30bf3b9bc | |
| | bd2849183c | |
| | 50120146df | |
| | 7bfc0e7d51 | |

@@ -2262,7 +2262,7 @@ func main() {
app.Name = "duplicacy"
app.HelpName = "duplicacy"
app.Usage = "A new generation cloud backup tool based on lock-free deduplication"
app.Version = "3.2.1" + " (" + GitCommit + ")"
app.Version = "3.2.2" + " (" + GitCommit + ")"

// Exit with code 2 if an invalid command is provided
app.CommandNotFound = func(context *cli.Context, command string) {

@@ -396,7 +396,7 @@ type B2ListFileNamesOutput struct {

func (client *B2Client) ListFileNames(threadIndex int, startFileName string, singleFile bool, includeVersions bool) (files []*B2Entry, err error) {

maxFileCount := 10_000
maxFileCount := 1000
if singleFile {
if includeVersions {
maxFileCount = 4

@@ -703,19 +703,8 @@ func (manager *BackupManager) Restore(top string, revision int, inPlace bool, qu
var localEntry *Entry
localListingOK := true

type hardLinkEntry struct {
entry *Entry
willDownload bool
}
var hardLinkTable []hardLinkEntry
var hardLinks []*Entry

for remoteEntry := range remoteListingChannel {

if remoteEntry.IsHardlinkRoot() {
hardLinkTable = append(hardLinkTable, hardLinkEntry{remoteEntry, false})
}

if len(patterns) > 0 && !MatchPath(remoteEntry.Path, patterns) {
continue
}

@@ -753,7 +742,8 @@ func (manager *BackupManager) Restore(top string, revision int, inPlace bool, qu

fullPath := joinPath(top, remoteEntry.Path)
if remoteEntry.IsLink() {
if stat, _ := os.Lstat(fullPath); stat != nil {
stat, err := os.Lstat(fullPath)
if stat != nil {
if stat.Mode()&os.ModeSymlink != 0 {
isRegular, link, err := Readlink(fullPath)
if err == nil && link == remoteEntry.Link && !isRegular {

@@ -762,16 +752,11 @@ func (manager *BackupManager) Restore(top string, revision int, inPlace bool, qu
}
}

if !overwrite {
LOG_WERROR(allowFailures, "DOWNLOAD_OVERWRITE",
"File %s already exists. Please specify the -overwrite option to overwrite", remoteEntry.Path)
continue
}

os.Remove(fullPath)
}

if err := os.Symlink(remoteEntry.Link, fullPath); err != nil {
err = os.Symlink(remoteEntry.Link, fullPath)
if err != nil {
LOG_ERROR("RESTORE_SYMLINK", "Can't create symlink %s: %v", remoteEntry.Path, err)
return 0
}

@@ -795,24 +780,8 @@ func (manager *BackupManager) Restore(top string, revision int, inPlace bool, qu
return 0
}
}
remoteEntry.RestoreEarlyDirFlags(fullPath)
directoryEntries = append(directoryEntries, remoteEntry)
} else {
if remoteEntry.IsHardlinkRoot() {
hardLinkTable[len(hardLinkTable)-1] = hardLinkEntry{remoteEntry, true}
} else if remoteEntry.IsHardlinkedFrom() {
i, err := strconv.ParseUint(remoteEntry.Link, 16, 64)
if err != nil {
LOG_ERROR("RESTORE_HARDLINK", "Decode error in hardlink entry, expected hex int, got %s", remoteEntry.Link)
return 0
}
if !hardLinkTable[i].willDownload {
hardLinkTable[i] = hardLinkEntry{remoteEntry, true}
} else {
hardLinks = append(hardLinks, remoteEntry)
continue
}
}
// We can't download files here since fileEntries needs to be sorted
fileEntries = append(fileEntries, remoteEntry)
totalFileSize += remoteEntry.Size

@@ -932,17 +901,6 @@ func (manager *BackupManager) Restore(top string, revision int, inPlace bool, qu
file.RestoreMetadata(fullPath, nil, setOwner)
}

for _, linkEntry := range hardLinks {
i, _ := strconv.ParseUint(linkEntry.Link, 16, 64)
sourcePath := joinPath(top, hardLinkTable[i].entry.Path)
fullPath := joinPath(top, linkEntry.Path)
LOG_INFO("RESTORE_HARDLINK", "Hard linking %s to %s", fullPath, sourcePath)
if err := os.Link(sourcePath, fullPath); err != nil {
LOG_ERROR("RESTORE_HARDLINK", "Failed to create hard link %s to %s", fullPath, sourcePath)
return 0
}
}

if deleteMode && len(patterns) == 0 {
// Reverse the order to make sure directories are empty before being deleted
for i := range extraFiles {

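The restore path in the hunks above records the first occurrence of each hard-linked inode (the "root", stored with Link == "/") and re-creates every later occurrence with os.Link once the root has been written. The following is a minimal, hedged sketch of that idea only; restoreEntry and restoreAll are hypothetical simplifications, not duplicacy's types:

```go
package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// restoreEntry is a hypothetical, simplified stand-in for duplicacy's Entry:
// Link is "" for an ordinary file, "/" for the first occurrence of a
// hard-linked inode (the "root"), or the hex index of that root otherwise.
type restoreEntry struct {
	Path string
	Link string
	Data []byte
}

func restoreAll(top string, entries []restoreEntry) error {
	var roots []string      // restored paths of hard-link roots, in order
	var pending [][2]string // (existing root path, new link path)

	for _, e := range entries {
		full := filepath.Join(top, e.Path)
		if e.Link != "" && e.Link != "/" {
			// A later occurrence: remember it and link it after the roots exist.
			var i int
			if _, err := fmt.Sscanf(e.Link, "%x", &i); err != nil {
				return err
			}
			pending = append(pending, [2]string{roots[i], full})
			continue
		}
		if err := os.WriteFile(full, e.Data, 0o644); err != nil {
			return err
		}
		if e.Link == "/" {
			roots = append(roots, full)
		}
	}
	// Create the hard links only after every root has been written.
	for _, p := range pending {
		if err := os.Link(p[0], p[1]); err != nil {
			return err
		}
	}
	return nil
}

func main() {
	dir, _ := os.MkdirTemp("", "hardlink-restore")
	err := restoreAll(dir, []restoreEntry{
		{Path: "a.txt", Link: "/", Data: []byte("hello")},
		{Path: "b.txt", Link: "0"}, // same inode as a.txt after restore
	})
	fmt.Println(dir, err)
}
```
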
@@ -1094,13 +1052,8 @@ func (manager *BackupManager) UploadSnapshot(chunkOperator *ChunkOperator, top s

lastEndChunk := 0

type hardLinkEntry struct {
entry *Entry
startChunk int
}
var hardLinkTable []hardLinkEntry

uploadEntryInfoFunc := func(entry *Entry) error {

if entry.IsFile() && entry.Size > 0 {
delta := entry.StartChunk - len(chunkHashes) + 1
if entry.StartChunk != lastChunk {

@@ -1118,38 +1071,10 @@
entry.StartChunk -= delta
entry.EndChunk -= delta

if entry.IsHardlinkRoot() {
LOG_DEBUG("SNAPSHOT_UPLOAD", "Hard link root %s %v %v", entry.Path, entry.StartChunk, entry.EndChunk)
hardLinkTable = append(hardLinkTable, hardLinkEntry{entry, entry.StartChunk})
}

delta = entry.EndChunk - entry.StartChunk
entry.StartChunk -= lastEndChunk
lastEndChunk = entry.EndChunk
entry.EndChunk = delta
} else if entry.IsHardlinkedFrom() {
i, err := strconv.ParseUint(entry.Link, 16, 64)
if err != nil {
LOG_ERROR("SNAPSHOT_UPLOAD", "Decode error in hardlink entry, expected hex int, got %s", entry.Link)
return err
}

targetEntry := hardLinkTable[i].entry
var startChunk, endChunk int

if targetEntry.Size > 0 {
startChunk = hardLinkTable[i].startChunk - lastEndChunk
endChunk = targetEntry.EndChunk
}
entry = entry.HardLinkTo(targetEntry, startChunk, endChunk)

if targetEntry.Size > 0 {
lastEndChunk = hardLinkTable[i].startChunk + endChunk
}

LOG_DEBUG("SNAPSHOT_UPLOAD", "Uploading cloned hardlink for %s to %s (%v %v)", entry.Path, targetEntry.Path, startChunk, endChunk)
} else if entry.IsHardlinkRoot() {
hardLinkTable = append(hardLinkTable, hardLinkEntry{entry, 0})
}

buffer.Reset()

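The StartChunk/EndChunk rewriting in this hunk is a delta encoding: a file's start chunk is stored as the gap from the previous file's last chunk, and its end chunk as the number of additional chunks it spans. A toy sketch of just that arithmetic (the values are illustrative, not snapshot-format details):

```go
package main

import "fmt"

// Toy illustration of the chunk-range delta encoding shown above:
// a file spanning absolute chunks [start, end] is stored as
//   storedStart = start - lastEndChunk   (gap from the previous file)
//   storedEnd   = end - start            (chunks spanned beyond the first)
// and lastEndChunk then advances to the file's absolute end chunk.
func main() {
	type span struct{ start, end int }
	files := []span{{0, 2}, {2, 5}, {7, 7}} // absolute chunk indices

	lastEndChunk := 0
	for _, f := range files {
		storedStart := f.start - lastEndChunk
		storedEnd := f.end - f.start
		lastEndChunk = f.end
		fmt.Printf("absolute [%d, %d] -> stored (%d, %d)\n", f.start, f.end, storedStart, storedEnd)
	}
	// Prints:
	//   absolute [0, 2] -> stored (0, 2)
	//   absolute [2, 5] -> stored (0, 3)
	//   absolute [7, 7] -> stored (2, 0)
}
```
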
@@ -1269,7 +1194,6 @@ func (manager *BackupManager) RestoreFile(chunkDownloader *ChunkDownloader, chun
LOG_ERROR("DOWNLOAD_CREATE", "Failed to create the file %s for in-place writing: %v", fullPath, err)
return false, nil
}
entry.RestoreEarlyFileFlags(existingFile)

n := int64(1)
// There is a go bug on Windows (https://github.com/golang/go/issues/21681) that causes Seek to fail

@@ -1453,7 +1377,6 @@ func (manager *BackupManager) RestoreFile(chunkDownloader *ChunkDownloader, chun
return false, nil
}
}
entry.RestoreEarlyFileFlags(existingFile)

existingFile.Seek(0, 0)

@@ -1536,7 +1459,6 @@ func (manager *BackupManager) RestoreFile(chunkDownloader *ChunkDownloader, chun
LOG_ERROR("DOWNLOAD_OPEN", "Failed to open file for writing: %v", err)
return false, nil
}
entry.RestoreEarlyFileFlags(newFile)

hasher := manager.config.NewFileHasher()

@@ -4,8 +4,6 @@
package duplicacy

import (
"bytes"
"crypto/sha256"
"encoding/base64"
"encoding/json"
"fmt"

@@ -17,10 +15,12 @@ import (
"sort"
"strconv"
"strings"
"syscall"
"time"
"bytes"
"crypto/sha256"

"github.com/vmihailenco/msgpack"

"github.com/vmihailenco/msgpack"
)

// This is the hidden directory in the repository for storing various files.

@@ -110,36 +110,15 @@ func (entry *Entry) Copy() *Entry {
UID: entry.UID,
GID: entry.GID,

StartChunk: entry.StartChunk,
StartChunk: entry.StartChunk,
StartOffset: entry.StartOffset,
EndChunk: entry.EndChunk,
EndOffset: entry.EndOffset,
EndChunk: entry.EndChunk,
EndOffset: entry.EndOffset,

Attributes: entry.Attributes,
}
}

func (entry *Entry) HardLinkTo(target *Entry, startChunk int, endChunk int) *Entry {
return &Entry{
Path: entry.Path,
Size: target.Size,
Time: target.Time,
Mode: target.Mode,
Link: entry.Link,
Hash: target.Hash,

UID: target.UID,
GID: target.GID,

StartChunk: startChunk,
StartOffset: target.StartOffset,
EndChunk: endChunk,
EndOffset: target.EndOffset,

Attributes: target.Attributes,
}
}

// CreateEntryFromJSON creates an entry from a json description.
func (entry *Entry) UnmarshalJSON(description []byte) (err error) {

@@ -383,12 +362,12 @@ func (entry *Entry) EncodeMsgpack(encoder *msgpack.Encoder) error {

if entry.Attributes != nil {
attributes := make([]string, numberOfAttributes)
i := 0
for attribute := range *entry.Attributes {
attributes[i] = attribute
i++
}
sort.Strings(attributes)
i := 0
for attribute := range *entry.Attributes {
attributes[i] = attribute
i++
}
sort.Strings(attributes)
for _, attribute := range attributes {
err = encoder.EncodeString(attribute)
if err != nil {

@@ -401,7 +380,7 @@ func (entry *Entry) EncodeMsgpack(encoder *msgpack.Encoder) error {
}
}

return nil
return nil
}

func (entry *Entry) DecodeMsgpack(decoder *msgpack.Decoder) error {

@@ -513,22 +492,14 @@ func (entry *Entry) IsComplete() bool {
return entry.Size >= 0
}

func (entry *Entry) IsHardlinkedFrom() bool {
return entry.IsFile() && len(entry.Link) > 0 && entry.Link != "/"
}

func (entry *Entry) IsHardlinkRoot() bool {
return entry.IsFile() && entry.Link == "/"
}

func (entry *Entry) GetPermissions() os.FileMode {
return os.FileMode(entry.Mode) & fileModeMask
}

func (entry *Entry) GetParent() string {
path := entry.Path
if path != "" && path[len(path)-1] == '/' {
path = path[:len(path)-1]
if path != "" && path[len(path) - 1] == '/' {
path = path[:len(path) - 1]
}
i := strings.LastIndex(path, "/")
if i == -1 {

@@ -625,7 +596,7 @@ func ComparePaths(left string, right string) int {
for i := p; i < len(left); i++ {
c3 = left[i]
if c3 == '/' {
last1 = i == len(left)-1
last1 = i == len(left) - 1
break
}
}

@@ -635,7 +606,7 @@ func ComparePaths(left string, right string) int {
for i := p; i < len(right); i++ {
c4 = right[i]
if c4 == '/' {
last2 = i == len(right)-1
last2 = i == len(right) - 1
break
}
}

@@ -723,27 +694,10 @@ func (files FileInfoCompare) Less(i, j int) bool {
}
}

type listEntryLinkKey struct {
dev uint64
ino uint64
}

type ListingState struct {
linkIndex int
linkTable map[listEntryLinkKey]int // map unique inode details to initially found path
}

func NewListingState() *ListingState {
return &ListingState{
linkTable: make(map[listEntryLinkKey]int),
}
}

// ListEntries returns a list of entries representing file and subdirectories under the directory 'path'. Entry paths
// are normalized as relative to 'top'. 'patterns' are used to exclude or include certain files.
func ListEntries(top string, path string, patterns []string, nobackupFile string, excludeByAttribute bool,
listingState *ListingState,
listingChannel chan *Entry) (directoryList []*Entry, skippedFiles []string, err error) {
func ListEntries(top string, path string, patterns []string, nobackupFile string, excludeByAttribute bool, listingChannel chan *Entry) (directoryList []*Entry,
skippedFiles []string, err error) {

LOG_DEBUG("LIST_ENTRIES", "Listing %s", path)

@@ -817,30 +771,6 @@ func ListEntries(top string, path string, patterns []string, nobackupFile string
}
}

if f.Mode()&(os.ModeNamedPipe|os.ModeSocket|os.ModeDevice) != 0 {
LOG_WARN("LIST_SKIP", "Skipped non-regular file %s", entry.Path)
skippedFiles = append(skippedFiles, entry.Path)
continue
}

var linkKey *listEntryLinkKey

if stat, ok := f.Sys().(*syscall.Stat_t); entry.IsFile() && ok && stat != nil && stat.Nlink > 1 {
k := listEntryLinkKey{dev: uint64(stat.Dev), ino: uint64(stat.Ino)}
if linkIndex, seen := listingState.linkTable[k]; seen {
if linkIndex == -1 {
LOG_DEBUG("LIST_EXCLUDE", "%s is excluded by attribute (hardlink)", entry.Path)
continue
}
entry.Size = 0
entry.Link = strconv.FormatInt(int64(linkIndex), 16)
} else {
entry.Link = "/"
listingState.linkTable[k] = -1
linkKey = &k
}
}

entry.ReadAttributes(top)

if excludeByAttribute && entry.Attributes != nil && excludedByAttribute(*entry.Attributes) {

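The listing code above detects hard links from the link count and the (device, inode) pair exposed by *syscall.Stat_t: the first path seen for an inode becomes the root, later paths only record a reference to it. A minimal Unix-only sketch of that detection, independent of duplicacy's Entry and ListingState types:

```go
//go:build !windows

package main

import (
	"fmt"
	"os"
	"syscall"
)

// inodeKey identifies a file system object uniquely across mounts.
type inodeKey struct {
	dev uint64
	ino uint64
}

// groupHardLinks returns, for every inode with more than one link among the
// given paths, the list of paths that refer to it.
func groupHardLinks(paths []string) map[inodeKey][]string {
	groups := make(map[inodeKey][]string)
	for _, p := range paths {
		info, err := os.Lstat(p)
		if err != nil {
			continue
		}
		stat, ok := info.Sys().(*syscall.Stat_t)
		if !ok || stat.Nlink <= 1 {
			continue // not hard-linked (or not a Unix stat)
		}
		k := inodeKey{dev: uint64(stat.Dev), ino: uint64(stat.Ino)}
		groups[k] = append(groups[k], p)
	}
	return groups
}

func main() {
	for k, paths := range groupHardLinks(os.Args[1:]) {
		fmt.Printf("inode %d on dev %d: %v\n", k.ino, k.dev, paths)
	}
}
```
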
@@ -848,9 +778,10 @@ func ListEntries(top string, path string, patterns []string, nobackupFile string
continue
}

if linkKey != nil {
listingState.linkTable[*linkKey] = listingState.linkIndex
listingState.linkIndex++
if f.Mode()&(os.ModeNamedPipe|os.ModeSocket|os.ModeDevice) != 0 {
LOG_WARN("LIST_SKIP", "Skipped non-regular file %s", entry.Path)
skippedFiles = append(skippedFiles, entry.Path)
continue
}

if entry.IsDir() {

@@ -240,10 +240,12 @@ func TestEntryExcludeByAttribute(t *testing.T) {
if runtime.GOOS == "darwin" {
excludeAttrName = "com.apple.metadata:com_apple_backup_excludeItem"
excludeAttrValue = []byte("com.apple.backupd")
} else if runtime.GOOS == "linux" || runtime.GOOS == "freebsd" || runtime.GOOS == "netbsd" || runtime.GOOS == "solaris" {
} else if runtime.GOOS == "linux" {
excludeAttrName = "user.duplicacy_exclude"
} else if runtime.GOOS == "freebsd" || runtime.GOOS == "netbsd" {
excludeAttrName = "duplicacy_exclude"
} else {
t.Skip("skipping test, not darwin, linux, freebsd, netbsd, or solaris")
t.Skip("skipping test, not darwin, linux, freebsd, or netbsd")
}

testDir := filepath.Join(os.TempDir(), "duplicacy_test")

@@ -90,40 +90,48 @@ func (storage *S3Storage) ListFiles(threadIndex int, dir string) (files []string

if dir == "snapshots/" {
dir = storage.storageDir + dir
input := s3.ListObjectsV2Input{
input := s3.ListObjectsInput{
Bucket: aws.String(storage.bucket),
Prefix: aws.String(dir),
Delimiter: aws.String("/"),
MaxKeys: aws.Int64(1000),
}

err := storage.client.ListObjectsV2Pages(&input, func(page *s3.ListObjectsV2Output, lastPage bool) bool {
for _, subDir := range page.CommonPrefixes {
files = append(files, (*subDir.Prefix)[len(dir):])
}
return true
})
output, err := storage.client.ListObjects(&input)
if err != nil {
return nil, nil, err
}

for _, subDir := range output.CommonPrefixes {
files = append(files, (*subDir.Prefix)[len(dir):])
}
return files, nil, nil
} else {
dir = storage.storageDir + dir
input := s3.ListObjectsV2Input{
Bucket: aws.String(storage.bucket),
Prefix: aws.String(dir),
MaxKeys: aws.Int64(1000),
}
marker := ""
for {
input := s3.ListObjectsInput{
Bucket: aws.String(storage.bucket),
Prefix: aws.String(dir),
MaxKeys: aws.Int64(1000),
Marker: aws.String(marker),
}

err := storage.client.ListObjectsV2Pages(&input, func(page *s3.ListObjectsV2Output, lastPage bool) bool {
for _, object := range page.Contents {
output, err := storage.client.ListObjects(&input)
if err != nil {
return nil, nil, err
}

for _, object := range output.Contents {
files = append(files, (*object.Key)[len(dir):])
sizes = append(sizes, *object.Size)
}
return true
})
if err != nil {
return nil, nil, err

if !*output.IsTruncated {
break
}

marker = *output.Contents[len(output.Contents)-1].Key
}
return files, sizes, nil
}

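This hunk trades the SDK's ListObjectsV2Pages helper for a manual loop over the V1 ListObjects call, advancing a Marker until IsTruncated is false. A hedged, self-contained sketch of that pagination pattern with aws-sdk-go v1 follows; the bucket and prefix names are placeholders:

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

// listAll returns every key under prefix, paginating manually with the V1
// ListObjects API via Marker / IsTruncated.
func listAll(client *s3.S3, bucket, prefix string) ([]string, error) {
	var keys []string
	marker := ""
	for {
		output, err := client.ListObjects(&s3.ListObjectsInput{
			Bucket:  aws.String(bucket),
			Prefix:  aws.String(prefix),
			MaxKeys: aws.Int64(1000),
			Marker:  aws.String(marker),
		})
		if err != nil {
			return nil, err
		}
		for _, object := range output.Contents {
			keys = append(keys, *object.Key)
		}
		if output.IsTruncated == nil || !*output.IsTruncated || len(output.Contents) == 0 {
			break
		}
		// With V1 ListObjects the next page starts after the last key returned.
		marker = *output.Contents[len(output.Contents)-1].Key
	}
	return keys, nil
}

func main() {
	sess := session.Must(session.NewSession())
	keys, err := listAll(s3.New(sess), "example-bucket", "chunks/")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(len(keys), "objects")
}
```
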
@@ -68,7 +68,6 @@ func (snapshot *Snapshot) ListLocalFiles(top string, nobackupFile string,
skippedDirectories *[]string, skippedFiles *[]string) {

var patterns []string
listingState := NewListingState()

if filtersFile == "" {
filtersFile = joinPath(GetDuplicacyPreferencePath(), "filters")

@@ -82,7 +81,7 @@ func (snapshot *Snapshot) ListLocalFiles(top string, nobackupFile string,

directory := directories[len(directories)-1]
directories = directories[:len(directories)-1]
subdirectories, skipped, err := ListEntries(top, directory.Path, patterns, nobackupFile, excludeByAttribute, listingState, listingChannel)
subdirectories, skipped, err := ListEntries(top, directory.Path, patterns, nobackupFile, excludeByAttribute, listingChannel)
if err != nil {
if directory.Path == "" {
LOG_ERROR("LIST_FAILURE", "Failed to list the repository root: %v", err)

@@ -756,6 +756,8 @@ func CreateStorage(preference Preference, resetPassword bool, threads int) (stor
LOG_ERROR("STORAGE_CREATE", "Failed to load the Storj storage at %s: %v", storageURL, err)
return nil
}
SavePassword(preference, "storj_key", apiKey)
SavePassword(preference, "storj_passphrase", passphrase)
return storjStorage
} else if matched[1] == "smb" {
server := matched[3]

@@ -1,90 +0,0 @@
// Copyright (c) Acrosync LLC. All rights reserved.
// Free for personal use and commercial trial
// Commercial use requires per-user licenses available from https://duplicacy.com

//go:build freebsd || netbsd || darwin
// +build freebsd netbsd darwin

package duplicacy

import (
"encoding/binary"
"os"
"path/filepath"
"bytes"
"syscall"

"github.com/pkg/xattr"
)

const bsdFileFlagsKey = "\x00bf"

func (entry *Entry) ReadAttributes(top string) {
fullPath := filepath.Join(top, entry.Path)
fileInfo, err := os.Lstat(fullPath)
if err != nil {
return
}
attributes, _ := xattr.LList(fullPath)
if len(attributes) > 0 {
entry.Attributes = &map[string][]byte{}
for _, name := range attributes {
attribute, err := xattr.LGet(fullPath, name)
if err == nil {
(*entry.Attributes)[name] = attribute
}
}
}
if err := entry.readFileFlags(fileInfo); err != nil {
LOG_INFO("ATTR_BACKUP", "Could not backup flags for file %s: %v", fullPath, err)
}
}

func (entry *Entry) SetAttributesToFile(fullPath string) {
names, _ := xattr.LList(fullPath)
for _, name := range names {
newAttribute, found := (*entry.Attributes)[name]
if found {
oldAttribute, _ := xattr.LGet(fullPath, name)
if !bytes.Equal(oldAttribute, newAttribute) {
xattr.LSet(fullPath, name, newAttribute)
}
delete(*entry.Attributes, name)
} else {
xattr.LRemove(fullPath, name)
}
}

for name, attribute := range *entry.Attributes {
if len(name) > 0 && name[0] == '\x00' {
continue
}
xattr.LSet(fullPath, name, attribute)
}
if err := entry.restoreLateFileFlags(fullPath); err != nil {
LOG_DEBUG("ATTR_RESTORE", "Could not restore flags for file %s: %v", fullPath, err)
}
}

func (entry *Entry) readFileFlags(fileInfo os.FileInfo) error {
stat, ok := fileInfo.Sys().(*syscall.Stat_t)
if ok && stat.Flags != 0 {
if entry.Attributes == nil {
entry.Attributes = &map[string][]byte{}
}
v := make([]byte, 4)
binary.LittleEndian.PutUint32(v, stat.Flags)
(*entry.Attributes)[bsdFileFlagsKey] = v
LOG_DEBUG("ATTR_READ", "Read flags 0x%x for %s", stat.Flags, entry.Path)
}
return nil
}

func (entry *Entry) RestoreEarlyDirFlags(path string) error {
return nil
}

func (entry *Entry) RestoreEarlyFileFlags(f *os.File) error {
return nil
}

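The removed BSD/darwin file above backs up extended attributes with github.com/pkg/xattr, using the L* variants so symlinks are not followed. A small hedged sketch of that round-trip on its own; the paths and the copy helper are just examples:

```go
package main

import (
	"log"

	"github.com/pkg/xattr"
)

// copyXattrs reads every extended attribute of src and writes it onto dst.
// The L* variants operate on the link itself rather than its target.
func copyXattrs(src, dst string) error {
	names, err := xattr.LList(src)
	if err != nil {
		return err
	}
	for _, name := range names {
		value, err := xattr.LGet(src, name)
		if err != nil {
			return err
		}
		if err := xattr.LSet(dst, name, value); err != nil {
			return err
		}
	}
	return nil
}

func main() {
	if err := copyXattrs("/tmp/a", "/tmp/b"); err != nil {
		log.Fatal(err)
	}
}
```
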
@@ -5,29 +5,10 @@
package duplicacy

import (
"os"
"syscall"
"strings"
"encoding/binary"
)

func excludedByAttribute(attributes map[string][]byte) bool {
value, ok := attributes["com.apple.metadata:com_apple_backup_excludeItem"]
return ok && strings.Contains(string(value), "com.apple.backupd")
}

func (entry *Entry) restoreLateFileFlags(path string) error {
if entry.Attributes == nil {
return nil
}
if v, have := (*entry.Attributes)[bsdFileFlagsKey]; have {
f, err := os.OpenFile(path, os.O_RDONLY|syscall.O_SYMLINK, 0)
if err != nil {
return err
}
err = syscall.Fchflags(int(f.Fd()), int(binary.LittleEndian.Uint32(v)))
f.Close()
return err
}
return nil
}

@@ -4,218 +4,6 @@

package duplicacy

import (
"bytes"
"encoding/binary"
"os"
"syscall"
"unsafe"
"path/filepath"

"github.com/pkg/xattr"
)

const (
linux_FS_SECRM_FL = 0x00000001 /* Secure deletion */
linux_FS_UNRM_FL = 0x00000002 /* Undelete */
linux_FS_COMPR_FL = 0x00000004 /* Compress file */
linux_FS_SYNC_FL = 0x00000008 /* Synchronous updates */
linux_FS_IMMUTABLE_FL = 0x00000010 /* Immutable file */
linux_FS_APPEND_FL = 0x00000020 /* writes to file may only append */
linux_FS_NODUMP_FL = 0x00000040 /* do not dump file */
linux_FS_NOATIME_FL = 0x00000080 /* do not update atime */
linux_FS_NOCOMP_FL = 0x00000400 /* Don't compress */
linux_FS_JOURNAL_DATA_FL = 0x00004000 /* Reserved for ext3 */
linux_FS_NOTAIL_FL = 0x00008000 /* file tail should not be merged */
linux_FS_DIRSYNC_FL = 0x00010000 /* dirsync behaviour (directories only) */
linux_FS_TOPDIR_FL = 0x00020000 /* Top of directory hierarchies*/
linux_FS_NOCOW_FL = 0x00800000 /* Do not cow file */
linux_FS_PROJINHERIT_FL = 0x20000000 /* Create with parents projid */

linux_FS_IOC_GETFLAGS uintptr = 0x80086601
linux_FS_IOC_SETFLAGS uintptr = 0x40086602

linuxIocFlagsFileEarly = linux_FS_SECRM_FL | linux_FS_UNRM_FL | linux_FS_COMPR_FL | linux_FS_NODUMP_FL | linux_FS_NOATIME_FL | linux_FS_NOCOMP_FL | linux_FS_JOURNAL_DATA_FL | linux_FS_NOTAIL_FL | linux_FS_NOCOW_FL
linuxIocFlagsDirEarly = linux_FS_TOPDIR_FL | linux_FS_PROJINHERIT_FL
linuxIocFlagsLate = linux_FS_SYNC_FL | linux_FS_IMMUTABLE_FL | linux_FS_APPEND_FL | linux_FS_DIRSYNC_FL

linuxFileFlagsKey = "\x00lf"
)

func ioctl(f *os.File, request uintptr, attrp *uint32) error {
argp := uintptr(unsafe.Pointer(attrp))

if _, _, errno := syscall.Syscall(syscall.SYS_IOCTL, f.Fd(), request, argp); errno != 0 {
return os.NewSyscallError("ioctl", errno)
}
return nil
}

type xattrHandle struct {
f *os.File
fullPath string
}

func (x xattrHandle) list() ([]string, error) {
if x.f != nil {
return xattr.FList(x.f)
} else {
return xattr.LList(x.fullPath)
}
}

func (x xattrHandle) get(name string) ([]byte, error) {
if x.f != nil {
return xattr.FGet(x.f, name)
} else {
return xattr.LGet(x.fullPath, name)
}
}

func (x xattrHandle) set(name string, value []byte) error {
if x.f != nil {
return xattr.FSet(x.f, name, value)
} else {
return xattr.LSet(x.fullPath, name, value)
}
}

func (x xattrHandle) remove(name string) error {
if x.f != nil {
return xattr.FRemove(x.f, name)
} else {
return xattr.LRemove(x.fullPath, name)
}
}

func (entry *Entry) ReadAttributes(top string) {
x := xattrHandle{nil, filepath.Join(top, entry.Path)}

if !entry.IsLink() {
var err error
x.f, err = os.OpenFile(x.fullPath, os.O_RDONLY|syscall.O_NOFOLLOW|syscall.O_NONBLOCK, 0)
if err != nil {
// FIXME: We really should return errors for failure to read
return
}
}

attributes, _ := x.list()

if len(attributes) > 0 {
entry.Attributes = &map[string][]byte{}
}
for _, name := range attributes {
attribute, err := x.get(name)
if err == nil {
(*entry.Attributes)[name] = attribute
}
}

if entry.IsFile() || entry.IsDir() {
if err := entry.readFileFlags(x.f); err != nil {
LOG_INFO("ATTR_BACKUP", "Could not backup flags for file %s: %v", x.fullPath, err)
}
}
if x.f != nil {
x.f.Close()
}
}

func (entry *Entry) SetAttributesToFile(fullPath string) {
x := xattrHandle{nil, fullPath}
if !entry.IsLink() {
var err error
x.f, err = os.OpenFile(fullPath, os.O_RDONLY|syscall.O_NOFOLLOW, 0)
if err != nil {
return
}
}

names, _ := x.list()

for _, name := range names {
newAttribute, found := (*entry.Attributes)[name]
if found {
oldAttribute, _ := x.get(name)
if !bytes.Equal(oldAttribute, newAttribute) {
x.set(name, newAttribute)
}
delete(*entry.Attributes, name)
} else {
x.remove(name)
}
}

for name, attribute := range *entry.Attributes {
if len(name) > 0 && name[0] == '\x00' {
continue
}
x.set(name, attribute)
}
if entry.IsFile() || entry.IsDir() {
if err := entry.restoreLateFileFlags(x.f); err != nil {
LOG_DEBUG("ATTR_RESTORE", "Could not restore flags for file %s: %v", fullPath, err)
}
}
if x.f != nil {
x.f.Close()
}
}

func (entry *Entry) readFileFlags(f *os.File) error {
var flags uint32
if err := ioctl(f, linux_FS_IOC_GETFLAGS, &flags); err != nil {
return err
}
if flags != 0 {
if entry.Attributes == nil {
entry.Attributes = &map[string][]byte{}
}
v := make([]byte, 4)
binary.LittleEndian.PutUint32(v, flags)
(*entry.Attributes)[linuxFileFlagsKey] = v
LOG_DEBUG("ATTR_READ", "Read flags 0x%x for %s", flags, entry.Path)
}
return nil
}

func (entry *Entry) RestoreEarlyDirFlags(path string) error {
if entry.Attributes == nil {
return nil
}
if v, have := (*entry.Attributes)[linuxFileFlagsKey]; have {
flags := binary.LittleEndian.Uint32(v) & linuxIocFlagsDirEarly
f, err := os.OpenFile(path, os.O_RDONLY|syscall.O_DIRECTORY, 0)
if err != nil {
return err
}
LOG_DEBUG("ATTR_RESTORE", "Restore dir flags (early) 0x%x for %s", flags, entry.Path)
err = ioctl(f, linux_FS_IOC_SETFLAGS, &flags)
f.Close()
return err
}
return nil
}

func (entry *Entry) RestoreEarlyFileFlags(f *os.File) error {
if entry.Attributes == nil {
return nil
}
if v, have := (*entry.Attributes)[linuxFileFlagsKey]; have {
flags := binary.LittleEndian.Uint32(v) & linuxIocFlagsFileEarly
LOG_DEBUG("ATTR_RESTORE", "Restore flags (early) 0x%x for %s", flags, entry.Path)
return ioctl(f, linux_FS_IOC_SETFLAGS, &flags)
}
return nil
}

func (entry *Entry) restoreLateFileFlags(f *os.File) error {
if entry.Attributes == nil {
return nil
}
if v, have := (*entry.Attributes)[linuxFileFlagsKey]; have {
flags := binary.LittleEndian.Uint32(v) & (linuxIocFlagsFileEarly | linuxIocFlagsDirEarly | linuxIocFlagsLate)
LOG_DEBUG("ATTR_RESTORE", "Restore flags (late) 0x%x for %s", flags, entry.Path)
return ioctl(f, linux_FS_IOC_SETFLAGS, &flags)
}
return nil
}

func excludedByAttribute(attributes map[string][]byte) bool {
_, ok := attributes["user.duplicacy_exclude"]
return ok

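The Linux implementation above round-trips ext2-style file flags through the FS_IOC_GETFLAGS/FS_IOC_SETFLAGS ioctls and stores them as four little-endian bytes under a reserved attribute key. A standalone hedged sketch of just that ioctl round-trip, reusing the same request values and raw-syscall pattern shown in the hunk (the path is a placeholder, and this is Linux-only):

```go
package main

import (
	"fmt"
	"os"
	"syscall"
	"unsafe"
)

// Request values as listed in the diff above (64-bit Linux).
const (
	fsIocGetFlags uintptr = 0x80086601
	fsIocSetFlags uintptr = 0x40086602
)

// ioctlFlags issues FS_IOC_GETFLAGS or FS_IOC_SETFLAGS against an open file.
func ioctlFlags(f *os.File, request uintptr, flags *uint32) error {
	_, _, errno := syscall.Syscall(syscall.SYS_IOCTL, f.Fd(), request, uintptr(unsafe.Pointer(flags)))
	if errno != 0 {
		return os.NewSyscallError("ioctl", errno)
	}
	return nil
}

func main() {
	f, err := os.Open("/tmp/example")
	if err != nil {
		fmt.Println(err)
		return
	}
	defer f.Close()

	var flags uint32
	if err := ioctlFlags(f, fsIocGetFlags, &flags); err != nil {
		fmt.Println("get flags:", err)
		return
	}
	fmt.Printf("flags: 0x%x\n", flags)

	// Writing the same value back is a no-op but shows the SETFLAGS direction.
	if err := ioctlFlags(f, fsIocSetFlags, &flags); err != nil {
		fmt.Println("set flags:", err)
	}
}
```
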
@@ -2,15 +2,18 @@
// Free for personal use and commercial trial
// Commercial use requires per-user licenses available from https://duplicacy.com

//go:build !windows
// +build !windows

package duplicacy

import (
"bytes"
"os"
"path"
"path/filepath"
"syscall"

"github.com/pkg/xattr"
)

func Readlink(path string) (isRegular bool, s string, err error) {

@@ -44,6 +47,45 @@ func SetOwner(fullPath string, entry *Entry, fileInfo *os.FileInfo) bool {
return true
}

func (entry *Entry) ReadAttributes(top string) {

fullPath := filepath.Join(top, entry.Path)
attributes, _ := xattr.List(fullPath)
if len(attributes) > 0 {
entry.Attributes = &map[string][]byte{}
for _, name := range attributes {
attribute, err := xattr.Get(fullPath, name)
if err == nil {
(*entry.Attributes)[name] = attribute
}
}
}
}

func (entry *Entry) SetAttributesToFile(fullPath string) {
names, _ := xattr.List(fullPath)

for _, name := range names {

newAttribute, found := (*entry.Attributes)[name]
if found {
oldAttribute, _ := xattr.Get(fullPath, name)
if !bytes.Equal(oldAttribute, newAttribute) {
xattr.Set(fullPath, name, newAttribute)
}
delete(*entry.Attributes, name)
} else {
xattr.Remove(fullPath, name)
}
}

for name, attribute := range *entry.Attributes {
xattr.Set(fullPath, name, attribute)
}

}

func joinPath(components ...string) string {
return path.Join(components...)
}

@@ -1,13 +0,0 @@
// Copyright (c) Acrosync LLC. All rights reserved.
// Free for personal use and commercial trial
// Commercial use requires per-user licenses available from https://duplicacy.com

//go:build freebsd || netbsd || solaris
// +build freebsd netbsd solaris

package duplicacy

func excludedByAttribute(attributes map[string][]byte) bool {
_, ok := attributes["user.duplicacy_exclude"]
return ok
}

@@ -132,18 +132,6 @@ func SplitDir(fullPath string) (dir string, file string) {
return fullPath[:i+1], fullPath[i+1:]
}

func (entry *Entry) ReadFileFlags(f *os.File) error {
return nil
}

func (entry *Entry) RestoreEarlyDirFlags(path string) error {
return nil
}

func (entry *Entry) RestoreEarlyFileFlags(f *os.File) error {
return nil
}

func (entry *Entry) RestoreLateFileFlags(f *os.File) error {
return nil
func excludedByAttribute(attributes map[string][]byte) bool {
return false
}

@@ -7,24 +7,7 @@

package duplicacy

import (
"bytes"
"encoding/binary"
"os"
"path/filepath"
"syscall"

"github.com/pkg/xattr"
)

func (entry *Entry) restoreLateFileFlags(path string) error {
if entry.Attributes == nil {
return nil
}
if v, have := (*entry.Attributes)[bsdFileFlagsKey]; have {
if _, _, errno := syscall.Syscall(syscall.SYS_LCHFLAGS, uintptr(unsafe.Pointer(syscall.StringBytePtr(path))), uintptr(v), 0); errno != 0 {
return os.NewSyscallError("lchflags", errno)
}
}
return nil
func excludedByAttribute(attributes map[string][]byte) bool {
_, ok := attributes["duplicacy_exclude"]
return ok
}