Mirror of https://github.com/jkl1337/duplicacy.git, synced 2026-01-02 03:34:39 -06:00

Compare commits: 17 commits
| SHA1 |
|---|
| 175adb14cb |
| ae706e3dcf |
| 5eed6c65f6 |
| bec3a0edcd |
| b392302c06 |
| 7c36311aa9 |
| 7f834e84f6 |
| d7c1903d5a |
| 7da58c6d49 |
| 4402be6763 |
| 3abec4e37a |
| dd40d4cd2f |
| 923e906b7e |
| 0da55f95ab |
| 2f407d6af9 |
| bb680538ee |
| 7e372edd68 |
Gopkg.lock (generated): 10 changed lines
```diff
@@ -52,7 +52,7 @@
 [[projects]]
   name = "github.com/gilbertchen/go-dropbox"
   packages = ["."]
-  revision = "0baa9015ac2547d8b69b2e88c709aa90cfb8fbc1"
+  revision = "2233fa1dd846b3a3e8060b6c1ea12883deb9d288"

 [[projects]]
   name = "github.com/gilbertchen/go-ole"
@@ -173,6 +173,12 @@
   revision = "5616182052227b951e76d9c9b79a616c608bd91b"
   version = "v1.11.0"

+[[projects]]
+  name = "github.com/pkg/xattr"
+  packages = ["."]
+  revision = "dd870b5cfebab49617ea0c1da6176474e8a52bf4"
+  version = "v0.4.1"
+
 [[projects]]
   name = "github.com/satori/go.uuid"
   packages = ["."]
@@ -265,6 +271,6 @@
 [solve-meta]
   analyzer-name = "dep"
   analyzer-version = 1
-  inputs-digest = "0e6ea2be64dedc36cb9192f1d410917ea72896302011e55b6df5e4c00c1c2f1c"
+  inputs-digest = "e46f7c2dac527af6d5a0d47f1444421a6738d28252eb5d6084fc1c65f2b41bd8"
   solver-name = "gps-cdcl"
   solver-version = 1
```
Gopkg.toml

```diff
@@ -47,7 +47,7 @@

 [[constraint]]
   name = "github.com/gilbertchen/go-dropbox"
-  revision = "0baa9015ac2547d8b69b2e88c709aa90cfb8fbc1"
+  revision = "2233fa1dd846b3a3e8060b6c1ea12883deb9d288"

 [[constraint]]
   name = "github.com/gilbertchen/go-ole"
```
README.md: 12 changed lines
```diff
@@ -2,7 +2,7 @@

 Duplicacy is a new generation cross-platform cloud backup tool based on the idea of [Lock-Free Deduplication](https://github.com/gilbertchen/duplicacy/wiki/Lock-Free-Deduplication).

-This repository hosts source code, design documents, and binary releases of the command line version of Duplicacy. There is also a Duplicacy GUI frontend built for Windows and Mac OS X available from https://duplicacy.com.
+This repository hosts source code, design documents, and binary releases of the command line version of Duplicacy. There is also a Web GUI frontend built for Windows, macOS, and Linux, available from https://duplicacy.com.

 There is a special edition of Duplicacy developed for VMware vSphere (ESXi) named [Vertical Backup](https://www.verticalbackup.com) that can back up virtual machine files on ESXi to local drives, network or cloud storages.

@@ -10,14 +10,15 @@ There is a special edition of Duplicacy developed for VMware vSphere (ESXi) name

 There are 3 core advantages of Duplicacy over any other open-source or commercial backup tools:

-* Duplicacy is the *only* cloud backup tool that allows multiple computers to back up to the same cloud storage, taking advantage of cross-computer deduplication whenever possible, without direct communication among them. This feature literally turns any cloud storage server supporting only a basic set of file operations into a sophisticated deduplication-aware server.
+* Duplicacy is the *only* cloud backup tool that allows multiple computers to back up to the same cloud storage, taking advantage of cross-computer deduplication whenever possible, without direct communication among them. This feature turns any cloud storage server supporting only a basic set of file operations into a sophisticated deduplication-aware server.

-* Unlike other chunk-based backup tools where chunks are grouped into pack files and a chunk database is used to track which chunks are stored inside each pack file, Duplicacy takes a database-less approach where every chunk is saved independently using its hash as the file name to facilitate quick lookups. The lack of a centralized chunk database not only makes the implementation less error-prone, but also produces a highly maintainable piece of software with plenty of room for development of new features and usability enhancements.
+* Unlike other chunk-based backup tools where chunks are grouped into pack files and a chunk database is used to track which chunks are stored inside each pack file, Duplicacy takes a database-less approach where every chunk is saved independently using its hash as the file name to facilitate quick lookups. The avoidance of a centralized chunk database not only produces a simpler and less error-prone implementation, but also makes it easier to develop advanced features, such as [Asymmetric Encryption](https://github.com/gilbertchen/duplicacy/wiki/RSA-encryption) for stronger encryption and [Erasure Coding](https://github.com/gilbertchen/duplicacy/wiki/Erasure-coding) for resilient data protection.

 * Duplicacy is fast. While the performance wasn't the top-priority design goal, Duplicacy has been shown to outperform other backup tools by a considerable margin, as indicated by the following results obtained from a [benchmarking experiment](https://github.com/gilbertchen/benchmarking) backing up the [Linux code base](https://github.com/torvalds/linux) using Duplicacy and 3 other open-source backup tools.

 [](https://github.com/gilbertchen/benchmarking)

 ## Getting Started

 * [A brief introduction](https://github.com/gilbertchen/duplicacy/wiki/Quick-Start)
@@ -44,6 +45,7 @@ Duplicacy currently provides the following storage backends:
 * WebDAV (under beta testing)
 * pcloud (via WebDAV)
 * Box.com (via WebDAV)
+* File Fabric by [Storage Made Easy](https://storagemadeeasy.com/)

 Please consult the [wiki page](https://github.com/gilbertchen/duplicacy/wiki/Storage-Backends) on how to set up Duplicacy to work with each cloud storage.

@@ -64,9 +66,9 @@ to find the differences from previous backups and only then uploading the differ

 [Duplicati](https://duplicati.com) is one of the first backup tools that adopt the chunk-based approach to split files into chunks which are then uploaded to the storage. The chunk-based approach got the incremental backup model right in the sense that every incremental backup is actually a full snapshot. As Duplicati splits files into fixed-size chunks, deletions or insertions of a few bytes will foil the deduplication. Cloud support is extensive, but multiple clients can't back up to the same storage location.

-[Attic](https://attic-backup.org) has been acclaimed by some as the [Holy Grail of backups](https://www.stavros.io/posts/holy-grail-backups). It follows the same incremental backup model like Duplicati, but embraces the variable-size chunk algorithm for better performance and higher deduplication efficiency (not susceptible to byte insertion and deletion any more). Deletions of old backup is also supported. However, no cloud backends are implemented. Although concurrent backups from multiple clients to the same storage is in theory possible by the use of locking, it is
+[Attic](https://attic-backup.org) has been acclaimed by some as the [Holy Grail of backups](https://www.stavros.io/posts/holy-grail-backups). It follows the same incremental backup model like Duplicati but embraces the variable-size chunk algorithm for better performance and higher deduplication efficiency (not susceptible to byte insertion and deletion any more). Deletions of old backup are also supported. However, no cloud backends are implemented. Although concurrent backups from multiple clients to the same storage is in theory possible by the use of locking, it is
 [not recommended](http://librelist.com/browser//attic/2014/11/11/backing-up-multiple-servers-into-a-single-repository/#e96345aa5a3469a87786675d65da492b) by the developer due to chunk indices being kept in a local cache.
-Concurrent access is not only a convenience; it is a necessity for better deduplication. For instance, if multiple machines with the same OS installed can back up their entire drives to the same storage, only one copy of the system files needs to be stored, greatly reducing the storage space regardless of the number of machines. Attic still adopts the traditional approach of using a centralized indexing database to manage chunks, and relies heavily on caching to improve performance. The presence of exclusive locking makes it hard to be extended to cloud storages.
+Concurrent access is not only a convenience; it is a necessity for better deduplication. For instance, if multiple machines with the same OS installed can back up their entire drives to the same storage, only one copy of the system files needs to be stored, greatly reducing the storage space regardless of the number of machines. Attic still adopts the traditional approach of using a centralized indexing database to manage chunks and relies heavily on caching to improve performance. The presence of exclusive locking makes it hard to be extended to cloud storages.

 [restic](https://restic.github.io) is a more recent addition. It uses a format similar to the git packfile format. Multiple clients backing up to the same storage are still guarded by
 [locks](https://github.com/restic/restic/blob/master/doc/Design.md#locks), and because a chunk database is used, deduplication isn't real-time (different clients sharing the same files will upload different copies of the same chunks). A prune operation will completely block all other clients connected to the storage from doing their regular backups. Moreover, since most cloud storage services do not provide a locking service, the best effort is to use some basic file operations to simulate a lock, but distributed locking is known to be a hard problem and it is unclear how reliable restic's lock implementation is. A faulty implementation may cause a prune operation to accidentally delete data still in use, resulting in unrecoverable data loss. This is the exact problem that we avoided by taking the lock-free approach.
```
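The database-less design described in the README bullet above boils down to content-addressed storage: a chunk's hash is its file name, so a deduplication lookup is a plain file-existence check and no central index is needed. The sketch below illustrates only the idea; Duplicacy's real storage layer encrypts chunks, nests them under hash-prefix directories, and works through a Storage interface, none of which is shown here.

```go
package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"os"
	"path/filepath"
)

// storeChunk writes a chunk under storageDir using its SHA-256 hash as the
// file name, skipping the write when the chunk already exists, which is the
// whole deduplication check in the database-less model.
func storeChunk(storageDir string, chunk []byte) (string, error) {
	hash := sha256.Sum256(chunk)
	name := hex.EncodeToString(hash[:])
	path := filepath.Join(storageDir, name)
	if _, err := os.Stat(path); err == nil {
		return name, nil // already stored; nothing to upload
	}
	if err := os.WriteFile(path, chunk, 0644); err != nil {
		return "", err
	}
	return name, nil
}

func main() {
	dir, _ := os.MkdirTemp("", "chunks")
	id, err := storeChunk(dir, []byte("example chunk"))
	if err != nil {
		panic(err)
	}
	fmt.Println("stored as", id)
}
```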
```diff
@@ -274,6 +274,13 @@ func configRepository(context *cli.Context, init bool) {
 		}
 	}

+	snapshotIDRegex := regexp.MustCompile(`^[A-Za-z0-9_\-]+$`)
+	matched := snapshotIDRegex.FindStringSubmatch(snapshotID)
+	if matched == nil {
+		duplicacy.LOG_ERROR("PREFERENCE_INVALID", "'%s' is an invalid snapshot id", snapshotID)
+		return
+	}
+
 	var repository string
 	var err error

@@ -2179,7 +2186,7 @@ func main() {
 	app.Name = "duplicacy"
 	app.HelpName = "duplicacy"
 	app.Usage = "A new generation cloud backup tool based on lock-free deduplication"
-	app.Version = "2.7.0" + " (" + GitCommit + ")"
+	app.Version = "2.7.2" + " (" + GitCommit + ")"

 	// If the program is interrupted, call the RunAtError function.
 	c := make(chan os.Signal, 1)
```
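The validation added in configRepository can be exercised on its own. The snippet below uses the exact pattern from the diff; the sample snapshot IDs are made up for illustration.

```go
package main

import (
	"fmt"
	"regexp"
)

func main() {
	// Same pattern as the diff: only letters, digits, underscores, hyphens.
	snapshotIDRegex := regexp.MustCompile(`^[A-Za-z0-9_\-]+$`)
	for _, id := range []string{"laptop-home", "work_pc1", "bad id", "évora", ""} {
		fmt.Printf("%-14q valid=%v\n", id, snapshotIDRegex.MatchString(id))
	}
}
```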
```diff
@@ -1377,7 +1377,7 @@ func (manager *BackupManager) RestoreFile(chunkDownloader *ChunkDownloader, chun
 	}

 	// fileHash != entry.Hash, warn/error depending on -overwrite option
-	if !overwrite {
+	if !overwrite && !isNewFile {
 		LOG_WERROR(allowFailures, "DOWNLOAD_OVERWRITE",
 			"File %s already exists. Please specify the -overwrite option to overwrite", entry.Path)
 		return false, fmt.Errorf("file exists")
```
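The net effect of the changed condition is easiest to see as a truth table. mayOverwrite below is a hypothetical helper that mirrors the new test; the real code logs DOWNLOAD_OVERWRITE and returns an error in the forbidden case instead.

```go
package main

import "fmt"

// mayOverwrite mirrors the condition in the diff: a restore may rewrite a
// file it created itself (isNewFile) or any file when -overwrite was given.
func mayOverwrite(overwrite, isNewFile bool) bool {
	return overwrite || isNewFile
}

func main() {
	fmt.Println(mayOverwrite(false, false)) // false: refuse, ask for -overwrite
	fmt.Println(mayOverwrite(false, true))  // true: file created by this restore
	fmt.Println(mayOverwrite(true, false))  // true: -overwrite given
}
```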
```diff
@@ -31,6 +31,9 @@ type ChunkDownloadCompletion struct {
 // corresponding ChunkDownloadTask is sent to the dowloading goroutine. Once a chunk is downloaded, it will be
 // inserted in the completed task list.
 type ChunkDownloader struct {
+	totalChunkSize      int64 // Total chunk size
+	downloadedChunkSize int64 // Downloaded chunk size
+
 	config        *Config      // Associated config
 	storage       Storage      // Download from this storage
 	snapshotCache *FileStorage // Used as cache if not nil; usually for downloading snapshot chunks
@@ -47,8 +50,6 @@ type ChunkDownloader struct {
 	completionChannel chan ChunkDownloadCompletion // A downloading goroutine sends back the chunk via this channel after downloading

 	startTime int64 // The time it starts downloading
-	totalChunkSize int64 // Total chunk size
-	downloadedChunkSize int64 // Downloaded chunk size
 	numberOfDownloadedChunks int // The number of chunks that have been downloaded
 	numberOfDownloadingChunks int // The number of chunks still being downloaded
 	numberOfActiveChunks int // The number of chunks that is being downloaded or has been downloaded but not reclaimed
```
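Moving the two int64 counters to the top of the struct matches the usual fix for sync/atomic on 32-bit platforms, where 64-bit atomic operations require 64-bit alignment and Go guarantees it only for the first word of an allocated struct. Whether that was the motivation for this particular change is an inference from the field order; the Go alignment rule itself is documented. A toy illustration:

```go
package main

import (
	"fmt"
	"sync/atomic"
)

// Placing the int64 counter first makes atomic.AddInt64 safe even on
// 32-bit platforms, where a misaligned 64-bit operand would panic.
type counters struct {
	downloaded int64 // first field: guaranteed 64-bit aligned
	flag       bool
}

func main() {
	c := &counters{}
	atomic.AddInt64(&c.downloaded, 4096)
	fmt.Println(atomic.LoadInt64(&c.downloaded))
}
```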
```diff
@@ -79,7 +79,7 @@ func (storage *FileStorage) ListFiles(threadIndex int, dir string) (files []stri

 	for _, f := range list {
 		name := f.Name()
-		if f.IsDir() && name[len(name)-1] != '/' {
+		if (f.IsDir() || f.Mode() & os.ModeSymlink != 0) && name[len(name)-1] != '/' {
 			name += "/"
 		}
 		files = append(files, name)
@@ -165,8 +165,8 @@ func (storage *FileStorage) UploadFile(threadIndex int, filePath string, content
 			return err
 		}
 	} else {
-		if !stat.IsDir() {
-			return fmt.Errorf("The path %s is not a directory", dir)
+		if !stat.IsDir() && stat.Mode() & os.ModeSymlink == 0 {
+			return fmt.Errorf("The path %s is not a directory or symlink", dir)
 		}
 	}
 }
```
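Both hunks hinge on the same mode-bit test. A subtlety worth remembering when reading them: os.Stat follows symlinks and reports the target's mode, while os.Lstat (and directory listings) report the link itself, so the symlink bit is only visible through the latter. A minimal illustration:

```go
package main

import (
	"fmt"
	"os"
)

func main() {
	os.WriteFile("target.txt", []byte("x"), 0644)
	os.Symlink("target.txt", "link.txt")
	defer os.Remove("target.txt")
	defer os.Remove("link.txt")

	stat, _ := os.Stat("link.txt")   // follows the link to the target
	lstat, _ := os.Lstat("link.txt") // describes the link itself
	fmt.Println(stat.Mode()&os.ModeSymlink != 0)  // false
	fmt.Println(lstat.Mode()&os.ModeSymlink != 0) // true
}
```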
```diff
@@ -14,7 +14,6 @@ import (
 	"os"
 	"os/exec"
 	"regexp"
-	"strings"
 	"syscall"
 	"time"
 )
@@ -77,19 +76,19 @@ func DeleteShadowCopy() {

 	err := exec.Command("/sbin/umount", "-f", snapshotPath).Run()
 	if err != nil {
-		LOG_ERROR("VSS_DELETE", "Error while unmounting snapshot")
+		LOG_WARN("VSS_DELETE", "Error while unmounting snapshot: %v", err)
 		return
 	}

 	err = exec.Command("tmutil", "deletelocalsnapshots", snapshotDate).Run()
 	if err != nil {
-		LOG_ERROR("VSS_DELETE", "Error while deleting local snapshot")
+		LOG_WARN("VSS_DELETE", "Error while deleting local snapshot: %v", err)
 		return
 	}

 	err = os.RemoveAll(snapshotPath)
 	if err != nil {
-		LOG_ERROR("VSS_DELETE", "Error while deleting temporary mount directory")
+		LOG_WARN("VSS_DELETE", "Error while deleting temporary mount directory: %v", err)
 		return
 	}
@@ -150,12 +149,13 @@ func CreateShadowCopy(top string, shadowCopy bool, timeoutInSeconds int) (shadow
 		return top
 	}

-	colonPos := strings.IndexByte(tmutilOutput, ':')
-	if colonPos < 0 {
+	snapshotDateRegex := regexp.MustCompile(`:\s+([0-9\-]+)`)
+	matched := snapshotDateRegex.FindStringSubmatch(tmutilOutput)
+	if matched == nil {
 		LOG_ERROR("VSS_CREATE", "Snapshot creation failed: %s", tmutilOutput)
 		return top
 	}
-	snapshotDate = strings.TrimSpace(tmutilOutput[colonPos+1:])
+	snapshotDate = matched[1]

 	tmutilOutput, err = CommandWithTimeout(timeoutInSeconds, "tmutil", "listlocalsnapshots", ".")
 	if err != nil {
@@ -164,17 +164,17 @@ func CreateShadowCopy(top string, shadowCopy bool, timeoutInSeconds int) (shadow
 	}
 	snapshotName := "com.apple.TimeMachine." + snapshotDate

-	r := regexp.MustCompile(`(?m)^(.+` + snapshotDate + `.*)$`)
-	snapshotNames := r.FindStringSubmatch(tmutilOutput)
-	if len(snapshotNames) > 0 {
-		snapshotName = snapshotNames[0]
+	snapshotNameRegex := regexp.MustCompile(`(?m)^(.+` + snapshotDate + `.*)$`)
+	matched = snapshotNameRegex.FindStringSubmatch(tmutilOutput)
+	if len(matched) > 0 {
+		snapshotName = matched[0]
 	} else {
-		LOG_WARN("VSS_CREATE", "Error while using 'tmutil listlocalsnapshots' to find snapshot name. Will fallback to 'com.apple.TimeMachine.SNAPSHOT_DATE'")
+		LOG_INFO("VSS_CREATE", "Can't find the snapshot name with 'tmutil listlocalsnapshots'; fallback to %s", snapshotName)
 	}

 	// Mount snapshot as readonly and hide from GUI i.e. Finder
 	_, err = CommandWithTimeout(timeoutInSeconds,
-		"/sbin/mount", "-t", "apfs", "-o", "nobrowse,-r,-s="+snapshotName, "/", snapshotPath)
+		"/sbin/mount", "-t", "apfs", "-o", "nobrowse,-r,-s="+snapshotName, "/System/Volumes/Data", snapshotPath)
 	if err != nil {
 		LOG_ERROR("VSS_CREATE", "Error while mounting snapshot: %v", err)
 		return top
```
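The rewritten parser pulls the snapshot date out with a regex instead of splitting at the first colon. The snippet below applies the exact regex from the diff to a sample line; the sample output format is an assumption about what `tmutil snapshot` prints, not taken from this repository.

```go
package main

import (
	"fmt"
	"regexp"
)

func main() {
	// Assumed sample of `tmutil snapshot` output.
	tmutilOutput := "Created local snapshot with date: 2021-03-01-123456"

	// Matching `:\s+([0-9\-]+)` captures the date token directly and is
	// robust against extra colons elsewhere in the line, which would
	// confuse the old first-colon split.
	snapshotDateRegex := regexp.MustCompile(`:\s+([0-9\-]+)`)
	matched := snapshotDateRegex.FindStringSubmatch(tmutilOutput)
	if matched == nil {
		fmt.Println("snapshot creation failed:", tmutilOutput)
		return
	}
	fmt.Println("snapshot date:", matched[1]) // 2021-03-01-123456
}
```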
```diff
@@ -1017,50 +1017,105 @@ func (manager *SnapshotManager) CheckSnapshots(snapshotID string, revisionsToChe
 		manager.ShowStatistics(snapshotMap, chunkSizeMap, chunkUniqueMap, chunkSnapshotMap)
 	}

-	if checkChunks && !checkFiles {
-		manager.chunkDownloader.snapshotCache = nil
-		LOG_INFO("SNAPSHOT_VERIFY", "Verifying %d chunks", len(*allChunkHashes))
+	// Don't verify chunks with -files
+	if !checkChunks || checkFiles {
+		return true
+	}

-		startTime := time.Now()
-		var chunkHashes []string
+	// This contains chunks that have been verifed in previous checks and is loaded from
+	// .duplicacy/cache/storage/verified_chunks. Note that it contains the chunk ids not chunk
+	// hashes.
+	verifiedChunks := make(map[string]int64)
+	verifiedChunksFile := "verified_chunks"

-		// The index of the first chunk to add to the downloader, which may have already downloaded
-		// some metadata chunks so the index doesn't start with 0.
-		chunkIndex := -1
+	manager.fileChunk.Reset(false)
+	err = manager.snapshotCache.DownloadFile(0, verifiedChunksFile, manager.fileChunk)
+	if err != nil {
+		if !os.IsNotExist(err) {
+			LOG_WARN("SNAPSHOT_VERIFY", "Failed to load the file containing verified chunks: %v", err)
+		}
+	} else {
+		err = json.Unmarshal(manager.fileChunk.GetBytes(), &verifiedChunks)
+		if err != nil {
+			LOG_WARN("SNAPSHOT_VERIFY", "Failed to parse the file containing verified chunks: %v", err)
+		}
+	}
+	numberOfVerifiedChunks := len(verifiedChunks)

-		for chunkHash := range *allChunkHashes {
-			chunkHashes = append(chunkHashes, chunkHash)
-			if chunkIndex == -1 {
-				chunkIndex = manager.chunkDownloader.AddChunk(chunkHash)
-			} else {
-				manager.chunkDownloader.AddChunk(chunkHash)
-			}
-		}
+	saveVerifiedChunks := func() {
+		if len(verifiedChunks) > numberOfVerifiedChunks {
+			var description []byte
+			description, err = json.Marshal(verifiedChunks)
+			if err != nil {
+				LOG_WARN("SNAPSHOT_VERIFY", "Failed to create a json file for the set of verified chunks: %v", err)
+			} else {
+				err = manager.snapshotCache.UploadFile(0, verifiedChunksFile, description)
+				if err != nil {
+					LOG_WARN("SNAPSHOT_VERIFY", "Failed to save the verified chunks file: %v", err)
+				} else {
+					LOG_INFO("SNAPSHOT_VERIFY", "Added %d chunks to the list of verified chunks", len(verifiedChunks) - numberOfVerifiedChunks)
+				}
+			}
+		}
+	}
+	defer saveVerifiedChunks()
+	RunAtError = saveVerifiedChunks

-		var downloadedChunkSize int64
-		totalChunks := len(*allChunkHashes)
-		for i := 0; i < totalChunks; i++ {
-			chunk := manager.chunkDownloader.WaitForChunk(i + chunkIndex)
-			if chunk.isBroken {
-				continue
-			}
-			downloadedChunkSize += int64(chunk.GetLength())
+	manager.chunkDownloader.snapshotCache = nil
+	LOG_INFO("SNAPSHOT_VERIFY", "Verifying %d chunks", len(*allChunkHashes))

-			elapsedTime := time.Now().Sub(startTime).Seconds()
-			speed := int64(float64(downloadedChunkSize) / elapsedTime)
-			remainingTime := int64(float64(totalChunks - i - 1) / float64(i + 1) * elapsedTime)
-			percentage := float64(i + 1) / float64(totalChunks) * 100.0
-			LOG_INFO("VERIFY_PROGRESS", "Verified chunk %s (%d/%d), %sB/s %s %.1f%%",
-				manager.config.GetChunkIDFromHash(chunkHashes[i]), i + 1, totalChunks,
-				PrettySize(speed), PrettyTime(remainingTime), percentage)
-		}
+	startTime := time.Now()
+	var chunkHashes []string

-		if manager.chunkDownloader.NumberOfFailedChunks > 0 {
-			LOG_ERROR("SNAPSHOT_VERIFY", "%d out of %d chunks are corrupted", manager.chunkDownloader.NumberOfFailedChunks, totalChunks)
-		} else {
-			LOG_INFO("SNAPSHOT_VERIFY", "All %d chunks have been successfully verified", totalChunks)
-		}
-	}
+	// The index of the first chunk to add to the downloader, which may have already downloaded
+	// some metadata chunks so the index doesn't start with 0.
+	chunkIndex := -1
+
+	skippedChunks := 0
+	for chunkHash := range *allChunkHashes {
+		if len(verifiedChunks) > 0 {
+			chunkID := manager.config.GetChunkIDFromHash(chunkHash)
+			if _, found := verifiedChunks[chunkID]; found {
+				skippedChunks++
+				continue
+			}
+		}
+		chunkHashes = append(chunkHashes, chunkHash)
+		if chunkIndex == -1 {
+			chunkIndex = manager.chunkDownloader.AddChunk(chunkHash)
+		} else {
+			manager.chunkDownloader.AddChunk(chunkHash)
+		}
+	}
+
+	if skippedChunks > 0 {
+		LOG_INFO("SNAPSHOT_VERIFY", "Skipped %d chunks that have already been verified before", skippedChunks)
+	}
+
+	var downloadedChunkSize int64
+	totalChunks := len(chunkHashes)
+	for i := 0; i < totalChunks; i++ {
+		chunk := manager.chunkDownloader.WaitForChunk(i + chunkIndex)
+		chunkID := manager.config.GetChunkIDFromHash(chunkHashes[i])
+		if chunk.isBroken {
+			continue
+		}
+		verifiedChunks[chunkID] = startTime.Unix()
+		downloadedChunkSize += int64(chunk.GetLength())
+
+		elapsedTime := time.Now().Sub(startTime).Seconds()
+		speed := int64(float64(downloadedChunkSize) / elapsedTime)
+		remainingTime := int64(float64(totalChunks - i - 1) / float64(i + 1) * elapsedTime)
+		percentage := float64(i + 1) / float64(totalChunks) * 100.0
+		LOG_INFO("VERIFY_PROGRESS", "Verified chunk %s (%d/%d), %sB/s %s %.1f%%",
+			chunkID, i + 1, totalChunks, PrettySize(speed), PrettyTime(remainingTime), percentage)
+	}
+
+	if manager.chunkDownloader.NumberOfFailedChunks > 0 {
+		LOG_ERROR("SNAPSHOT_VERIFY", "%d out of %d chunks are corrupted", manager.chunkDownloader.NumberOfFailedChunks, len(*allChunkHashes))
+	} else {
+		LOG_INFO("SNAPSHOT_VERIFY", "All %d chunks have been successfully verified", len(*allChunkHashes))
+	}
 	return true
 }
```
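From the types in this diff, the verified_chunks cache file is simply a JSON object mapping chunk IDs to the unix time at which they were verified. A small round-trip sketch of that format (chunk IDs fabricated for illustration):

```go
package main

import (
	"encoding/json"
	"fmt"
	"time"
)

func main() {
	// Build the cache the way the diff does: map[string]int64 keyed by
	// chunk ID, valued with the check's start time.
	verifiedChunks := map[string]int64{
		"1a2b3c": time.Now().Unix(),
		"4d5e6f": time.Now().Unix(),
	}
	description, _ := json.Marshal(verifiedChunks)
	fmt.Println(string(description)) // {"1a2b3c":...,"4d5e6f":...}

	// Loading it back is the skip-list used on the next `check` run.
	loaded := make(map[string]int64)
	json.Unmarshal(description, &loaded)
	fmt.Println(len(loaded), "chunks already verified")
}
```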
```diff
@@ -13,7 +13,7 @@ import (
 	"path/filepath"
 	"syscall"

-	"github.com/gilbertchen/xattr"
+	"github.com/pkg/xattr"
 )

 func Readlink(path string) (isRegular bool, s string, err error) {
@@ -50,11 +50,11 @@ func SetOwner(fullPath string, entry *Entry, fileInfo *os.FileInfo) bool {
 func (entry *Entry) ReadAttributes(top string) {

 	fullPath := filepath.Join(top, entry.Path)
-	attributes, _ := xattr.Listxattr(fullPath)
+	attributes, _ := xattr.List(fullPath)
 	if len(attributes) > 0 {
 		entry.Attributes = make(map[string][]byte)
 		for _, name := range attributes {
-			attribute, err := xattr.Getxattr(fullPath, name)
+			attribute, err := xattr.Get(fullPath, name)
 			if err == nil {
 				entry.Attributes[name] = attribute
 			}
@@ -63,24 +63,25 @@ func (entry *Entry) ReadAttributes(top string) {
 }

 func (entry *Entry) SetAttributesToFile(fullPath string) {
-	names, _ := xattr.Listxattr(fullPath)
+	names, _ := xattr.List(fullPath)

 	for _, name := range names {

 		newAttribute, found := entry.Attributes[name]
 		if found {
-			oldAttribute, _ := xattr.Getxattr(fullPath, name)
+			oldAttribute, _ := xattr.Get(fullPath, name)
 			if !bytes.Equal(oldAttribute, newAttribute) {
-				xattr.Setxattr(fullPath, name, newAttribute)
+				xattr.Set(fullPath, name, newAttribute)
 			}
 			delete(entry.Attributes, name)
 		} else {
-			xattr.Removexattr(fullPath, name)
+			xattr.Remove(fullPath, name)
 		}
 	}

 	for name, attribute := range entry.Attributes {
-		xattr.Setxattr(fullPath, name, attribute)
+		xattr.Set(fullPath, name, attribute)
 	}

 }
```
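This hunk swaps the forked github.com/gilbertchen/xattr wrapper for the upstream github.com/pkg/xattr package, whose path-based API is List/Get/Set/Remove. A minimal usage sketch of those calls (requires a filesystem with extended-attribute support; the attribute name is arbitrary):

```go
package main

import (
	"fmt"
	"os"

	"github.com/pkg/xattr"
)

func main() {
	f, _ := os.CreateTemp("", "xattr-demo")
	defer os.Remove(f.Name())

	// Set, List, Get, and Remove mirror the calls the diff migrates to.
	if err := xattr.Set(f.Name(), "user.comment", []byte("hello")); err != nil {
		fmt.Println("xattr not supported here:", err)
		return
	}
	names, _ := xattr.List(f.Name())
	value, _ := xattr.Get(f.Name(), "user.comment")
	fmt.Println(names, string(value))
	xattr.Remove(f.Name(), "user.comment")
}
```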