diff --git a/duplicacy/duplicacy_main.go b/duplicacy/duplicacy_main.go
index 188dda8..c7fe78c 100644
--- a/duplicacy/duplicacy_main.go
+++ b/duplicacy/duplicacy_main.go
@@ -850,13 +850,15 @@ func restoreRepository(context *cli.Context) {
 		password = duplicacy.GetPassword(*preference, "password", "Enter storage password:", false, false)
 	}
 
-	quickMode := !context.Bool("hash")
-	overwrite := context.Bool("overwrite")
-	deleteMode := context.Bool("delete")
-	setOwner := !context.Bool("ignore-owner")
-
-	showStatistics := context.Bool("stats")
-	persist := context.Bool("persist")
+	options := duplicacy.RestoreOptions{
+		InPlace:        true,
+		QuickMode:      !context.Bool("hash"),
+		Overwrite:      context.Bool("overwrite"),
+		DeleteMode:     context.Bool("delete"),
+		SetOwner:       !context.Bool("ignore-owner"),
+		ShowStatistics: context.Bool("stats"),
+		AllowFailures:  context.Bool("persist"),
+	}
 
 	var patterns []string
 	for _, pattern := range context.Args() {
@@ -874,7 +876,7 @@ func restoreRepository(context *cli.Context) {
 		patterns = append(patterns, pattern)
 	}
 
-	patterns = duplicacy.ProcessFilterLines(patterns, make([]string, 0))
+	options.Patterns = duplicacy.ProcessFilterLines(patterns, make([]string, 0))
 
 	duplicacy.LOG_DEBUG("REGEX_DEBUG", "There are %d compiled regular expressions stored", len(duplicacy.RegexMap))
 
@@ -887,7 +889,7 @@ func restoreRepository(context *cli.Context) {
 	loadRSAPrivateKey(context.String("key"), context.String("key-passphrase"), preference, backupManager, false)
 
 	backupManager.SetupSnapshotCache(preference.Name)
-	failed := backupManager.Restore(repository, revision, true, quickMode, threads, overwrite, deleteMode, setOwner, showStatistics, patterns, persist)
+	failed := backupManager.Restore(repository, revision, options)
 	if failed > 0 {
 		duplicacy.LOG_ERROR("RESTORE_FAIL", "%d file(s) were not restored correctly", failed)
 		return
diff --git a/src/duplicacy_backupmanager.go b/src/duplicacy_backupmanager.go
index 96e9d11..64755c6 100644
--- a/src/duplicacy_backupmanager.go
+++ b/src/duplicacy_backupmanager.go
@@ -25,7 +25,6 @@ import (
 
 // BackupManager performs the two major operations, backup and restore, and passes other operations, mostly related to
 // snapshot management, to the snapshot manager.
-
 type BackupManager struct {
 	snapshotID string  // Unique id for each repository
 	storage    Storage // the storage for storing backups
@@ -42,6 +41,21 @@ type BackupManager struct {
 	cachePath string
 }
 
+type BackupOptions struct {
+}
+
+type RestoreOptions struct {
+	Threads        int
+	Patterns       []string
+	InPlace        bool
+	QuickMode      bool
+	Overwrite      bool
+	DeleteMode     bool
+	SetOwner       bool
+	ShowStatistics bool
+	AllowFailures  bool
+}
+
 func (manager *BackupManager) SetDryRun(dryRun bool) {
 	manager.config.dryRun = dryRun
 }
@@ -622,21 +636,26 @@ func (manager *BackupManager) Backup(top string, quickMode bool, threads int, ta
 }
 
 // Restore downloads the specified snapshot, compares it with what's on the repository, and then downloads
-// files that are different. 'base' is a directory that contains files at a different revision which can
-// serve as a local cache to avoid download chunks available locally. It is perfectly ok for 'base' to be
-// the same as 'top'. 'quickMode' will bypass files with unchanged sizes and timestamps. 'deleteMode' will
-// remove local files that don't exist in the snapshot. 'patterns' is used to include/exclude certain files.
-func (manager *BackupManager) Restore(top string, revision int, inPlace bool, quickMode bool, threads int, overwrite bool,
-	deleteMode bool, setOwner bool, showStatistics bool, patterns []string, allowFailures bool) int {
+// files that are different. 'QuickMode' will bypass files with unchanged sizes and timestamps. 'DeleteMode' will
+// remove local files that don't exist in the snapshot. 'Patterns' is used to include/exclude certain files.
+func (manager *BackupManager) Restore(top string, revision int, options RestoreOptions) int {
+	if options.Threads < 1 {
+		options.Threads = 1
+	}
+
+	patterns := options.Patterns
+
+	overwrite := options.Overwrite
+	allowFailures := options.AllowFailures
 
 	startTime := time.Now().Unix()
 
 	LOG_DEBUG("RESTORE_PARAMETERS", "top: %s, revision: %d, in-place: %t, quick: %t, delete: %t",
-		top, revision, inPlace, quickMode, deleteMode)
+		top, revision, options.InPlace, options.QuickMode, options.DeleteMode)
 
 	if !strings.HasPrefix(GetDuplicacyPreferencePath(), top) {
 		LOG_INFO("RESTORE_INPLACE", "Forcing in-place mode with a non-default preference path")
-		inPlace = true
+		options.InPlace = true
 	}
 
 	if len(patterns) > 0 {
@@ -678,7 +697,8 @@ func (manager *BackupManager) Restore(top string, revision int, inPlace bool, qu
 	localListingChannel := make(chan *Entry)
 	remoteListingChannel := make(chan *Entry)
 
-	chunkOperator := CreateChunkOperator(manager.config, manager.storage, manager.snapshotCache, showStatistics, false, threads, allowFailures)
+	chunkOperator := CreateChunkOperator(manager.config, manager.storage, manager.snapshotCache, options.ShowStatistics,
+		false, options.Threads, allowFailures)
 
 	LOG_INFO("RESTORE_INDEXING", "Indexing %s", top)
 	go func() {
@@ -763,7 +783,7 @@ func (manager *BackupManager) Restore(top string, revision int, inPlace bool, qu
 		}
 
 		if compareResult == 0 {
-			if quickMode && localEntry.IsFile() && localEntry.IsSameAs(remoteEntry) {
+			if options.QuickMode && localEntry.IsFile() && localEntry.IsSameAs(remoteEntry) {
 				LOG_TRACE("RESTORE_SKIP", "File %s unchanged (by size and timestamp)", localEntry.Path)
 				skippedFileSize += localEntry.Size
 				skippedFileCount++
@@ -780,7 +800,7 @@ func (manager *BackupManager) Restore(top string, revision int, inPlace bool, qu
 			if stat.Mode()&os.ModeSymlink != 0 {
 				isRegular, link, err := Readlink(fullPath)
 				if err == nil && link == remoteEntry.Link && !isRegular {
-					remoteEntry.RestoreMetadata(fullPath, nil, setOwner)
+					remoteEntry.RestoreMetadata(fullPath, nil, options.SetOwner)
 					if remoteEntry.IsHardLinkRoot() {
 						hardLinkTable[len(hardLinkTable)-1].willExist = true
 					}
@@ -805,7 +825,7 @@ func (manager *BackupManager) Restore(top string, revision int, inPlace bool, qu
 				LOG_ERROR("RESTORE_SYMLINK", "Can't create symlink %s: %v", remoteEntry.Path, err)
 				return 0
 			}
-			remoteEntry.RestoreMetadata(fullPath, nil, setOwner)
+			remoteEntry.RestoreMetadata(fullPath, nil, options.SetOwner)
 			LOG_TRACE("DOWNLOAD_DONE", "Symlink %s updated", remoteEntry.Path)
 
 		} else if remoteEntry.IsDir() {
@@ -834,7 +854,7 @@ func (manager *BackupManager) Restore(top string, revision int, inPlace bool, qu
 		} else if remoteEntry.IsSpecial() {
 			if stat, _ := os.Lstat(fullPath); stat != nil {
 				if remoteEntry.IsSameSpecial(stat) {
-					remoteEntry.RestoreMetadata(fullPath, nil, setOwner)
+					remoteEntry.RestoreMetadata(fullPath, nil, options.SetOwner)
 					if remoteEntry.IsHardLinkRoot() {
 						hardLinkTable[len(hardLinkTable)-1].willExist = true
 					}
@@ -856,7 +876,7 @@ func (manager *BackupManager) Restore(top string, revision int, inPlace bool, qu
LOG_ERROR("RESTORE_SPECIAL", "Failed to restore special file %s: %v", fullPath, err) return 0 } - remoteEntry.RestoreMetadata(fullPath, nil, setOwner) + remoteEntry.RestoreMetadata(fullPath, nil, options.SetOwner) LOG_TRACE("DOWNLOAD_DONE", "Special %s %s restored", remoteEntry.Path, remoteEntry.FmtSpecial()) } else { @@ -930,7 +950,7 @@ func (manager *BackupManager) Restore(top string, revision int, inPlace bool, qu fullPath := joinPath(top, file.Path) stat, _ := os.Stat(fullPath) if stat != nil { - if quickMode { + if options.QuickMode { if file.IsSameAsFileInfo(stat) { LOG_TRACE("RESTORE_SKIP", "File %s unchanged (by size and timestamp)", file.Path) skippedFileSize += file.Size @@ -962,8 +982,8 @@ func (manager *BackupManager) Restore(top string, revision int, inPlace bool, qu } newFile.Close() - file.RestoreMetadata(fullPath, nil, setOwner) - if !showStatistics { + file.RestoreMetadata(fullPath, nil, options.SetOwner) + if !options.ShowStatistics { LOG_INFO("DOWNLOAD_DONE", "Downloaded %s (0)", file.Path) downloadedFileSize += file.Size downloadedFiles = append(downloadedFiles, file) @@ -972,8 +992,8 @@ func (manager *BackupManager) Restore(top string, revision int, inPlace bool, qu continue } - downloaded, err := manager.RestoreFile(chunkDownloader, chunkMaker, file, top, inPlace, overwrite, showStatistics, - totalFileSize, downloadedFileSize, startDownloadingTime, allowFailures) + downloaded, err := manager.RestoreFile(chunkDownloader, chunkMaker, file, top, options.InPlace, overwrite, + options.ShowStatistics, totalFileSize, downloadedFileSize, startDownloadingTime, allowFailures) if err != nil { // RestoreFile returned an error; if allowFailures is false RestoerFile would error out and not return so here // we just need to show a warning @@ -992,7 +1012,7 @@ func (manager *BackupManager) Restore(top string, revision int, inPlace bool, qu skippedFileSize += file.Size skippedFileCount++ } - file.RestoreMetadata(fullPath, nil, setOwner) + file.RestoreMetadata(fullPath, nil, options.SetOwner) } for _, linkEntry := range hardLinks { @@ -1027,7 +1047,7 @@ func (manager *BackupManager) Restore(top string, revision int, inPlace bool, qu LOG_TRACE("RESTORE_HARDLINK", "Hard linked %s to %s", linkEntry.Path, hardLinkTable[i].entry.Path) } - if deleteMode && len(patterns) == 0 { + if options.DeleteMode && len(patterns) == 0 { // Reverse the order to make sure directories are empty before being deleted for i := range extraFiles { file := extraFiles[len(extraFiles)-1-i] @@ -1039,10 +1059,10 @@ func (manager *BackupManager) Restore(top string, revision int, inPlace bool, qu for _, entry := range directoryEntries { dir := joinPath(top, entry.Path) - entry.RestoreMetadata(dir, nil, setOwner) + entry.RestoreMetadata(dir, nil, options.SetOwner) } - if showStatistics { + if options.ShowStatistics { for _, file := range downloadedFiles { LOG_INFO("DOWNLOAD_DONE", "Downloaded %s (%d)", file.Path, file.Size) } @@ -1053,7 +1073,7 @@ func (manager *BackupManager) Restore(top string, revision int, inPlace bool, qu } LOG_INFO("RESTORE_END", "Restored %s to revision %d", top, revision) - if showStatistics { + if options.ShowStatistics { LOG_INFO("RESTORE_STATS", "Files: %d total, %s bytes", len(fileEntries), PrettySize(totalFileSize)) LOG_INFO("RESTORE_STATS", "Downloaded %d file, %s bytes, %d chunks", len(downloadedFiles), PrettySize(downloadedFileSize), chunkDownloader.numberOfDownloadedChunks) diff --git a/src/duplicacy_backupmanager_test.go b/src/duplicacy_backupmanager_test.go index 3eb0656..ad4d455 
--- a/src/duplicacy_backupmanager_test.go
+++ b/src/duplicacy_backupmanager_test.go
@@ -260,8 +260,17 @@ func TestBackupManager(t *testing.T) {
 	backupManager.Backup(testDir+"/repository1" /*quickMode=*/, true, threads, "first", false, false, 0, false, 1024, 1024)
 	time.Sleep(time.Duration(delay) * time.Second)
 	SetDuplicacyPreferencePath(testDir + "/repository2/.duplicacy")
-	failedFiles := backupManager.Restore(testDir+"/repository2", threads /*inPlace=*/, false /*quickMode=*/, false, threads /*overwrite=*/, true,
-		/*deleteMode=*/ false /*setowner=*/, false /*showStatistics=*/, false /*patterns=*/, nil /*allowFailures=*/, false)
+	failedFiles := backupManager.Restore(testDir+"/repository2", 1, RestoreOptions{
+		Threads:        threads,
+		Patterns:       nil,
+		InPlace:        false,
+		QuickMode:      false,
+		Overwrite:      true,
+		DeleteMode:     false,
+		SetOwner:       false,
+		ShowStatistics: false,
+		AllowFailures:  false,
+	})
 	assertRestoreFailures(t, failedFiles, 0)
 
 	for _, f := range []string{"file1", "file2", "dir1/file3"} {
@@ -285,8 +294,17 @@ func TestBackupManager(t *testing.T) {
 	backupManager.Backup(testDir+"/repository1" /*quickMode=*/, true, threads, "second", false, false, 0, false, 1024, 1024)
 	time.Sleep(time.Duration(delay) * time.Second)
 	SetDuplicacyPreferencePath(testDir + "/repository2/.duplicacy")
-	failedFiles = backupManager.Restore(testDir+"/repository2", 2 /*inPlace=*/, true /*quickMode=*/, true, threads /*overwrite=*/, true,
-		/*deleteMode=*/ false /*setowner=*/, false /*showStatistics=*/, false /*patterns=*/, nil /*allowFailures=*/, false)
+	failedFiles = backupManager.Restore(testDir+"/repository2", 2, RestoreOptions{
+		Threads:        threads,
+		Patterns:       nil,
+		InPlace:        true,
+		QuickMode:      true,
+		Overwrite:      true,
+		DeleteMode:     false,
+		SetOwner:       false,
+		ShowStatistics: false,
+		AllowFailures:  false,
+	})
 	assertRestoreFailures(t, failedFiles, 0)
 
 	for _, f := range []string{"file1", "file2", "dir1/file3"} {
@@ -314,8 +332,17 @@ func TestBackupManager(t *testing.T) {
 	createRandomFile(testDir+"/repository2/dir5/file5", 100)
 
 	SetDuplicacyPreferencePath(testDir + "/repository2/.duplicacy")
-	failedFiles = backupManager.Restore(testDir+"/repository2", 3 /*inPlace=*/, true /*quickMode=*/, false, threads /*overwrite=*/, true,
-		/*deleteMode=*/ true /*setowner=*/, false /*showStatistics=*/, false /*patterns=*/, nil /*allowFailures=*/, false)
+	failedFiles = backupManager.Restore(testDir+"/repository2", 3, RestoreOptions{
+		Threads:        threads,
+		Patterns:       nil,
+		InPlace:        true,
+		QuickMode:      false,
+		Overwrite:      true,
+		DeleteMode:     true,
+		SetOwner:       false,
+		ShowStatistics: false,
+		AllowFailures:  false,
+	})
 	assertRestoreFailures(t, failedFiles, 0)
 
 	for _, f := range []string{"file1", "file2", "dir1/file3"} {
@@ -342,8 +369,17 @@ func TestBackupManager(t *testing.T) {
 	os.Remove(testDir + "/repository1/file2")
 	os.Remove(testDir + "/repository1/dir1/file3")
 	SetDuplicacyPreferencePath(testDir + "/repository1/.duplicacy")
-	failedFiles = backupManager.Restore(testDir+"/repository1", 3 /*inPlace=*/, true /*quickMode=*/, false, threads /*overwrite=*/, true,
-		/*deleteMode=*/ false /*setowner=*/, false /*showStatistics=*/, false /*patterns=*/, []string{"+file2", "+dir1/file3", "-*"} /*allowFailures=*/, false)
+	failedFiles = backupManager.Restore(testDir+"/repository1", 3, RestoreOptions{
+		Threads:        threads,
+		Patterns:       []string{"+file2", "+dir1/file3", "-*"},
+		InPlace:        true,
+		QuickMode:      false,
+		Overwrite:      true,
+		DeleteMode:     false,
+		SetOwner:       false,
+		ShowStatistics: false,
+		AllowFailures:  false,
+	})
 	assertRestoreFailures(t, failedFiles, 0)
 
 	for _, f := range []string{"file1", "file2", "dir1/file3"} {
@@ -358,17 +394,17 @@ func TestBackupManager(t *testing.T) {
 	if numberOfSnapshots != 3 {
 		t.Errorf("Expected 3 snapshots but got %d", numberOfSnapshots)
 	}
-
-	backupManager.SnapshotManager.CheckSnapshots( /*snapshotID*/ "host1", /*revisions*/ []int{1, 2, 3}, /*tag*/ "", /*showStatistics*/ false,
-		/*showTabular*/ false, /*checkFiles*/ false, /*checkChunks*/ false, /*searchFossils*/ false, /*resurrect*/ false, /*rewiret*/ false, 1, /*allowFailures*/false)
+
+	backupManager.SnapshotManager.CheckSnapshots( /*snapshotID*/ "host1" /*revisions*/, []int{1, 2, 3} /*tag*/, "" /*showStatistics*/, false,
+		/*showTabular*/ false /*checkFiles*/, false /*checkChunks*/, false /*searchFossils*/, false /*resurrect*/, false /*rewiret*/, false, 1 /*allowFailures*/, false)
 	backupManager.SnapshotManager.PruneSnapshots("host1", "host1" /*revisions*/, []int{1} /*tags*/, nil /*retentions*/, nil,
 		/*exhaustive*/ false /*exclusive=*/, false /*ignoredIDs*/, nil /*dryRun*/, false /*deleteOnly*/, false /*collectOnly*/, false, 1)
 	numberOfSnapshots = backupManager.SnapshotManager.ListSnapshots( /*snapshotID*/ "host1" /*revisionsToList*/, nil /*tag*/, "" /*showFiles*/, false /*showChunks*/, false)
 	if numberOfSnapshots != 2 {
 		t.Errorf("Expected 2 snapshots but got %d", numberOfSnapshots)
 	}
-	backupManager.SnapshotManager.CheckSnapshots( /*snapshotID*/ "host1", /*revisions*/ []int{2, 3}, /*tag*/ "", /*showStatistics*/ false,
-		/*showTabular*/ false, /*checkFiles*/ false, /*checkChunks*/ false, /*searchFossils*/ false, /*resurrect*/ false, /*rewiret*/ false, 1, /*allowFailures*/ false)
+	backupManager.SnapshotManager.CheckSnapshots( /*snapshotID*/ "host1" /*revisions*/, []int{2, 3} /*tag*/, "" /*showStatistics*/, false,
+		/*showTabular*/ false /*checkFiles*/, false /*checkChunks*/, false /*searchFossils*/, false /*resurrect*/, false /*rewiret*/, false, 1 /*allowFailures*/, false)
 	backupManager.Backup(testDir+"/repository1" /*quickMode=*/, false, threads, "fourth", false, false, 0, false, 1024, 1024)
 	backupManager.SnapshotManager.PruneSnapshots("host1", "host1" /*revisions*/, nil /*tags*/, nil /*retentions*/, nil,
 		/*exhaustive*/ false /*exclusive=*/, true /*ignoredIDs*/, nil /*dryRun*/, false /*deleteOnly*/, false /*collectOnly*/, false, 1)
@@ -376,8 +412,8 @@ func TestBackupManager(t *testing.T) {
 	if numberOfSnapshots != 3 {
 		t.Errorf("Expected 3 snapshots but got %d", numberOfSnapshots)
 	}
-	backupManager.SnapshotManager.CheckSnapshots( /*snapshotID*/ "host1", /*revisions*/ []int{2, 3, 4}, /*tag*/ "", /*showStatistics*/ false,
-		/*showTabular*/ false, /*checkFiles*/ false, /*checkChunks*/ false, /*searchFossils*/ false, /*resurrect*/ false, /*rewiret*/ false, 1, /*allowFailures*/ false)
+	backupManager.SnapshotManager.CheckSnapshots( /*snapshotID*/ "host1" /*revisions*/, []int{2, 3, 4} /*tag*/, "" /*showStatistics*/, false,
+		/*showTabular*/ false /*checkFiles*/, false /*checkChunks*/, false /*searchFossils*/, false /*resurrect*/, false /*rewiret*/, false, 1 /*allowFailures*/, false)
 
 	/*buf := make([]byte, 1<<16)
 	runtime.Stack(buf, true)
@@ -478,9 +514,9 @@ func TestPersistRestore(t *testing.T) {
 	maxFileSize := 1000000
 	//maxFileSize := 200000
 
-	createRandomFileSeeded(testDir+"/repository1/file1", maxFileSize,1)
-	createRandomFileSeeded(testDir+"/repository1/file2", maxFileSize,2)
-	createRandomFileSeeded(testDir+"/repository1/dir1/file3", maxFileSize,3)
+	createRandomFileSeeded(testDir+"/repository1/file1", maxFileSize, 1)
createRandomFileSeeded(testDir+"/repository1/file2", maxFileSize, 2) + createRandomFileSeeded(testDir+"/repository1/dir1/file3", maxFileSize, 3) threads := 1 @@ -537,7 +573,6 @@ func TestPersistRestore(t *testing.T) { unencBackupManager.Backup(testDir+"/repository1" /*quickMode=*/, true, threads, "first", false, false, 0, false, 1024, 1024) time.Sleep(time.Duration(delay) * time.Second) - // do encrypted backup SetDuplicacyPreferencePath(testDir + "/repository1/.duplicacy") encBackupManager := CreateBackupManager("host1", storage, testDir, password, "", "", false) @@ -547,68 +582,67 @@ func TestPersistRestore(t *testing.T) { encBackupManager.Backup(testDir+"/repository1" /*quickMode=*/, true, threads, "first", false, false, 0, false, 1024, 1024) time.Sleep(time.Duration(delay) * time.Second) - // check snapshots - unencBackupManager.SnapshotManager.CheckSnapshots( /*snapshotID*/ "host1", /*revisions*/ []int{1}, /*tag*/ "", - /*showStatistics*/ true, /*showTabular*/ false, /*checkFiles*/ true, /*checkChunks*/ false, - /*searchFossils*/ false, /*resurrect*/ false, /*rewiret*/ false, 1, /*allowFailures*/ false) + unencBackupManager.SnapshotManager.CheckSnapshots( /*snapshotID*/ "host1" /*revisions*/, []int{1} /*tag*/, "", + /*showStatistics*/ true /*showTabular*/, false /*checkFiles*/, true /*checkChunks*/, false, + /*searchFossils*/ false /*resurrect*/, false /*rewiret*/, false, 1 /*allowFailures*/, false) + + encBackupManager.SnapshotManager.CheckSnapshots( /*snapshotID*/ "host1" /*revisions*/, []int{1} /*tag*/, "", + /*showStatistics*/ true /*showTabular*/, false /*checkFiles*/, true /*checkChunks*/, false, + /*searchFossils*/ false /*resurrect*/, false /*rewiret*/, false, 1 /*allowFailures*/, false) - encBackupManager.SnapshotManager.CheckSnapshots( /*snapshotID*/ "host1", /*revisions*/ []int{1}, /*tag*/ "", - /*showStatistics*/ true, /*showTabular*/ false, /*checkFiles*/ true, /*checkChunks*/ false, - /*searchFossils*/ false, /*resurrect*/ false, /*rewiret*/ false, 1, /*allowFailures*/ false) - // check functions checkAllUncorrupted := func(cmpRepository string) { - for _, f := range []string{"file1", "file2", "dir1/file3"} { - if _, err := os.Stat(testDir + cmpRepository + "/" + f); os.IsNotExist(err) { - t.Errorf("File %s does not exist", f) - continue - } - - hash1 := getFileHash(testDir + "/repository1/" + f) - hash2 := getFileHash(testDir + cmpRepository + "/" + f) - if hash1 != hash2 { - t.Errorf("File %s has different hashes: %s vs %s", f, hash1, hash2) - } - } - } - checkMissingFile := func(cmpRepository string, expectMissing string) { - for _, f := range []string{"file1", "file2", "dir1/file3"} { - _, err := os.Stat(testDir + cmpRepository + "/" + f) - if err==nil { - if f==expectMissing { - t.Errorf("File %s exists, expected to be missing", f) - } - continue - } - if os.IsNotExist(err) { - if f!=expectMissing { - t.Errorf("File %s does not exist", f) - } - continue - } - - hash1 := getFileHash(testDir + "/repository1/" + f) - hash2 := getFileHash(testDir + cmpRepository + "/" + f) - if hash1 != hash2 { - t.Errorf("File %s has different hashes: %s vs %s", f, hash1, hash2) - } - } - } - checkCorruptedFile := func(cmpRepository string, expectCorrupted string) { for _, f := range []string{"file1", "file2", "dir1/file3"} { if _, err := os.Stat(testDir + cmpRepository + "/" + f); os.IsNotExist(err) { t.Errorf("File %s does not exist", f) continue } - + hash1 := getFileHash(testDir + "/repository1/" + f) hash2 := getFileHash(testDir + cmpRepository + "/" + f) - if (f==expectCorrupted) 
+			if hash1 != hash2 {
+				t.Errorf("File %s has different hashes: %s vs %s", f, hash1, hash2)
+			}
+		}
+	}
+	checkMissingFile := func(cmpRepository string, expectMissing string) {
+		for _, f := range []string{"file1", "file2", "dir1/file3"} {
+			_, err := os.Stat(testDir + cmpRepository + "/" + f)
+			if err == nil {
+				if f == expectMissing {
+					t.Errorf("File %s exists, expected to be missing", f)
+				}
+				continue
+			}
+			if os.IsNotExist(err) {
+				if f != expectMissing {
+					t.Errorf("File %s does not exist", f)
+				}
+				continue
+			}
+
+			hash1 := getFileHash(testDir + "/repository1/" + f)
+			hash2 := getFileHash(testDir + cmpRepository + "/" + f)
+			if hash1 != hash2 {
+				t.Errorf("File %s has different hashes: %s vs %s", f, hash1, hash2)
+			}
+		}
+	}
+	checkCorruptedFile := func(cmpRepository string, expectCorrupted string) {
+		for _, f := range []string{"file1", "file2", "dir1/file3"} {
+			if _, err := os.Stat(testDir + cmpRepository + "/" + f); os.IsNotExist(err) {
+				t.Errorf("File %s does not exist", f)
+				continue
+			}
+
+			hash1 := getFileHash(testDir + "/repository1/" + f)
+			hash2 := getFileHash(testDir + cmpRepository + "/" + f)
+			if f == expectCorrupted {
 				if hash1 == hash2 {
 					t.Errorf("File %s has same hashes, expected to be corrupted: %s vs %s", f, hash1, hash2)
 				}
-
+
 			} else {
 				if hash1 != hash2 {
 					t.Errorf("File %s has different hashes: %s vs %s", f, hash1, hash2)
@@ -619,27 +653,36 @@ func TestPersistRestore(t *testing.T) {
 
 	// test restore all uncorrupted to repository3
 	SetDuplicacyPreferencePath(testDir + "/repository3/.duplicacy")
-	failedFiles := unencBackupManager.Restore(testDir+"/repository3", threads /*inPlace=*/, true /*quickMode=*/, false, threads /*overwrite=*/, false,
-		/*deleteMode=*/ false /*setowner=*/, false /*showStatistics=*/, false /*patterns=*/, nil /*allowFailures=*/, false)
+	failedFiles := unencBackupManager.Restore(testDir+"/repository3", 1, RestoreOptions{
+		Threads:        threads,
+		Patterns:       nil,
+		InPlace:        true,
+		QuickMode:      false,
+		Overwrite:      false,
+		DeleteMode:     false,
+		SetOwner:       false,
+		ShowStatistics: false,
+		AllowFailures:  false,
+	})
 	assertRestoreFailures(t, failedFiles, 0)
 	checkAllUncorrupted("/repository3")
 
 	// test for corrupt files and -persist
-	// corrupt a chunk
+	// corrupt a chunk
 	chunkToCorrupt1 := "/4d/538e5dfd2b08e782bfeb56d1360fb5d7eb9d8c4b2531cc2fca79efbaec910c"
-	// this should affect file1
+	// this should affect file1
 	chunkToCorrupt2 := "/2b/f953a766d0196ce026ae259e76e3c186a0e4bcd3ce10f1571d17f86f0a5497"
-	// this should affect dir1/file3
-
+	// this should affect dir1/file3
+
 	for i := 0; i < 2; i++ {
-		if i==0 {
+		if i == 0 {
 			// test corrupt chunks
 			corruptFile(testDir+"/unenc_storage"+"/chunks"+chunkToCorrupt1, 128, 128, 4)
 			corruptFile(testDir+"/enc_storage"+"/chunks"+chunkToCorrupt2, 128, 128, 4)
 		} else {
 			// test missing chunks
-			os.Remove(testDir+"/unenc_storage"+"/chunks"+chunkToCorrupt1)
-			os.Remove(testDir+"/enc_storage"+"/chunks"+chunkToCorrupt2)
+			os.Remove(testDir + "/unenc_storage" + "/chunks" + chunkToCorrupt1)
+			os.Remove(testDir + "/enc_storage" + "/chunks" + chunkToCorrupt2)
 		}
 
 		// This is to make sure that allowFailures is set to true. Note that this is not needed
@@ -654,30 +697,46 @@ func TestPersistRestore(t *testing.T) {
 
 		// check snapshots with --persist (allowFailures == true)
 		// this would cause a panic and os.Exit from duplicacy_log if allowFailures == false
-		unencBackupManager.SnapshotManager.CheckSnapshots( /*snapshotID*/ "host1", /*revisions*/ []int{1}, /*tag*/ "",
-			/*showStatistics*/ true, /*showTabular*/ false, /*checkFiles*/ true, /*checkChunks*/ false,
-			/*searchFossils*/ false, /*resurrect*/ false, /*rewrite*/ false, 1, /*allowFailures*/ true)
+		unencBackupManager.SnapshotManager.CheckSnapshots( /*snapshotID*/ "host1" /*revisions*/, []int{1} /*tag*/, "",
+			/*showStatistics*/ true /*showTabular*/, false /*checkFiles*/, true /*checkChunks*/, false,
+			/*searchFossils*/ false /*resurrect*/, false /*rewrite*/, false, 1 /*allowFailures*/, true)
 
-		encBackupManager.SnapshotManager.CheckSnapshots( /*snapshotID*/ "host1", /*revisions*/ []int{1}, /*tag*/ "",
-			/*showStatistics*/ true, /*showTabular*/ false, /*checkFiles*/ true, /*checkChunks*/ false,
-			/*searchFossils*/ false, /*resurrect*/ false, /*rewrite*/ false, 1, /*allowFailures*/ true)
+		encBackupManager.SnapshotManager.CheckSnapshots( /*snapshotID*/ "host1" /*revisions*/, []int{1} /*tag*/, "",
+			/*showStatistics*/ true /*showTabular*/, false /*checkFiles*/, true /*checkChunks*/, false,
+			/*searchFossils*/ false /*resurrect*/, false /*rewrite*/, false, 1 /*allowFailures*/, true)
-
 		// test restore corrupted, inPlace = true, corrupted files will have hash failures
-		os.RemoveAll(testDir+"/repository2")
+		os.RemoveAll(testDir + "/repository2")
 		SetDuplicacyPreferencePath(testDir + "/repository2/.duplicacy")
-		failedFiles = unencBackupManager.Restore(testDir+"/repository2", threads /*inPlace=*/, true /*quickMode=*/, false, threads /*overwrite=*/, false,
-			/*deleteMode=*/ false /*setowner=*/, false /*showStatistics=*/, false /*patterns=*/, nil /*allowFailures=*/, true)
+		failedFiles = unencBackupManager.Restore(testDir+"/repository2", 1, RestoreOptions{
+			Threads:        threads,
+			Patterns:       nil,
+			InPlace:        true,
+			QuickMode:      false,
+			Overwrite:      false,
+			DeleteMode:     false,
+			SetOwner:       false,
+			ShowStatistics: false,
+			AllowFailures:  true,
+		})
 		assertRestoreFailures(t, failedFiles, 1)
 
 		// check restore, expect file1 to be corrupted
 		checkCorruptedFile("/repository2", "file1")
-
-		os.RemoveAll(testDir+"/repository2")
+		os.RemoveAll(testDir + "/repository2")
 		SetDuplicacyPreferencePath(testDir + "/repository2/.duplicacy")
-		failedFiles = encBackupManager.Restore(testDir+"/repository2", threads /*inPlace=*/, true /*quickMode=*/, false, threads /*overwrite=*/, false,
-			/*deleteMode=*/ false /*setowner=*/, false /*showStatistics=*/, false /*patterns=*/, nil /*allowFailures=*/, true)
+		failedFiles = encBackupManager.Restore(testDir+"/repository2", 1, RestoreOptions{
+			Threads:        threads,
+			Patterns:       nil,
+			InPlace:        true,
+			QuickMode:      false,
+			Overwrite:      false,
+			DeleteMode:     false,
+			SetOwner:       false,
+			ShowStatistics: false,
+			AllowFailures:  true,
+		})
 		assertRestoreFailures(t, failedFiles, 1)
 
 		// check restore, expect file3 to be corrupted
@@ -685,20 +744,37 @@ func TestPersistRestore(t *testing.T) {
 		//SetLoggingLevel(DEBUG)
 		// test restore corrupted, inPlace = false, corrupted files will be missing
-		os.RemoveAll(testDir+"/repository2")
+		os.RemoveAll(testDir + "/repository2")
 		SetDuplicacyPreferencePath(testDir + "/repository2/.duplicacy")
-		failedFiles = unencBackupManager.Restore(testDir+"/repository2", threads /*inPlace=*/, false /*quickMode=*/, false, threads /*overwrite=*/, false,
-			/*deleteMode=*/ false /*setowner=*/, false /*showStatistics=*/, false /*patterns=*/, nil /*allowFailures=*/, true)
+		failedFiles = unencBackupManager.Restore(testDir+"/repository2", 1, RestoreOptions{
+			Threads:        threads,
+			Patterns:       nil,
+			InPlace:        false,
+			QuickMode:      false,
+			Overwrite:      false,
+			DeleteMode:     false,
+			SetOwner:       false,
+			ShowStatistics: false,
+			AllowFailures:  true,
+		})
 		assertRestoreFailures(t, failedFiles, 1)
 
 		// check restore, expect file1 to be corrupted
 		checkMissingFile("/repository2", "file1")
-
-		os.RemoveAll(testDir+"/repository2")
+		os.RemoveAll(testDir + "/repository2")
 		SetDuplicacyPreferencePath(testDir + "/repository2/.duplicacy")
-		failedFiles = encBackupManager.Restore(testDir+"/repository2", threads /*inPlace=*/, false /*quickMode=*/, false, threads /*overwrite=*/, false,
-			/*deleteMode=*/ false /*setowner=*/, false /*showStatistics=*/, false /*patterns=*/, nil /*allowFailures=*/, true)
+		failedFiles = encBackupManager.Restore(testDir+"/repository2", 1, RestoreOptions{
+			Threads:        threads,
+			Patterns:       nil,
+			InPlace:        false,
+			QuickMode:      false,
+			Overwrite:      false,
+			DeleteMode:     false,
+			SetOwner:       false,
+			ShowStatistics: false,
+			AllowFailures:  true,
+		})
 		assertRestoreFailures(t, failedFiles, 1)
 
 		// check restore, expect file3 to be corrupted
@@ -707,28 +783,64 @@ func TestPersistRestore(t *testing.T) {
 
 		// test restore corrupted files from different backups, inPlace = true
 		// with overwrite=true, corrupted file1 from unenc will be restored correctly from enc
 		// the latter will not touch the existing file3 with correct hash
-		os.RemoveAll(testDir+"/repository2")
-		failedFiles = unencBackupManager.Restore(testDir+"/repository2", threads /*inPlace=*/, true /*quickMode=*/, false, threads /*overwrite=*/, false,
-			/*deleteMode=*/ false /*setowner=*/, false /*showStatistics=*/, false /*patterns=*/, nil /*allowFailures=*/, true)
+		os.RemoveAll(testDir + "/repository2")
+		failedFiles = unencBackupManager.Restore(testDir+"/repository2", 1, RestoreOptions{
+			Threads:        threads,
+			Patterns:       nil,
+			InPlace:        true,
+			QuickMode:      false,
+			Overwrite:      false,
+			DeleteMode:     false,
+			SetOwner:       false,
+			ShowStatistics: false,
+			AllowFailures:  true,
+		})
 		assertRestoreFailures(t, failedFiles, 1)
 
-		failedFiles = encBackupManager.Restore(testDir+"/repository2", threads /*inPlace=*/, true /*quickMode=*/, false, threads /*overwrite=*/, true,
-			/*deleteMode=*/ false /*setowner=*/, false /*showStatistics=*/, false /*patterns=*/, nil /*allowFailures=*/, true)
+		failedFiles = encBackupManager.Restore(testDir+"/repository2", 1, RestoreOptions{
+			Threads:        threads,
+			Patterns:       nil,
+			InPlace:        true,
+			QuickMode:      false,
+			Overwrite:      true,
+			DeleteMode:     false,
+			SetOwner:       false,
+			ShowStatistics: false,
+			AllowFailures:  true,
+		})
 		assertRestoreFailures(t, failedFiles, 0)
 		checkAllUncorrupted("/repository2")
 
 		// restore to repository3, with overwrite and allowFailures (true/false), quickMode = false (use hashes)
 		// should always succeed as uncorrupted files already exist with correct hash, so these will be ignored
 		SetDuplicacyPreferencePath(testDir + "/repository3/.duplicacy")
-		failedFiles = unencBackupManager.Restore(testDir+"/repository3", threads /*inPlace=*/, true /*quickMode=*/, false, threads /*overwrite=*/, true,
-			/*deleteMode=*/ false /*setowner=*/, false /*showStatistics=*/, false /*patterns=*/, nil /*allowFailures=*/, false)
+		failedFiles = unencBackupManager.Restore(testDir+"/repository3", 1, RestoreOptions{
+			Threads:        threads,
+			Patterns:       nil,
+			InPlace:        true,
+			QuickMode:      false,
+			Overwrite:      true,
+			DeleteMode:     false,
+			SetOwner:       false,
+			ShowStatistics: false,
+			AllowFailures:  false,
+		})
 		assertRestoreFailures(t, failedFiles, 0)
 		checkAllUncorrupted("/repository3")
 
-		failedFiles = unencBackupManager.Restore(testDir+"/repository3", threads /*inPlace=*/, true /*quickMode=*/, false, threads /*overwrite=*/, true,
-			/*deleteMode=*/ false /*setowner=*/, false /*showStatistics=*/, false /*patterns=*/, nil /*allowFailures=*/, true)
+		failedFiles = unencBackupManager.Restore(testDir+"/repository3", 1, RestoreOptions{
+			Threads:        threads,
+			Patterns:       nil,
+			InPlace:        true,
+			QuickMode:      false,
+			Overwrite:      true,
+			DeleteMode:     false,
+			SetOwner:       false,
+			ShowStatistics: false,
+			AllowFailures:  true,
+		})
 		assertRestoreFailures(t, failedFiles, 0)
 		checkAllUncorrupted("/repository3")
 	}
-}
\ No newline at end of file
+}
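
For reference, a minimal sketch of how a caller drives BackupManager.Restore after this change: all restore flags travel in a single RestoreOptions value instead of the old eleven-parameter list removed from duplicacy_main.go. The example package, the restoreWithOptions helper, and its argument values are illustrative only; the RestoreOptions fields and the Restore(top, revision, options) signature come from the diff above, and the import path is assumed to match the github.com/gilbertchen/duplicacy/src path that duplicacy_main.go already uses.

package example

import (
	duplicacy "github.com/gilbertchen/duplicacy/src"
)

// restoreWithOptions is a hypothetical helper showing the new call shape.
func restoreWithOptions(manager *duplicacy.BackupManager, repository string, revision int) int {
	options := duplicacy.RestoreOptions{
		Threads:        4,     // download threads; Restore bumps values below 1 up to 1
		Patterns:       nil,   // include/exclude patterns; nil restores everything
		InPlace:        true,  // forced to true anyway when the preference path is non-default
		QuickMode:      true,  // skip files whose size and timestamp are unchanged (no -hash)
		Overwrite:      false, // -overwrite
		DeleteMode:     false, // -delete: remove local files that are not in the snapshot
		SetOwner:       true,  // the inverse of -ignore-owner
		ShowStatistics: false, // -stats
		AllowFailures:  false, // -persist: keep going when individual files fail
	}
	// The return value is the number of files that were not restored correctly.
	return manager.Restore(repository, revision, options)
}

Unset fields keep their Go zero values, so call sites only need to spell out the options they care about; any other behaviour shown in the comments follows the option-to-flag mapping in restoreRepository above.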