Mirror of https://github.com/jkl1337/duplicacy.git (synced 2026-01-07 22:24:46 -06:00)

Compare commits: 25 commits

799b040913, 41e3843bfa, 9e1d2ac1e6, bc40498d1b, 446bb4bcc8,
150ea13a0d, 8c5b7d5f63, 315dfff7d6, 0bc475ca4d, a0fa0fe7da,
01db72080c, 22ddc04698, 2aa3b2b737, 76f75cb0cb, ea4c4339e6,
fa294eabf4, 0ec262fd93, db3e0946bb, c426bf5af2, 823b82060c,
4308e3e6e9, 0391ecf941, 7ecf895d85, a43114da99, caaff6b4b2
.gitignore (vendored, 3 lines deleted)

@@ -1,3 +0,0 @@
-.idea
-duplicacy_main
GUIDE.md (19 lines changed)

@@ -133,7 +133,7 @@ OPTIONS:
    -t <tag>                 list snapshots with the specified tag
    -files                   print the file list in each snapshot
    -chunks                  print chunks in each snapshot or all chunks if no snapshot specified
-   -reset-password          take passwords from input rather than keychain/keyring or env
+   -reset-passwords         take passwords from input rather than keychain/keyring or env
    -storage <storage name>  retrieve snapshots from the specified storage
 ```

@@ -458,9 +458,24 @@ destination storage and is required.
 
 An include pattern starts with +, and an exclude pattern starts with -. Patterns may contain wildcard characters: * matches a path string of any length, and ? matches a single character. Note that both * and ? will match any character, including the path separator /.
 
+The path separator is always /, even on Windows.
+
 When matching a path against a list of patterns, the path is compared with the part after + or -, one pattern at a time. Therefore, the order of the patterns is significant. If a match with an include pattern is found, the path is said to be included without further comparisons. If a match with an exclude pattern is found, the path is said to be excluded without further comparison. If no match is found, the path will be excluded if all patterns are include patterns, but included otherwise.
 
-Patterns ending with a / apply to directories only, and patterns not ending with a / apply to files only. When a directory is excluded, all files and subdirectires under it will also be excluded. Note that the path separator is always /, even on Windows.
+Patterns ending with a / apply to directories only, and patterns not ending with a / apply to files only. Patterns ending with * or ?, however, apply to both directories and files. When a directory is excluded, all files and subdirectories under it will also be excluded. Therefore, to include a subdirectory, all parent directories must be explicitly included. For instance, the following pattern list doesn't do what is intended, since the `foo` directory will be excluded and `foo/bar` will never be visited:
+
+```
++foo/bar/*
+-*
+```
+
+The correct way is to include `foo` as well:
+
+```
++foo/bar/*
++foo/
+-*
+```
+
 The following pattern list includes only files under the directory foo/ but not files under the subdirectory foo/bar:
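Since * and ? may match any character including /, each pattern body (the part after + or -) behaves like a simple regular expression, and the first matching pattern decides the outcome. Below is a minimal, self-contained sketch of this first-match-wins evaluation; it is illustrative rather than Duplicacy's actual matcher, and it omits the directory/file distinction carried by a trailing /:

```go
package main

import (
	"fmt"
	"regexp"
	"strings"
)

// patternToRegexp translates a pattern body: '*' matches any run of
// characters (including '/') and '?' matches exactly one character.
func patternToRegexp(pattern string) *regexp.Regexp {
	quoted := regexp.QuoteMeta(pattern)
	quoted = strings.ReplaceAll(quoted, `\*`, `.*`)
	quoted = strings.ReplaceAll(quoted, `\?`, `.`)
	return regexp.MustCompile(`^` + quoted + `$`)
}

// isIncluded applies the first-match-wins rule described above: the first
// pattern that matches decides; with no match at all, the path is excluded
// only when every pattern is an include pattern.
func isIncluded(path string, patterns []string) bool {
	allIncludes := true
	for _, p := range patterns {
		include := strings.HasPrefix(p, "+")
		if !include {
			allIncludes = false
		}
		if patternToRegexp(p[1:]).MatchString(path) {
			return include
		}
	}
	return !allIncludes
}

func main() {
	patterns := []string{"+foo/bar/*", "+foo/", "-*"}
	fmt.Println(isIncluded("foo/", patterns))        // true: matched by +foo/
	fmt.Println(isIncluded("foo/bar/baz", patterns)) // true: matched by +foo/bar/*
	fmt.Println(isIncluded("other.txt", patterns))   // false: matched by -*
}
```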
LICENSE.md (1 line added)

@@ -2,4 +2,5 @@ Copyright © 2017 Acrosync LLC
 
 * Free for personal use or commercial trial
 * Non-trial commercial use requires per-user licenses available from [duplicacy.com](https://duplicacy.com/customer) at a cost of $20 per year
+* Commercial licenses are not required to restore or manage backups; only the backup command requires a valid commercial license
 * Modification and redistribution are permitted, but commercial use of derivative works is subject to the same requirements of this license
README.md (24 lines changed)

@@ -168,6 +168,27 @@ Storage URL: s3://amazon.com/bucket/path/to/storage (default region is us-east-1)
 
 You'll need to input an access key and a secret key to access your Amazon S3 storage.
 
+Minio-based S3-compatible storages are also supported by using the `minio` or `minios` backends:
+```
+Storage URL: minio://region@host/bucket/path/to/storage (without TLS)
+Storage URL: minios://region@host/bucket/path/to/storage (with TLS)
+```
+
+There is another backend that works with S3-compatible storage providers that require V2 signing:
+```
+Storage URL: s3c://region@host/bucket/path/to/storage
+```
+
 </details>
 
+<details> <summary>Wasabi</summary>
+
+```
+Storage URL: s3://us-east-1@s3.wasabisys.com/bucket/path/to/storage
+```
+
+[Wasabi](https://wasabi.com) is a relatively new cloud storage service providing an S3-compatible API.
+It is well suited for storing backups: it is much cheaper than Amazon S3, with a storage cost of $0.0039/GB/month, a download fee of $0.04/GB, and no additional charges on API calls.
+
+</details>
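For example, a repository could be initialized against such a backend with the usual init command; the snapshot ID, host, and bucket below are placeholders:

```
duplicacy init my-snapshot-id minio://us-east-1@minio.example.com/mybucket/path/to/storage
```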
@@ -201,7 +222,7 @@ Storage URL: b2://bucket
 
 You'll need to input the account id and application key.
 
-Backblaze's B2 storage is not only the least expensive (at 0.5 cent per GB per month), but also the fastest. We have been working closely with their developers to leverage the full potentials provided by the B2 API in order to maximize the transfer speed.
+Backblaze's B2 storage is one of the least expensive (at 0.5 cent per GB per month, with a download fee of 2 cents per GB, plus additional charges for API calls).
 
 </details>
 
@@ -303,4 +324,5 @@ For more details and other speed comparison results, please visit https://github
 
 * Free for personal use or commercial trial
 * Non-trial commercial use requires per-user licenses available from [duplicacy.com](https://duplicacy.com/customer) at a cost of $20 per year
+* Commercial licenses are not required to restore or manage backups; only the backup command requires a valid commercial license
 * Modification and redistribution are permitted, but commercial use of derivative works is subject to the same requirements of this license
duplicacy/duplicacy_main.go

@@ -534,7 +534,9 @@ func changePassword(context *cli.Context) {
 
     password := ""
     if preference.Encrypted {
-        password = duplicacy.GetPassword(*preference, "password", "Enter old password for storage %s:", false, true)
+        password = duplicacy.GetPassword(*preference, "password",
+                                         fmt.Sprintf("Enter old password for storage %s:", preference.StorageURL),
+                                         false, true)
     }
 
     config, _, err := duplicacy.DownloadConfig(storage, password)

@@ -1683,7 +1685,7 @@ func main() {
     app.Name = "duplicacy"
     app.HelpName = "duplicacy"
     app.Usage = "A new generation cloud backup tool based on lock-free deduplication"
-    app.Version = "2.0.6"
+    app.Version = "2.0.7"
 
     // If the program is interrupted, call the RunAtError function.
     c := make(chan os.Signal, 1)
src/duplicacy_azurestorage.go

@@ -14,14 +14,13 @@ import (
 type AzureStorage struct {
     RateLimitedStorage
 
-    clients   []*storage.BlobStorageClient
-    container string
+    containers []*storage.Container
 }
 
 func CreateAzureStorage(accountName string, accountKey string,
-                        container string, threads int) (azureStorage *AzureStorage, err error) {
+                        containerName string, threads int) (azureStorage *AzureStorage, err error) {
 
-    var clients []*storage.BlobStorageClient
+    var containers []*storage.Container
     for i := 0; i < threads; i++ {
 
         client, err := storage.NewBasicClient(accountName, accountKey)

@@ -31,21 +30,21 @@ func CreateAzureStorage(accountName string, accountKey string,
         }
 
         blobService := client.GetBlobService()
-        clients = append(clients, &blobService)
+        container := blobService.GetContainerReference(containerName)
+        containers = append(containers, container)
     }
 
-    exist, err := clients[0].ContainerExists(container)
+    exist, err := containers[0].Exists()
     if err != nil {
         return nil, err
     }
 
     if !exist {
-        return nil, fmt.Errorf("container %s does not exist", container)
+        return nil, fmt.Errorf("container %s does not exist", containerName)
     }
 
     azureStorage = &AzureStorage {
-        clients:   clients,
-        container: container,
+        containers: containers,
     }
 
     return

@@ -77,7 +76,7 @@ func (azureStorage *AzureStorage) ListFiles(threadIndex int, dir string) (files
 
     for {
 
-        results, err := azureStorage.clients[threadIndex].ListBlobs(azureStorage.container, parameters)
+        results, err := azureStorage.containers[threadIndex].ListBlobs(parameters)
         if err != nil {
             return nil, nil, err
         }

@@ -115,14 +114,15 @@ func (azureStorage *AzureStorage) ListFiles(threadIndex int, dir string) (files
 
 // DeleteFile deletes the file or directory at 'filePath'.
 func (storage *AzureStorage) DeleteFile(threadIndex int, filePath string) (err error) {
-    _, err = storage.clients[threadIndex].DeleteBlobIfExists(storage.container, filePath)
+    _, err = storage.containers[threadIndex].GetBlobReference(filePath).DeleteIfExists(nil)
     return err
 }
 
 // MoveFile renames the file.
 func (storage *AzureStorage) MoveFile(threadIndex int, from string, to string) (err error) {
-    source := storage.clients[threadIndex].GetBlobURL(storage.container, from)
-    err = storage.clients[threadIndex].CopyBlob(storage.container, to, source)
+    source := storage.containers[threadIndex].GetBlobReference(from)
+    destination := storage.containers[threadIndex].GetBlobReference(to)
+    err = destination.Copy(source.GetURL(), nil)
     if err != nil {
         return err
     }

@@ -136,7 +136,8 @@ func (storage *AzureStorage) CreateDirectory(threadIndex int, dir string) (err e
 
 // GetFileInfo returns the information about the file or directory at 'filePath'.
 func (storage *AzureStorage) GetFileInfo(threadIndex int, filePath string) (exist bool, isDir bool, size int64, err error) {
-    properties, err := storage.clients[threadIndex].GetBlobProperties(storage.container, filePath)
+    blob := storage.containers[threadIndex].GetBlobReference(filePath)
+    err = blob.GetProperties(nil)
     if err != nil {
         if strings.Contains(err.Error(), "404") {
             return false, false, 0, nil

@@ -145,7 +146,7 @@ func (storage *AzureStorage) GetFileInfo(threadIndex int, filePath string) (exis
         }
     }
 
-    return true, false, properties.ContentLength, nil
+    return true, false, blob.Properties.ContentLength, nil
 }
 
 // FindChunk finds the chunk with the specified id. If 'isFossil' is true, it will search for chunk files with

@@ -167,21 +168,22 @@ func (storage *AzureStorage) FindChunk(threadIndex int, chunkID string, isFossil
 
 // DownloadFile reads the file at 'filePath' into the chunk.
 func (storage *AzureStorage) DownloadFile(threadIndex int, filePath string, chunk *Chunk) (err error) {
-    readCloser, err := storage.clients[threadIndex].GetBlob(storage.container, filePath)
+    readCloser, err := storage.containers[threadIndex].GetBlobReference(filePath).Get(nil)
     if err != nil {
         return err
     }
 
     defer readCloser.Close()
 
-    _, err = RateLimitedCopy(chunk, readCloser, storage.DownloadRateLimit / len(storage.clients))
+    _, err = RateLimitedCopy(chunk, readCloser, storage.DownloadRateLimit / len(storage.containers))
     return err
 }
 
 // UploadFile writes 'content' to the file at 'filePath'.
 func (storage *AzureStorage) UploadFile(threadIndex int, filePath string, content []byte) (err error) {
-    reader := CreateRateLimitedReader(content, storage.UploadRateLimit / len(storage.clients))
-    return storage.clients[threadIndex].CreateBlockBlobFromReader(storage.container, filePath, uint64(len(content)), reader, nil)
+    reader := CreateRateLimitedReader(content, storage.UploadRateLimit / len(storage.containers))
+    blob := storage.containers[threadIndex].GetBlobReference(filePath)
+    return blob.CreateBlockBlobFromReader(reader, nil)
 }
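The refactoring above replaces per-call (client, container name) pairs with per-thread *storage.Container references, so blob operations hang off a blob reference instead of taking the container name on every call. A minimal standalone sketch of that container-centric style, assuming the legacy github.com/Azure/azure-sdk-for-go/storage package this file imports (account name, key, container, and blob path are placeholders):

```go
package main

import (
	"fmt"
	"log"

	"github.com/Azure/azure-sdk-for-go/storage"
)

func main() {
	// NewBasicClient and GetBlobService mirror the calls in CreateAzureStorage.
	client, err := storage.NewBasicClient("myaccount", "bXlrZXk=") // placeholders
	if err != nil {
		log.Fatal(err)
	}
	blobService := client.GetBlobService()

	// Per-container operations go through a *storage.Container reference.
	container := blobService.GetContainerReference("backups")
	exist, err := container.Exists()
	if err != nil || !exist {
		log.Fatalf("container check failed: exist=%v err=%v", exist, err)
	}

	// Blob-level operations hang off GetBlobReference, as in the diff above.
	blob := container.GetBlobReference("chunks/00/112233")
	if err := blob.GetProperties(nil); err == nil {
		fmt.Println("size:", blob.Properties.ContentLength)
	}
}
```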
src/duplicacy_backupmanager.go

@@ -76,7 +76,7 @@ func (manager *BackupManager) SetupSnapshotCache(storageName string) bool {
     preferencePath := GetDuplicacyPreferencePath()
     cacheDir := path.Join(preferencePath, "cache", storageName)
 
-    storage, err := CreateFileStorage(cacheDir, 1)
+    storage, err := CreateFileStorage(cacheDir, 2, false, 1)
     if err != nil {
         LOG_ERROR("BACKUP_CACHE", "Failed to create the snapshot cache dir: %v", err)
         return false

@@ -918,7 +918,9 @@ func (manager *BackupManager) Restore(top string, revision int, inPlace bool, qu
 
     if deleteMode && len(patterns) == 0 {
-        for _, file := range extraFiles {
+        // Reverse the order to make sure directories are empty before being deleted
+        for i := range extraFiles {
+            file := extraFiles[len(extraFiles) - 1 - i]
             fullPath := joinPath(top, file)
             os.Remove(fullPath)
             LOG_INFO("RESTORE_DELETE", "Deleted %s", file)

@@ -932,8 +934,6 @@ func (manager *BackupManager) Restore(top string, revision int, inPlace bool, qu
         }
     }
 
-    RemoveEmptyDirectories(top)
-
     if showStatistics {
         for _, file := range downloadedFiles {
             LOG_INFO("DOWNLOAD_DONE", "Downloaded %s (%d)", file.Path, file.Size)
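Walking a lexicographically ordered file list backwards guarantees that a directory's contents are removed before the directory itself, so os.Remove never encounters a non-empty directory. A tiny standalone illustration of the idea (the paths are hypothetical):

```go
package main

import (
	"fmt"
	"os"
	"path/filepath"
)

func main() {
	// extraFiles is assumed sorted so that parents precede children,
	// e.g. the order produced by a depth-first directory walk.
	extraFiles := []string{"dir5", "dir5/dir6", "dir5/file5", "file4"}

	top := os.TempDir()
	// Iterating in reverse removes "dir5/file5" and "dir5/dir6" before
	// "dir5" itself, so each directory is already empty when its turn comes.
	for i := range extraFiles {
		file := extraFiles[len(extraFiles)-1-i]
		fullPath := filepath.Join(top, file)
		if err := os.Remove(fullPath); err == nil {
			fmt.Println("Deleted", file)
		}
	}
}
```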
@@ -1196,13 +1196,17 @@ func (manager *BackupManager) RestoreFile(chunkDownloader *ChunkDownloader, chun
     fileHasher := manager.config.NewFileHasher()
     buffer := make([]byte, 64 * 1024)
     err = nil
-    for i := entry.StartChunk; i <= entry.EndChunk; i++ {
+    // We set to read one more byte so the file hash will be different if the file to be restored is a
+    // truncated portion of the existing file
+    for i := entry.StartChunk; i <= entry.EndChunk + 1; i++ {
         hasher := manager.config.NewKeyedHasher(manager.config.HashKey)
-        chunkSize := chunkDownloader.taskList[i].chunkLength
-        if i == entry.StartChunk {
-            chunkSize -= entry.StartOffset
+        chunkSize := 1 // the size of extra chunk beyond EndChunk
+        if i == entry.StartChunk {
+            chunkSize = chunkDownloader.taskList[i].chunkLength - entry.StartOffset
         } else if i == entry.EndChunk {
             chunkSize = entry.EndOffset
+        } else if i > entry.StartChunk && i < entry.EndChunk {
+            chunkSize = chunkDownloader.taskList[i].chunkLength
         }
         count := 0
         for count < chunkSize {
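The extra iteration past EndChunk feeds one byte beyond the file's recorded length into the hasher, so an on-disk file that merely starts with the restored content no longer hashes equal. A self-contained illustration of the idea, using SHA-256 in place of the configured keyed hasher:

```go
package main

import (
	"crypto/sha256"
	"fmt"
)

// hashPrefix hashes up to n+1 bytes of data: the n bytes of interest plus
// one extra byte, if present, so a longer file yields a different digest.
func hashPrefix(data []byte, n int) [32]byte {
	end := n + 1
	if end > len(data) {
		end = len(data)
	}
	return sha256.Sum256(data[:end])
}

func main() {
	restored := []byte("hello")      // content recorded in the snapshot
	onDisk := []byte("hello, world") // existing file is a longer superset

	// Hashing only the first 5 bytes would make the two files look equal;
	// including one extra byte exposes the truncation.
	fmt.Println(hashPrefix(restored, 5) == hashPrefix(onDisk, 5)) // false
}
```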
src/duplicacy_backupmanager_test.go

@@ -104,6 +104,27 @@ func modifyFile(path string, portion float32) {
     }
 }
 
+func checkExistence(t *testing.T, path string, exists bool, isDir bool) {
+    stat, err := os.Stat(path)
+    if exists {
+        if err != nil {
+            t.Errorf("%s does not exist: %v", path, err)
+        } else if isDir {
+            if !stat.Mode().IsDir() {
+                t.Errorf("%s is not a directory", path)
+            }
+        } else {
+            if stat.Mode().IsDir() {
+                t.Errorf("%s is not a file", path)
+            }
+        }
+    } else {
+        if err == nil || !os.IsNotExist(err) {
+            t.Errorf("%s may exist: %v", path, err)
+        }
+    }
+}
+
 func truncateFile(path string) {
     file, err := os.OpenFile(path, os.O_WRONLY, 0644)
     if err != nil {

@@ -173,6 +194,9 @@ func TestBackupManager(t *testing.T) {
 
     os.Mkdir(testDir + "/repository1", 0700)
+    os.Mkdir(testDir + "/repository1/dir1", 0700)
+    os.Mkdir(testDir + "/repository1/.duplicacy", 0700)
     os.Mkdir(testDir + "/repository2", 0700)
+    os.Mkdir(testDir + "/repository2/.duplicacy", 0700)
 
     maxFileSize := 1000000
     //maxFileSize := 200000

@@ -215,14 +239,14 @@ func TestBackupManager(t *testing.T) {
 
     time.Sleep(time.Duration(delay) * time.Second)
 
-    SetDuplicacyPreferencePath(testDir + "/repository1")
+    SetDuplicacyPreferencePath(testDir + "/repository1/.duplicacy")
     backupManager := CreateBackupManager("host1", storage, testDir, password)
     backupManager.SetupSnapshotCache("default")
 
-    SetDuplicacyPreferencePath(testDir + "/repository1")
+    SetDuplicacyPreferencePath(testDir + "/repository1/.duplicacy")
     backupManager.Backup(testDir + "/repository1", /*quickMode=*/true, threads, "first", false, false)
     time.Sleep(time.Duration(delay) * time.Second)
-    SetDuplicacyPreferencePath(testDir + "/repository2")
+    SetDuplicacyPreferencePath(testDir + "/repository2/.duplicacy")
     backupManager.Restore(testDir + "/repository2", threads, /*inPlace=*/false, /*quickMode=*/false, threads, /*overwrite=*/true,
         /*deleteMode=*/false, /*showStatistics=*/false, /*patterns=*/nil)

@@ -243,10 +267,10 @@ func TestBackupManager(t *testing.T) {
     modifyFile(testDir + "/repository1/file2", 0.2)
     modifyFile(testDir + "/repository1/dir1/file3", 0.3)
 
-    SetDuplicacyPreferencePath(testDir + "/repository1")
+    SetDuplicacyPreferencePath(testDir + "/repository1/.duplicacy")
     backupManager.Backup(testDir + "/repository1", /*quickMode=*/true, threads, "second", false, false)
     time.Sleep(time.Duration(delay) * time.Second)
-    SetDuplicacyPreferencePath(testDir + "/repository2")
+    SetDuplicacyPreferencePath(testDir + "/repository2/.duplicacy")
     backupManager.Restore(testDir + "/repository2", 2, /*inPlace=*/true, /*quickMode=*/true, threads, /*overwrite=*/true,
         /*deleteMode=*/false, /*showStatistics=*/false, /*patterns=*/nil)

@@ -258,13 +282,25 @@ func TestBackupManager(t *testing.T) {
         }
     }
 
+    // Truncate file2 and add a few empty directories
     truncateFile(testDir + "/repository1/file2")
-    SetDuplicacyPreferencePath(testDir + "/repository1")
+    os.Mkdir(testDir + "/repository1/dir2", 0700)
+    os.Mkdir(testDir + "/repository1/dir2/dir3", 0700)
+    os.Mkdir(testDir + "/repository1/dir4", 0700)
+    SetDuplicacyPreferencePath(testDir + "/repository1/.duplicacy")
     backupManager.Backup(testDir + "/repository1", /*quickMode=*/false, threads, "third", false, false)
     time.Sleep(time.Duration(delay) * time.Second)
-    SetDuplicacyPreferencePath(testDir + "/repository2")
+
+    // Create some directories and files under repository2 that will be deleted during restore
+    os.Mkdir(testDir + "/repository2/dir5", 0700)
+    os.Mkdir(testDir + "/repository2/dir5/dir6", 0700)
+    os.Mkdir(testDir + "/repository2/dir7", 0700)
+    createRandomFile(testDir + "/repository2/file4", 100)
+    createRandomFile(testDir + "/repository2/dir5/file5", 100)
+
+    SetDuplicacyPreferencePath(testDir + "/repository2/.duplicacy")
     backupManager.Restore(testDir + "/repository2", 3, /*inPlace=*/true, /*quickMode=*/false, threads, /*overwrite=*/true,
-        /*deleteMode=*/false, /*showStatistics=*/false, /*patterns=*/nil)
+        /*deleteMode=*/true, /*showStatistics=*/false, /*patterns=*/nil)
 
     for _, f := range []string{ "file1", "file2", "dir1/file3" } {
         hash1 := getFileHash(testDir + "/repository1/" + f)

@@ -274,9 +310,22 @@ func TestBackupManager(t *testing.T) {
         }
     }
 
+    // These files/dirs should not exist because deleteMode == true
+    checkExistence(t, testDir + "/repository2/dir5", false, false)
+    checkExistence(t, testDir + "/repository2/dir5/dir6", false, false)
+    checkExistence(t, testDir + "/repository2/dir7", false, false)
+    checkExistence(t, testDir + "/repository2/file4", false, false)
+    checkExistence(t, testDir + "/repository2/dir5/file5", false, false)
+
+    // These empty dirs should exist
+    checkExistence(t, testDir + "/repository2/dir2", true, true)
+    checkExistence(t, testDir + "/repository2/dir2/dir3", true, true)
+    checkExistence(t, testDir + "/repository2/dir4", true, true)
+
+    // Remove file2 and dir1/file3 and restore them from revision 3
+    os.Remove(testDir + "/repository1/file2")
+    os.Remove(testDir + "/repository1/dir1/file3")
+    SetDuplicacyPreferencePath(testDir + "/repository1/.duplicacy")
+    backupManager.Restore(testDir + "/repository1", 3, /*inPlace=*/true, /*quickMode=*/false, threads, /*overwrite=*/true,
+        /*deleteMode=*/false, /*showStatistics=*/false, /*patterns=*/[]string{"+file2", "+dir1/file3", "-*"})
src/duplicacy_chunkdownloader.go

@@ -314,7 +314,11 @@ func (downloader *ChunkDownloader) Download(threadIndex int, task ChunkDownloadT
 
     if !exist {
         // A chunk is not found. This is a serious error and hopefully it will never happen.
-        LOG_FATAL("DOWNLOAD_CHUNK", "Chunk %s can't be found", chunkID)
+        if err != nil {
+            LOG_FATAL("DOWNLOAD_CHUNK", "Chunk %s can't be found: %v", chunkID, err)
+        } else {
+            LOG_FATAL("DOWNLOAD_CHUNK", "Chunk %s can't be found", chunkID)
+        }
         return false
     }
     LOG_DEBUG("CHUNK_FOSSIL", "Chunk %s has been marked as a fossil", chunkID)
src/duplicacy_filestorage.go

@@ -18,19 +18,25 @@ import (
 type FileStorage struct {
     RateLimitedStorage
 
+    minimumLevel    int  // The minimum level of directories to dive into before searching for the chunk file.
+    isCacheNeeded   bool // Network storages require caching
     storageDir      string
     numberOfThreads int
 }
 
 // CreateFileStorage creates a file storage.
-func CreateFileStorage(storageDir string, threads int) (storage *FileStorage, err error) {
+func CreateFileStorage(storageDir string, minimumLevel int, isCacheNeeded bool, threads int) (storage *FileStorage, err error) {
 
     var stat os.FileInfo
 
     stat, err = os.Stat(storageDir)
-    if os.IsNotExist(err) {
-        err = os.MkdirAll(storageDir, 0744)
-        if err != nil {
+    if err != nil {
+        if os.IsNotExist(err) {
+            err = os.MkdirAll(storageDir, 0744)
+            if err != nil {
+                return nil, err
+            }
+        } else {
             return nil, err
         }
     } else {

@@ -45,6 +51,8 @@ func CreateFileStorage(storageDir string, threads int) (storage *FileStorage, er
 
     storage = &FileStorage {
         storageDir : storageDir,
+        minimumLevel: minimumLevel,
+        isCacheNeeded: isCacheNeeded,
         numberOfThreads: threads,
     }

@@ -128,16 +136,18 @@ func (storage *FileStorage) FindChunk(threadIndex int, chunkID string, isFossil
         suffix = ".fsl"
     }
 
-    // The minimum level of directories to dive into before searching for the chunk file.
-    minimumLevel := 2
-
     for level := 0; level * 2 < len(chunkID); level ++ {
-        if level >= minimumLevel {
+        if level >= storage.minimumLevel {
             filePath = path.Join(dir, chunkID[2 * level:]) + suffix
-            if stat, err := os.Stat(filePath); err == nil && !stat.IsDir() {
-                return filePath[len(storage.storageDir) + 1:], true, stat.Size(), nil
-            } else if err == nil && stat.IsDir() {
-                return filePath[len(storage.storageDir) + 1:], true, 0, fmt.Errorf("The path %s is a directory", filePath)
+            // Use Lstat() instead of Stat() since 1) Stat() doesn't work for deduplicated disks on Windows and 2) there isn't
+            // really a need to follow the link if filePath is a link.
+            stat, err := os.Lstat(filePath)
+            if err != nil {
+                LOG_DEBUG("FS_FIND", "File %s can't be found: %v", filePath, err)
+            } else if stat.IsDir() {
+                return filePath[len(storage.storageDir) + 1:], false, 0, fmt.Errorf("The path %s is a directory", filePath)
+            } else {
+                return filePath[len(storage.storageDir) + 1:], true, stat.Size(), nil
             }
         }

@@ -149,7 +159,7 @@ func (storage *FileStorage) FindChunk(threadIndex int, chunkID string, isFossil
         continue
     }
 
-    if level < minimumLevel {
+    if level < storage.minimumLevel {
         // Create the subdirectory if it doesn't exist.
 
         if err == nil && !stat.IsDir() {

@@ -164,7 +174,6 @@ func (storage *FileStorage) FindChunk(threadIndex int, chunkID string, isFossil
             return "", false, 0, err
         }
     }
-
     dir = subDir
     continue
 }

@@ -174,9 +183,7 @@ func (storage *FileStorage) FindChunk(threadIndex int, chunkID string, isFossil
 
     }
 
-    LOG_FATAL("CHUNK_FIND", "Chunk %s is still not found after having searched a maximum level of directories",
-        chunkID)
-    return "", false, 0, nil
+    return "", false, 0, fmt.Errorf("The maximum level of directories searched")
 }

@@ -241,7 +248,7 @@ func (storage *FileStorage) UploadFile(threadIndex int, filePath string, content
 
 // If a local snapshot cache is needed for the storage to avoid downloading/uploading chunks too often when
 // managing snapshots.
-func (storage *FileStorage) IsCacheNeeded () (bool) { return false }
+func (storage *FileStorage) IsCacheNeeded () (bool) { return storage.isCacheNeeded }
 
 // If the 'MoveFile' method is implemented.
 func (storage *FileStorage) IsMoveFileImplemented() (bool) { return true }
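The minimumLevel parameter added above controls how many two-character directory levels a chunk ID is split into before the remainder becomes the file name: 0 for the flat layout, 2 for the default nested layout. A hedged sketch of that mapping follows; chunkPath is an illustrative helper, not a function in this file:

```go
package main

import (
	"fmt"
	"path"
)

// chunkPath nests a chunk under `level` two-character directories, mirroring
// the loop in FindChunk where chunkID[2*level:] becomes the file name.
func chunkPath(storageDir, chunkID string, level int) string {
	dir := path.Join(storageDir, "chunks")
	for l := 0; l < level; l++ {
		dir = path.Join(dir, chunkID[2*l:2*l+2])
	}
	return path.Join(dir, chunkID[2*level:])
}

func main() {
	id := "9f8e7d6c5b4a"
	fmt.Println(chunkPath("/storage", id, 0)) // flat layout:    /storage/chunks/9f8e7d6c5b4a
	fmt.Println(chunkPath("/storage", id, 2)) // default layout: /storage/chunks/9f/8e/7d6c5b4a
}
```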
src/duplicacy_snapshotmanager_test.go

@@ -95,14 +95,14 @@ func createTestSnapshotManager(testDir string) *SnapshotManager {
     os.RemoveAll(testDir)
     os.MkdirAll(testDir, 0700)
 
-    storage, _ := CreateFileStorage(testDir, 1)
+    storage, _ := CreateFileStorage(testDir, 2, false, 1)
     storage.CreateDirectory(0, "chunks")
     storage.CreateDirectory(0, "snapshots")
     config := CreateConfig()
     snapshotManager := CreateSnapshotManager(config, storage)
 
     cacheDir := path.Join(testDir, "cache")
-    snapshotCache, _ := CreateFileStorage(cacheDir, 1)
+    snapshotCache, _ := CreateFileStorage(cacheDir, 2, false, 1)
     snapshotCache.CreateDirectory(0, "chunks")
     snapshotCache.CreateDirectory(0, "snapshots")
src/duplicacy_storage.go

@@ -127,6 +127,7 @@ func CreateStorage(preference Preference, resetPassword bool, threads int) (stor
     storageURL := preference.StorageURL
 
     isFileStorage := false
+    isCacheNeeded := false
 
     if strings.HasPrefix(storageURL, "/") {
         isFileStorage = true

@@ -140,11 +141,30 @@ func CreateStorage(preference Preference, resetPassword bool, threads int) (stor
 
         if !isFileStorage && strings.HasPrefix(storageURL, `\\`) {
             isFileStorage = true
+            isCacheNeeded = true
         }
     }
 
     if isFileStorage {
-        fileStorage, err := CreateFileStorage(storageURL, threads)
+        fileStorage, err := CreateFileStorage(storageURL, 2, isCacheNeeded, threads)
         if err != nil {
             LOG_ERROR("STORAGE_CREATE", "Failed to load the file storage at %s: %v", storageURL, err)
             return nil
         }
         return fileStorage
     }
 
+    if strings.HasPrefix(storageURL, "flat://") {
+        fileStorage, err := CreateFileStorage(storageURL[7:], 0, false, threads)
+        if err != nil {
+            LOG_ERROR("STORAGE_CREATE", "Failed to load the file storage at %s: %v", storageURL, err)
+            return nil
+        }
+        return fileStorage
+    }
+
+    if strings.HasPrefix(storageURL, "samba://") {
+        fileStorage, err := CreateFileStorage(storageURL[8:], 2, true, threads)
+        if err != nil {
+            LOG_ERROR("STORAGE_CREATE", "Failed to load the file storage at %s: %v", storageURL, err)
+            return nil
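Seen together, the three file-storage variants differ only in the (minimumLevel, isCacheNeeded) pair handed to CreateFileStorage. A condensed sketch of the dispatch, assuming it lives in the same package (the helper name is hypothetical; only the prefixes and parameters above are from the source):

```go
// newFileStorageForURL shows how the URL scheme selects the nesting level
// and caching behavior, following the branches of CreateStorage above.
func newFileStorageForURL(storageURL string, threads int) (*FileStorage, error) {
	switch {
	case strings.HasPrefix(storageURL, "flat://"):
		// Flat layout: chunks live directly under chunks/, no cache needed.
		return CreateFileStorage(storageURL[7:], 0, false, threads)
	case strings.HasPrefix(storageURL, "samba://"):
		// Network share: two nesting levels, local snapshot cache required.
		return CreateFileStorage(storageURL[8:], 2, true, threads)
	default:
		// Plain local path: two nesting levels, no cache.
		return CreateFileStorage(storageURL, 2, false, threads)
	}
}
```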
src/duplicacy_storage_test.go

@@ -41,7 +41,7 @@ func init() {
 func loadStorage(localStoragePath string, threads int) (Storage, error) {
 
     if testStorageName == "" || testStorageName == "file" {
-        return CreateFileStorage(localStoragePath, threads)
+        return CreateFileStorage(localStoragePath, 2, false, threads)
     }
 
     config, err := ioutil.ReadFile("test_storage.conf")

@@ -61,10 +61,14 @@ func loadStorage(localStoragePath string, threads int) (Storage, error) {
         return nil, fmt.Errorf("No storage named '%s' found", testStorageName)
     }
 
-    if testStorageName == "sftp" {
+    if testStorageName == "flat" {
+        return CreateFileStorage(localStoragePath, 0, false, threads)
+    } else if testStorageName == "samba" {
+        return CreateFileStorage(localStoragePath, 2, true, threads)
+    } else if testStorageName == "sftp" {
         port, _ := strconv.Atoi(storage["port"])
         return CreateSFTPStorageWithPassword(storage["server"], port, storage["username"], storage["directory"], storage["password"], threads)
-    } else if testStorageName == "s3" {
+    } else if testStorageName == "s3" || testStorageName == "wasabi" {
         return CreateS3Storage(storage["region"], storage["endpoint"], storage["bucket"], storage["directory"], storage["access_key"], storage["secret_key"], threads, true, false)
     } else if testStorageName == "s3c" {
         return CreateS3CStorage(storage["region"], storage["endpoint"], storage["bucket"], storage["directory"], storage["access_key"], storage["secret_key"], threads)

@@ -454,3 +458,64 @@ func TestStorage(t *testing.T) {
     }
 }
 
+func TestCleanStorage(t *testing.T) {
+    setTestingT(t)
+    SetLoggingLevel(INFO)
+
+    defer func() {
+        if r := recover(); r != nil {
+            switch e := r.(type) {
+            case Exception:
+                t.Errorf("%s %s", e.LogID, e.Message)
+                debug.PrintStack()
+            default:
+                t.Errorf("%v", e)
+                debug.PrintStack()
+            }
+        }
+    } ()
+
+    testDir := path.Join(os.TempDir(), "duplicacy_test", "storage_test")
+    os.RemoveAll(testDir)
+    os.MkdirAll(testDir, 0700)
+
+    LOG_INFO("STORAGE_TEST", "storage: %s", testStorageName)
+
+    storage, err := loadStorage(testDir, 1)
+    if err != nil {
+        t.Errorf("Failed to create storage: %v", err)
+        return
+    }
+
+    directories := make([]string, 0, 1024)
+    directories = append(directories, "snapshots/")
+    directories = append(directories, "chunks/")
+
+    for len(directories) > 0 {
+
+        dir := directories[len(directories) - 1]
+        directories = directories[:len(directories) - 1]
+
+        LOG_INFO("LIST_FILES", "Listing %s", dir)
+
+        files, _, err := storage.ListFiles(0, dir)
+        if err != nil {
+            LOG_ERROR("LIST_FILES", "Failed to list the directory %s: %v", dir, err)
+            return
+        }
+
+        for _, file := range files {
+            if len(file) > 0 && file[len(file) - 1] == '/' {
+                directories = append(directories, dir + file)
+            } else {
+                storage.DeleteFile(0, dir + file)
+                LOG_INFO("DELETE_FILE", "Deleted file %s", file)
+            }
+        }
+    }
+
+    storage.DeleteFile(0, "config")
+    LOG_INFO("DELETE_FILE", "Deleted config")
+}
src/duplicacy_utils.go

@@ -9,7 +9,6 @@ import (
     "os"
     "bufio"
     "io"
-    "io/ioutil"
     "time"
     "path"
     "path/filepath"

@@ -190,54 +189,6 @@ func SavePassword(preference Preference, passwordType string, password string) {
     keyringSet(passwordID, password)
 }
 
-// RemoveEmptyDirectories removes all empty subdirectories under top.
-func RemoveEmptyDirectories(top string) {
-
-    stack := make([]string, 0, 256)
-
-    stack = append(stack, top)
-
-    for len(stack) > 0 {
-
-        dir := stack[len(stack) - 1]
-        stack = stack[:len(stack) - 1]
-
-        files, err := ioutil.ReadDir(dir)
-
-        if err != nil {
-            continue
-        }
-
-        for _, file := range files {
-            if file.IsDir() && file.Name()[0] != '.' {
-                stack = append(stack, path.Join(dir, file.Name()))
-            }
-        }
-
-        if len(files) == 0 {
-            if os.Remove(dir) != nil {
-                continue
-            }
-
-            dir = path.Dir(dir)
-            for len(dir) > len(top) {
-                files, err := ioutil.ReadDir(dir)
-                if err != nil {
-                    break
-                }
-
-                if len(files) == 0 {
-                    if os.Remove(dir) != nil {
-                        break
-                    }
-                }
-                dir = path.Dir(dir)
-            }
-        }
-    }
-}
-
 // The following code was modified from the online article 'Matching Wildcards: An Algorithm', by Kirk J. Krauss,
 // Dr. Dobb's, August 26, 2008. However, the version in the article doesn't handle cases like matching 'abcccd'
 // against '*ccd', and the version here fixed that issue.
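The Krauss-style matcher referenced in that comment can be written as a compact two-pointer loop that backtracks to the most recent '*' on a mismatch. The sketch below shows the general technique rather than the repository's exact code; the backtracking step is what makes '*ccd' match 'abcccd':

```go
package main

import "fmt"

// matchWildcard reports whether name matches pattern, where '*' matches any
// run of characters (including '/') and '?' matches exactly one character.
// On a mismatch it backtracks to the most recent '*' and lets it absorb one
// more character of name before retrying.
func matchWildcard(pattern, name string) bool {
	p, n := 0, 0
	star, mark := -1, 0
	for n < len(name) {
		switch {
		case p < len(pattern) && (pattern[p] == '?' || pattern[p] == name[n]):
			p++
			n++
		case p < len(pattern) && pattern[p] == '*':
			star, mark = p, n // remember the '*' and where it started matching
			p++
		case star >= 0:
			p = star + 1 // backtrack: the '*' swallows one more character
			mark++
			n = mark
		default:
			return false
		}
	}
	// Trailing '*'s in the pattern can match the empty string.
	for p < len(pattern) && pattern[p] == '*' {
		p++
	}
	return p == len(pattern)
}

func main() {
	fmt.Println(matchWildcard("*ccd", "abcccd")) // true
	fmt.Println(matchWildcard("a?c", "abc"))     // true
	fmt.Println(matchWildcard("a*d", "abc"))     // false
}
```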