Compare commits

..

113 Commits

Author SHA1 Message Date
Gilbert Chen
799b040913 Add Wasabi storage to tests 2017-08-12 11:25:25 -04:00
gilbertchen
41e3843bfa Update README.md 2017-08-12 10:59:00 -04:00
gilbertchen
9e1d2ac1e6 Merge pull request #110 from clbn/patch-1
Update README.md
2017-08-12 10:51:44 -04:00
Alex Olshansky
bc40498d1b Update README.md
Fixes typo ("serivce" -> "service") and factual error (B2 is not the least expensive since Wasabi was added).
2017-08-12 15:24:27 +02:00
Gilbert Chen
446bb4bcc8 Add a pseudo test to clean the storage 2017-08-11 13:15:59 -04:00
Gilbert Chen
150ea13a0d Fixed a build error in SnapshotManager tests caused by changes in CreateFileStorage 2017-08-09 12:12:21 -04:00
Gilbert Chen
8c5b7d5f63 Fixed Azure storage after updating gilbertchen/azure-sdk-for-g 2017-08-09 00:14:25 -04:00
Gilbert Chen
315dfff7d6 Add caching to network drives 2017-08-08 23:10:22 -04:00
Gilbert Chen
0bc475ca4d Allow backups to be restore and managed without a license 2017-08-05 21:24:05 -04:00
Gilbert Chen
a0fa0fe7da Fixed #101: show storage name correctly in the password command 2017-08-05 12:30:13 -04:00
gilbertchen
01db72080c Update GUIDE.md 2017-08-05 11:52:17 -04:00
Gilbert Chen
22ddc04698 Restore empty directories 2017-08-05 10:56:15 -04:00
Gilbert Chen
2aa3b2b737 Fixed a chunk not found error if the storage is a Windows network share with deduplication on 2017-08-02 22:04:22 -04:00
Gilbert Chen
76f75cb0cb Merge branch 'master' of https://github.com/gilbertchen/duplicacy 2017-08-01 23:09:18 -04:00
Gilbert Chen
ea4c4339e6 Bump version to 2.0.7 2017-08-01 23:08:57 -04:00
Gilbert Chen
fa294eabf4 When a chunk can't be found, print the error if it is not nil 2017-08-01 23:08:11 -04:00
gilbertchen
0ec262fd93 Merge pull request #102 from whereisaaron/patch-1
Update option name reset-password -> reset-passwords
2017-07-27 23:55:41 -04:00
Gilbert Chen
db3e0946bb Fixed a bug that caused a truncated file not to be restored correctly 2017-07-27 23:27:59 -04:00
Gilbert Chen
c426bf5af2 Merge branch 'master' of https://github.com/gilbertchen/duplicacy 2017-07-27 22:43:00 -04:00
Gilbert Chen
823b82060c Add a storage prefix flat:// that can handle a flat chunk directory 2017-07-27 22:42:48 -04:00
Aaron Roydhouse
4308e3e6e9 Update option name reset-password -> reset-passwords 2017-07-27 16:38:48 -04:00
gilbertchen
0391ecf941 Update README.md 2017-07-25 23:48:52 -04:00
gilbertchen
7ecf895d85 Update README.md 2017-07-25 23:48:13 -04:00
gilbertchen
a43114da99 Update README.md 2017-07-25 23:47:50 -04:00
gilbertchen
caaff6b4b2 Add doc for minio and s3c 2017-07-24 14:01:10 -04:00
gilbertchen
18964e89a1 Add missing dependencies 2017-07-21 15:29:48 -04:00
Gilbert Chen
2d1ea86d8e Calculate file hash during in-place restore 2017-07-21 15:19:11 -04:00
Gilbert Chen
d881ac9169 Sparse file support: create an empty sparse file for in-place restore 2017-07-20 23:08:55 -04:00
Gilbert Chen
1aee9bd6ef Retain the error message in the SFTP rename error 2017-07-20 10:19:11 -04:00
Gilbert Chen
f3447bb611 Improve OneDrive backend by retrying on more errors 2017-07-19 23:39:54 -04:00
Gilbert Chen
9be4927c87 Set preference path before backup and restore 2017-07-19 15:15:19 -04:00
Gilbert Chen
a0fcb8802b Fixed typos 2017-07-17 22:35:12 -04:00
gilbertchen
58cfeec6ab Update doc for include/exclude patterns 2017-07-17 13:15:58 -04:00
Gilbert Chen
0d442e736d ChunkMaker should return the chunk once it is filled 2017-07-14 23:56:40 -04:00
Gilbert Chen
b32bda162d Fixed #25: don't error out when a file can't be found in a revision 2017-07-14 15:23:00 -04:00
gilbertchen
e6767bfad4 Update LICENSE.md 2017-07-13 23:42:58 -04:00
gilbertchen
0b9e23fcd8 Update README.md 2017-07-13 23:42:28 -04:00
Gilbert Chen
7f04a79111 Replace Fair Source 5 with our own free-for-personal-use license 2017-07-13 23:33:14 -04:00
Gilbert Chen
211c6867d3 Bump version to 2.0.6 2017-07-13 15:04:56 -04:00
Gilbert Chen
4a31fcfb68 Set file sizes to -1 before creating the file reader 2017-07-13 15:00:27 -04:00
Gilbert Chen
6a4b1f2a3f Add minios to test storages 2017-07-13 14:06:36 -04:00
Gilbert Chen
483ae5e6eb Add minios:// for minio servers with SSL support 2017-07-12 21:09:19 -04:00
Gilbert Chen
f8d879d414 Add a s3c backend to support s3 compatible storages that require V2 Signing 2017-07-11 21:27:20 -04:00
Gilbert Chen
c2120ad3d5 Merge branch 'master' of https://github.com/gilbertchen/duplicacy 2017-07-11 13:45:14 -04:00
Gilbert Chen
f8764a5a79 Make the S3 backend compatible with minio 2017-07-11 13:45:06 -04:00
gilbertchen
736b4da0c3 Update README.md 2017-07-08 21:05:24 -04:00
Gilbert Chen
0aa122609a Fixed #82: force in-place mode with a non-default preference path 2017-07-07 22:21:43 -04:00
gilbertchen
18462cf585 Update README.md 2017-07-07 20:49:53 -04:00
gilbertchen
e06283f0b3 Update GUIDE.md 2017-07-07 20:24:16 -04:00
Gilbert Chen
b4f3142275 Fixed #86: increase timeouts to handle overloaded Hubic servers 2017-07-07 20:08:09 -04:00
Gilbert Chen
cdd1f26079 Merge branch 'master' of https://github.com/gilbertchen/duplicacy 2017-07-07 17:23:12 -04:00
Gilbert Chen
199e312bea Fixed a build error in TestUploaderAndDownloader 2017-07-07 17:22:59 -04:00
gilbertchen
88141216e9 Update GUIDE.md 2017-07-07 15:32:03 -04:00
gilbertchen
f9ede565ff Update README.md 2017-07-07 15:02:08 -04:00
gilbertchen
93a61a6e49 Update README.md 2017-07-07 15:01:10 -04:00
gilbertchen
7d31199631 Update README.md 2017-07-07 14:58:43 -04:00
gilbertchen
f2451911f2 Update README.md 2017-07-07 14:58:10 -04:00
gilbertchen
ac655c8780 Update README.md 2017-07-07 14:57:40 -04:00
gilbertchen
c31d2a30d9 Update README.md 2017-07-07 14:54:36 -04:00
gilbertchen
83da36cae0 Update README.md 2017-07-07 14:54:03 -04:00
gilbertchen
96e2f78096 Update README.md 2017-07-07 14:45:59 -04:00
gilbertchen
593b409329 Update README.md 2017-07-07 14:44:20 -04:00
gilbertchen
5334f45998 Update README.md 2017-07-07 14:43:07 -04:00
gilbertchen
b56baa80c3 Update README.md 2017-07-07 14:42:07 -04:00
gilbertchen
74ab8d8c23 Update README.md 2017-07-07 14:38:37 -04:00
gilbertchen
a7613ab7d9 Update README.md 2017-07-07 14:34:11 -04:00
Gilbert Chen
65127c7ab7 Fixed incorrect restore percentage: should use chunk sizes instead of file sizes 2017-07-07 12:53:56 -04:00
Gilbert Chen
09f695b3e1 Verify chunk id for snapshot chunks 2017-07-07 12:08:38 -04:00
Gilbert Chen
2908b807b9 Fixed incorrect stats during backup; also check in files missing from last commit 2017-07-06 23:12:22 -04:00
Gilbert Chen
ba3702647b Fixed #90: unprocessed files may leak into incomplete snapshot leading to incorrect file size 2017-07-06 22:06:51 -04:00
Gilbert Chen
0a149cd509 Bump version to 2.0.5 2017-07-04 15:17:27 -04:00
Gilbert Chen
2cbb72c2d0 Handle 3xx status codes from B2 2017-07-04 15:14:15 -04:00
Gilbert Chen
12134ea6ad Fixed #83: don't pass unchanged files to the chunk downloader 2017-07-04 14:50:34 -04:00
Gilbert Chen
4291bc775b Retry on authentication error for Google Drive 2017-07-02 20:49:56 -04:00
Gilbert Chen
817e36c7a6 Bump version to 2.0.4 2017-06-29 22:22:59 -04:00
Gilbert Chen
b7b54478fc Don't compute file hashes when DUPLICACY_SKIP_FILE_HASH is set; handle vertical backup-style hashes in restore 2017-06-29 22:19:41 -04:00
Gilbert Chen
8d06fa491a Merge branch 'master' of https://github.com/gilbertchen/duplicacy 2017-06-29 13:19:53 -04:00
Gilbert Chen
42a6ab9140 In fixed-size chunking, create a new chunk after returning the old one 2017-06-29 13:11:28 -04:00
gilbertchen
bad990e702 Merge pull request #81 from chbmuc/master
Move error parsing behind status code handling
2017-06-26 11:08:38 -04:00
gilbertchen
d27335ad8d Update README.md 2017-06-23 22:02:15 -04:00
Gilbert Chen
a584828e1b Merge branch 'master' of https://github.com/gilbertchen/duplicacy 2017-06-22 22:53:42 -04:00
Gilbert Chen
d0c376f593 Implement fast resume; refactor GetDuplicacyPreferencePath() 2017-06-22 22:53:33 -04:00
gilbertchen
a54029cf2b Update GUIDE.md 2017-06-22 13:11:04 -04:00
Gilbert Chen
839be6094f Remove unused import 2017-06-20 16:37:47 -04:00
Gilbert Chen
84a4c86ca7 Bump version to 2.0.3 2017-06-20 14:39:04 -04:00
Gilbert Chen
651d82e511 Check directory existence again when failing to create it to avoid erroring out on race condition 2017-06-20 14:38:09 -04:00
Christian Brunner
6a73a62591 Move error parsing behind status code handling
Otherwise request throttling won't work and you will get errors like this:

PUT https://api.onedrive.com/v1.0/drive/root:/dup/chunks/91xxx08:/content
Failed to upload the chunk 91xxx08: 503 Unexpected response
2017-06-16 14:06:14 +02:00
gilbertchen
169d6db544 Create README.md 2017-06-15 16:22:09 -04:00
gilbertchen
25684942b3 Merge pull request #78 from stefandz/patch-2
another tiny typo
2017-06-15 10:49:59 -04:00
gilbertchen
746431d5e0 Merge pull request #77 from stefandz/patch-1
Update GUIDE.md
2017-06-15 10:49:17 -04:00
Gilbert Chen
28da4d15e2 Fixed #76: must create a new chunk for uploading in the copy operation 2017-06-15 10:48:24 -04:00
stefandz
d36e80a5eb another tiny typo 2017-06-15 15:40:28 +01:00
stefandz
fe1de10f22 Update GUIDE.md
Tiny typo
2017-06-15 11:29:52 +01:00
gilbertchen
112d5b22e5 Replace goamz with aws-sdk-g 2017-06-13 15:16:24 -04:00
gilbertchen
3da8830592 Fix typos 2017-06-13 13:29:01 -04:00
gilbertchen
04b01fa87d Merge pull request #73 from sdaros/master
Fix typo
2017-06-13 13:17:05 -04:00
Stefano Da Ros
4b60859054 Fix typo
The file should be titled "known_hosts" instead.
2017-06-13 18:53:59 +02:00
Gilbert Chen
7e5fc0972d Make LICENSE a Markdown file for better viewing 2017-06-13 12:37:05 -04:00
Gilbert Chen
c9951d6036 Move LICENSE to the top directory 2017-06-13 12:36:02 -04:00
Gilbert Chen
92b3594e89 Add a LICENSE file 2017-06-13 12:35:06 -04:00
Gilbert Chen
2424a2eeed Switch from goamz to aws-sdk-go for the S3 storage backend 2017-06-13 12:27:01 -04:00
gilbertchen
2ace6c74e1 Merge pull request #71 from ech1965/pref-dir
add -pref-dir command line option for init subcommand
2017-06-13 11:58:07 -04:00
Etienne Charlier
2fcc4d44b9 Merge branch 'master' of https://github.com/gilbertchen/duplicacy into pref-dir 2017-06-12 19:28:52 +02:00
gilbertchen
3f45b0a15a Update README.md 2017-06-11 14:09:39 -04:00
gilbertchen
2d69f64c20 Create README.md 2017-06-11 14:08:13 -04:00
Gilbert Chen
7a1a541c98 Rename main directory for better support of go get 2017-06-11 14:02:43 -04:00
Etienne Charlier
7aa0eca47c Fix typo 2017-06-11 14:10:14 +02:00
Etienne Charlier
aa909c0c15 Update documentation 2017-06-11 13:48:11 +02:00
Etienne Charlier
9e1740c1d6 Fix merge error 2017-06-10 17:14:58 +02:00
Etienne Charlier
ae34347741 merge version 2.0.2 2017-06-10 17:12:44 +02:00
Etienne Charlier
1361b553ac Remove logging statement; refactor test scripts 2017-06-08 22:21:57 +02:00
Etienne Charlier
c688c501d3 Refactor variable names and revert shadow copy path computation 2017-06-07 21:02:55 +02:00
Etienne Charlier
c88e148d59 First steps -pref-dir 2017-06-05 23:16:11 +02:00
59 changed files with 1798 additions and 538 deletions

View File

@@ -1,4 +1,4 @@
Duplicacy is based on the following open source project:
Duplicacy is based on the following open source projects:
| Projects | License |
|--------|:-------:|
@@ -7,8 +7,10 @@ Duplicacy is based on the following open source project:
|https://github.com/bkaradzic/go-lz4 | BSD-2-Clause |
|https://github.com/Azure/azure-sdk-for-go | Apache-2.0 |
|https://github.com/tj/go-dropbox | MIT |
|https://github.com/goamz/goamz | LGPL-3.0 with static compilation excpetions |
|https://github.com/aws/aws-sdk-go | Apache-2.0 |
|https://github.com/goamz/goamz | LGPL with static link exception |
|https://github.com/howeyc/gopass | ISC |
|https://github.com/tmc/keyring | ISC |
|https://github.com/pcwizz/xattr | BSD-2-Clause |
|https://github.com/minio/blake2b-simd | Apache-2.0 |
|https://github.com/go-ole/go-ole | MIT |

View File

@@ -16,6 +16,7 @@ OPTIONS:
-chunk-size, -c 4M the average size of chunks
-max-chunk-size, -max 16M the maximum size of chunks (defaults to chunk-size * 4)
-min-chunk-size, -min 1M the minimum size of chunks (defaults to chunk-size / 4)
-pref-dir <preference directory path> Specify alternate location for .duplicacy preferences directory
```
The *init* command first connects to the storage specified by the storage URL. If the storage has already been
@@ -27,12 +28,14 @@ for those commands. This default storage actually has a name, *default*.
After that, it will prepare the current working directory as the repository to be backed up. Under the hood, it will create a directory
named *.duplicacy* in the repository and put a file named *preferences* that stores the snapshot id and encryption and storage options.
The snapshot id is an id used to distinguish different repositories connected to the same storage. Each repository must have a unique snapshot id.
The snapshot id is an id used to distinguish different repositories connected to the same storage. Each repository must have a unique snapshot id. A snapshot id must contain only characters valid in Linux and Windows paths (alphabet, digits, underscore, dash, etc), but cannot include `/`, `\`, or `@`.
The -e option controls whether or not encryption will be enabled for the storage. If encryption is enabled, you will be prompted to enter a storage password.
The three chunk size parameters are passed to the variable-size chunking algorithm. Their values are important to the overall performance, especially for cloud storages. If the chunk size is too small, a lot of overhead goes into sending requests and receiving responses; if the chunk size is too large, deduplication becomes less effective because more data must be transferred with each chunk.
The -pref-dir option controls the location of the preferences directory. If not specified, a directory named .duplicacy is created in the repository. If specified, it must point to a directory that does not yet exist; that directory is created, and a .duplicacy file containing the absolute path of the preferences directory is written to the repository.
Once a storage has been initialized with these parameters, they cannot be modified any more.
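To illustrate how these options fit together, here is a small sketch of an *init* invocation combining custom chunk sizes with an out-of-tree preferences directory. The snapshot id, storage path, and preferences path are placeholders chosen for this example; the flags themselves are the ones documented above.
```
cd /path/to/repository
# -pref-dir must point to a directory that does not exist yet; Duplicacy creates it
# and records its absolute path in a .duplicacy file inside the repository.
duplicacy init -c 4M -min 1M -max 16M -pref-dir /backups/prefs/repo1 mybackup /mnt/backup/storage
```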
#### Backup
@@ -130,7 +133,7 @@ OPTIONS:
-t <tag> list snapshots with the specified tag
-files print the file list in each snapshot
-chunks print chunks in each snapshot or all chunks if no snapshot specified
-reset-password take passwords from input rather than keychain/keyring or env
-reset-passwords take passwords from input rather than keychain/keyring or env
-storage <storage name> retrieve snapshots from the specified storage
```
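A quick sketch using the corrected option name; the storage name *default* is the built-in default mentioned earlier, and the command simply re-prompts for passwords instead of reading them from the keychain/keyring or environment:
```
duplicacy list -reset-passwords -storage default
```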
@@ -453,11 +456,26 @@ destination storage and is required.
## Include/Exclude Patterns
An include pattern starts with +, and an exclude pattern starts with -. Patterns may contain wildcard characters such as * and ? with their normal meaning.
An include pattern starts with +, and an exclude pattern starts with -. Patterns may contain the wildcard characters * and ?, where * matches a path string of any length and ? matches a single character. Note that both * and ? will match any character, including the path separator /.
The path separator is always /, even on Windows.
When matching a path against a list of patterns, the path is compared with the part after + or -, one pattern at a time. Therefore, the order of the patterns is significant. If a match with an include pattern is found, the path is said to be included without further comparisons. If a match with an exclude pattern is found, the path is said to be excluded without further comparison. If a match is not found, the path will be excluded if all patterns are include patterns, but included otherwise.
Patterns ending with a / apply to directories only, and patterns not ending with a / apply to files only. When a directory is excluded, all files and subdirectires under it will also be excluded. Note that the path separator is always /, even on Windows.
Patterns ending with a / apply to directories only, and patterns not ending with a / apply to files only. Patterns ending with * or ?, however, apply to both directories and files. When a directory is excluded, all files and subdirectories under it will also be excluded. Therefore, to include a subdirectory, all parent directories must be explicitly included. For instance, the following pattern list doesn't do what is intended, since the `foo` directory will be excluded and `foo/bar` will never be visited:
```
+foo/bar/*
-*
```
The correct way is to include `foo` as well:
```
+foo/bar/*
+foo/
-*
```
The following pattern list includes only files under the directory foo/ but not files under the subdirectory foo/bar:
@@ -497,6 +515,16 @@ Duplicacy will attempt to retrieve in three ways the storage password and the st
Note that passwords stored in environment variables or in the preferences need to be in plaintext and are thus insecure; they should be avoided whenever possible.
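If an environment variable must be used despite this caveat, the sketch below shows the general idea. The variable name DUPLICACY_PASSWORD is assumed from the full user guide and does not appear in this excerpt.
```
# Assumed variable name; the password sits in plaintext in the environment,
# so prefer the keychain/keyring whenever possible.
export DUPLICACY_PASSWORD='my-storage-password'
duplicacy backup -stats
```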
## Cache
Duplicacy maintains a local cache under the `.duplicacy/cache` folder in the repository. Only snapshot chunks may be stored in this local cache, and file chunks are never cached.
At the end of a backup operation, Duplicacy will clean up the local cache in such a way that only chunks composing the snapshot file from the last backup will stay in the cache. All other chunks will be removed from the cache. However, if the *prune* command has been run before (which will leave a `.duplicacy/collection` folder in the repository), then the *backup* command won't perform any cache cleanup and will instead defer that to the *prune* command.
At the end of a prune operation, Duplicacy will remove all chunks from the local cache except those composing the snapshot file from the last backup (those that would be kept by the *backup* command), as well as chunks that contain information about chunks referenced by *all* backups from *all* repositories connected to the same storage url.
Other commands, such as *list* and *check*, do not clean up the local cache at all, so the local cache may keep growing if many of these commands run consecutively. However, once a *backup* or a *prune* command is invoked, the local cache should shrink back to its normal size.
## Scripts
You can instruct Duplicay to run a script before or after executing a command. For example, if you create a bash script with the name *pre-prune* under the *.duplicacy/scripts* directory, this bash script will be run before the *prune* command starts. A script named *post-prune* will be run after the *prune* command finishes. This rule applies to all commands except *init*.
You can instruct Duplicacy to run a script before or after executing a command. For example, if you create a bash script with the name *pre-prune* under the *.duplicacy/scripts* directory, this bash script will be run before the *prune* command starts. A script named *post-prune* will be run after the *prune* command finishes. This rule applies to all commands except *init*.
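As a concrete illustration of this convention, the sketch below installs a minimal *pre-prune* script. Only the script location and naming come from the paragraph above; the log file path is an arbitrary example.
```
# Run from inside the repository
mkdir -p .duplicacy/scripts
cat > .duplicacy/scripts/pre-prune <<'EOF'
#!/bin/bash
# Runs automatically before `duplicacy prune`; a post-prune script would run afterwards.
echo "prune started at $(date)" >> "$HOME/duplicacy-prune.log"
EOF
chmod +x .duplicacy/scripts/pre-prune
```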

6
LICENSE.md Normal file
View File

@@ -0,0 +1,6 @@
Copyright © 2017 Acrosync LLC
* Free for personal use or commercial trial
* Non-trial commercial use requires per-user licenses available from [duplicacy.com](https://duplicacy.com/customer) at a cost of $20 per year
* Commercial licenses are not required to restore or manage backups; only the backup command requires a valid commercial license
* Modification and redistribution are permitted, but commercial use of derivative works is subject to the same requirements of this license

139
README.md
View File

@@ -4,6 +4,8 @@ Duplicacy is a new generation cross-platform cloud backup tool based on the idea
The repository hosts source code, design documents, and binary releases of the command line version. There is also a Duplicacy GUI frontend built for Windows and Mac OS X available from https://duplicacy.com.
There is a special edition of Duplicacy developed for VMware vSphere (ESXi) named [Vertical Backup](https://www.verticalbackup.com) that can back up virtual machine files on ESXi to local drives, network or cloud storages.
## Features
Duplicacy currently supports major cloud storage providers (Amazon S3, Google Cloud Storage, Microsoft Azure, Dropbox, Backblaze, Google Drive, Microsoft OneDrive, and Hubic) and offers all essential features of a modern backup tool:
@@ -26,16 +28,21 @@ The [design document](https://github.com/gilbertchen/duplicacy-cli/blob/master/D
## Getting Started
Duplicacy is written in Go. You can build the executable by running the following commands:
<details>
<summary>Installation</summary>
Duplicacy is written in Go. You can run the following command to build the executable (which will be created under `$GOPATH/bin`):
```
git clone https://github.com/gilbertchen/duplicacy.git
cd duplicacy
go get ./...
go build main/duplicacy_main.go
go get -u github.com/gilbertchen/duplicacy/...
```
You can also visit the [releases page](https://github.com/gilbertchen/duplicacy-cli/releases/latest) to download the version suitable for your platform. Installation is not needed.
You can also visit the [releases page](https://github.com/gilbertchen/duplicacy-cli/releases/latest) to download the pre-built binary suitable for your platform.
</details>
<details>
<summary>Commands</summary>
Once you have the Duplicacy executable on your path, you can change to the directory that you want to back up (called *repository*) and run the *init* command:
@@ -52,8 +59,16 @@ You can now create snapshots of the repository by invoking the *backup* command.
$ duplicacy backup -stats
```
The *restore* command rolls back the repository to a previous revision:
```sh
$ duplicacy restore -r 1
```
Duplicacy provides a set of commands, such as list, check, diff, cat, and history, to manage snapshots:
```makefile
$ duplicacy list # List all snapshots
$ duplicacy check # Check integrity of snapshots
@@ -62,10 +77,6 @@ $ duplicacy cat # Print a file in a snapshot
$ duplicacy history # Show how a file changes over time
```
The *restore* command rolls back the repository to a previous revision:
```sh
$ duplicacy restore -r 1
```
The *prune* command removes snapshots by revisions, or tags, or retention policies:
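The original examples for this command are not shown in this excerpt; as an illustrative sketch, `-r` mirrors the restore and copy examples elsewhere in this README, while the `-keep` retention syntax is assumed from the full user guide:
```
$ duplicacy prune -r 1         # Remove the snapshot at revision 1
$ duplicacy prune -keep 7:30   # Assumed syntax: keep one snapshot every 7 days for snapshots older than 30 days
```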
@@ -103,21 +114,26 @@ $ duplicacy copy -r 1 -to s3 # Copy snapshot at revision 1 to the s3 storage
$ duplicacy copy -to s3 # Copy every snapshot to the s3 storage
```
</details>
The [User Guide](https://github.com/gilbertchen/duplicacy-cli/blob/master/GUIDE.md) contains a complete reference to
all commands and other features of Duplicacy.
## Storages
Duplicacy currently supports local file storage, SFTP, and 5 cloud storage providers.
Duplicacy currently supports local file storage, SFTP, and many cloud storage providers.
#### Local disk
<details> <summary>Local disk</summary>
```
Storage URL: /path/to/storage (on Linux or Mac OS X)
C:\path\to\storage (on Windows)
```
</details>
#### SFTP
<details> <summary>SFTP</summary>
```
Storage URL: sftp://username@server/path/to/storage
@@ -125,7 +141,9 @@ Storage URL: sftp://username@server/path/to/storage
Login methods include password authentication and public key authentication. Due to a limitation of the underlying Go SSH library, the key pair for public key authentication must be generated without a passphrase. To work with a key that has a passphrase, you can set up SSH agent forwarding which is also supported by Duplicacy.
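Because the passphrase limitation above is easy to trip over, here is a brief sketch of both workarounds; the key file name is an arbitrary example:
```
# Option 1: generate a key pair without a passphrase for Duplicacy to use directly
ssh-keygen -t rsa -N "" -f ~/.ssh/duplicacy_sftp

# Option 2: keep a passphrase-protected key and let an SSH agent hold it (agent forwarding is supported)
eval "$(ssh-agent -s)"
ssh-add ~/.ssh/id_rsa
```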
#### Dropbox
</details>
<details> <summary>Dropbox</summary>
```
Storage URL: dropbox://path/to/storage
@@ -139,7 +157,9 @@ For Duplicacy to access your Dropbox storage, you must provide an access token t
Dropbox has two advantages over other cloud providers. First, if you are already a paid user, then using the unused space as the backup storage is basically free. Second, unlike other providers, Dropbox does not charge bandwidth or API usage fees.
#### Amazon S3
</details>
<details> <summary>Amazon S3</summary>
```
Storage URL: s3://amazon.com/bucket/path/to/storage (default region is us-east-1)
@@ -148,18 +168,43 @@ Storage URL: s3://amazon.com/bucket/path/to/storage (default region is us-east-
You'll need to input an access key and a secret key to access your Amazon S3 storage.
Minio-based S3-compatible storages are also supported by using the `minio` or `minios` backends:
```
Storage URL: minio://region@host/bucket/path/to/storage (without TLS)
Storage URL: minios://region@host/bucket/path/to/storage (with TLS)
```
#### Google Cloud Storage
There is another backend that works with S3-compatible storage providers that require V2 signing:
```
Storage URL: s3c://region@host/bucket/path/to/storage
```
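Tying these storage URLs back to the *init* command, a hypothetical sketch (region, host, and bucket names are placeholders):
```
# Minio server with TLS
duplicacy init mybackup minios://us-east-1@minio.example.com/backup-bucket/repo1
# S3-compatible provider that only supports V2 signing
duplicacy init mybackup s3c://us-east-1@s3.example.net/backup-bucket/repo1
```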
</details>
<details> <summary>Wasabi</summary>
```
Storage URL: s3://us-east-1@s3.wasabisys.com/bucket/path/to/storage
```
[Wasabi](https://wasabi.com) is a relatively new cloud storage service providing an S3-compatible API.
It is well suited for storing backups because it is much cheaper than Amazon S3, with a storage cost of $0.0039/GB/month, a download fee of $0.04/GB, and no additional charges for API calls.
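At the quoted rate, storing a 1 TB backup set costs roughly 1,000 GB × $0.0039 ≈ $3.90 per month, before any download fees.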
</details>
<details> <summary>Google Cloud Storage</summary>
```
Storage URL: gcs://bucket/path/to/storage
```
Starting from version 2.0.0, a new Google Cloud Storage backend is added which is implemented using the [official Google client library](https://godoc.org/cloud.google.com/go/storage). You must first obtain a credential file by [authorizing](https://duplicacy.com/gcp_start) Dupliacy to access your Google Cloud Storage account or by [downloading](https://console.cloud.google.com/projectselector/iam-admin/serviceaccounts) a service account credential file.
Starting from version 2.0.0, a new Google Cloud Storage backend is added which is implemented using the [official Google client library](https://godoc.org/cloud.google.com/go/storage). You must first obtain a credential file by [authorizing](https://duplicacy.com/gcp_start) Duplicacy to access your Google Cloud Storage account or by [downloading](https://console.cloud.google.com/projectselector/iam-admin/serviceaccounts) a service account credential file.
You can also use the s3 protocol to access Google Cloud Storage. To do this, you must enable the [s3 interoperability](https://cloud.google.com/storage/docs/migrating#migration-simple) in your Google Cloud Storage settings and set the storage url as `s3://storage.googleapis.com/bucket/path/to/storage`.
#### Microsoft Azure
</details>
<details> <summary>Microsoft Azure</summary>
```
Storage URL: azure://account/container
@@ -167,7 +212,9 @@ Storage URL: azure://account/container
You'll need to input the access key once prompted.
#### Backblaze
</details>
<details> <summary>Backblaze B2</summary>
```
Storage URL: b2://bucket
@@ -175,9 +222,11 @@ Storage URL: b2://bucket
You'll need to input the account id and application key.
Backblaze's B2 storage is not only the least expensive (at 0.5 cent per GB per month), but also the fastest. We have been working closely with their developers to leverage the full potentials provided by the B2 API in order to maximumize the transfer speed.
Backblaze's B2 storage is one of the least expensive (at 0.5 cent per GB per month, with a download fee of 2 cents per GB, plus additional charges for API calls).
#### Google Drive
</details>
<details> <summary>Google Drive</summary>
```
Storage URL: gcd://path/to/storage
@@ -186,7 +235,9 @@ Storage URL: gcd://path/to/storage
To use Google Drive as the storage, you first need to download a token file from https://duplicacy.com/gcd_start by
authorizing Duplicacy to access your Google Drive, and then enter the path to this token file to Duplicacy when prompted.
#### Microsoft OneDrive
</details>
<details> <summary>Microsoft OneDrive</summary>
```
Storage URL: one://path/to/storage
@@ -195,7 +246,9 @@ Storage URL: one://path/to/storage
To use Microsoft OneDrive as the storage, you first need to download a token file from https://duplicacy.com/one_start by
authorizing Duplicacy to access your OneDrive, and then enter the path to this token file to Duplicacy when prompted.
#### Hubic
</details>
<details> <summary>Hubic</summary>
```
Storage URL: hubic://path/to/storage
@@ -206,8 +259,9 @@ authorizing Duplicacy to access your Hubic drive, and then enter the path to thi
Hubic offers the most free space (25GB) of all major cloud providers and there is no bandwidth charge (same as Google Drive and OneDrive), so it may be worth a try.
</details>
## Comparison with Other Backup Tools
## Feature Comparison with Other Backup Tools
[duplicity](http://duplicity.nongnu.org) works by applying the rsync algorithm (or more specifically, the [librsync](https://github.com/librsync/librsync) library)
to find the differences from previous backups and only then uploading the differences. It is the only existing backup tool with extensive cloud support -- the [long list](http://duplicity.nongnu.org/duplicity.1.html#sect7) of storage backends covers almost every cloud provider one can think of. However, duplicity's biggest flaw lies in its incremental model -- a chain of dependent backups starts with a full backup followed by a number of incremental ones, and ends when another full backup is uploaded. Deleting one backup will render useless all the subsequent backups on the same chain. Periodic full backups are required, in order to make previous backups disposable.
@@ -224,9 +278,9 @@ It is unclear if the lack of cloud backends is due to difficulties in porting th
[not recommended](http://librelist.com/browser//attic/2014/11/11/backing-up-multiple-servers-into-a-single-repository/#e96345aa5a3469a87786675d65da492b) by the developer due to chunk indices being kept in a local cache.
Concurrent access is not only a convenience; it is a necessity for better deduplication. For instance, if multiple machines with the same OS installed back up their entire drives to the same storage, only one copy of the system files needs to be stored, greatly reducing the storage space regardless of the number of machines. Attic still adopts the traditional approach of using a centralized indexing database to manage chunks, and relies heavily on caching to improve performance. The presence of exclusive locking makes it hard to adapt to cloud storage APIs and reduces the level of deduplication.
[restic](https://restic.github.io) is a more recent addition. It is worth mentioning here because, like Duplicacy, it is written in Go. It uses a format similar to the git packfile format, but not exactly the same. Multiple clients backing up to the same storage are still guarded by
[locks](https://github.com/restic/restic/blob/master/doc/Design.md#locks).
A command to delete old backups is in the developer's [plan](https://github.com/restic/restic/issues/18). S3 storage is supported, although it is unclear how hard it is to support other cloud storage APIs because of the need for locking. Overall, it still falls in the same category as Attic. Whether it will eventually reach the same level as Attic remains to be seen.
[restic](https://restic.github.io) is a more recent addition. It is worth mentioning here because, like Duplicacy, it is written in Go. It uses a format similar to the git packfile format. Multiple clients backing up to the same storage are still guarded by
[locks](https://github.com/restic/restic/blob/master/doc/Design.md#locks). A prune operation will therefore completely block all other clients connected to the storage from doing their regular backups. Moreover, since most cloud storage services do not provide a locking service, the best effort is to use some basic file operations to simulate a lock, but distributed locking is known to be a hard problem and it is unclear how reliable restic's lock implementation is. A faulty implementation may cause a prune operation to accidentally delete data still in use, resulting in unrecoverable data loss. This is the exact problem that we avoided by taking the lock-free approach.
The following table compares the feature lists of all these backup tools:
@@ -239,13 +293,36 @@ The following table compares the feature lists of all these backup tools:
| Encryption | Yes | Yes | Yes | Yes | Yes | **Yes** |
| Deletion | No | No | Yes | Yes | No | **Yes** |
| Concurrent Access | No | No | Exclusive locking | Not recommended | Exclusive locking | **Lock-free** |
| Cloud Support | Extensive | No | No | No | S3 only | **S3, GCS, Azure, Dropbox, Backblaze, Google Drive, OneDrive, and Hubic**|
| Cloud Support | Extensive | No | No | No | S3, B2, OpenStack | **S3, GCS, Azure, Dropbox, Backblaze B2, Google Drive, OneDrive, and Hubic**|
| Snapshot Migration | No | No | No | No | No | **Yes** |
## Performance Comparison with Other Backup Tools
Duplicacy is not only more feature-rich but also faster than other backup tools. The following table lists the running times in seconds of backing up the [Linux code base](https://github.com/torvalds/linux) using Duplicacy and 3 other tools. Clearly Duplicacy is the fastest by a significant margin.
| | Duplicacy | restic | Attic | duplicity |
|:------------------:|:----------------:|:----------:|:----------:|:-----------:|
| Initial backup | 13.7 | 20.7 | 26.9 | 44.2 |
| 2nd backup | 4.8 | 8.0 | 15.4 | 19.5 |
| 3rd backup | 6.9 | 11.9 | 19.6 | 29.8 |
| 4th backup | 3.3 | 7.0 | 13.7 | 18.6 |
| 5th backup | 9.9 | 11.4 | 19.9 | 28.0 |
| 6th backup | 3.8 | 8.0 | 16.8 | 22.0 |
| 7th backup | 5.1 | 7.8 | 14.3 | 21.6 |
| 8th backup | 9.5 | 13.5 | 18.3 | 35.0 |
| 9th backup | 4.3 | 9.0 | 15.7 | 24.9 |
| 10th backup | 7.9 | 20.2 | 32.2 | 35.0 |
| 11th backup | 4.6 | 9.1 | 16.8 | 28.1 |
| 12th backup | 7.4 | 12.0 | 21.7 | 37.4 |
For more details and other speed comparison results, please visit https://github.com/gilbertchen/benchmarking. There you can also find test scripts that you can use to run your own experiments.
## License
Duplicacy CLI is released under the [Fair Source 5 License](https://fair.io), which means it is free for individual users or any company or organization with less than 5 users. If your company or organization has 5 or more users, then a license for the actual number of users must be purchased from [duplicacy.com](https://duplicacy.com/customer).
A user is defined as the owner of any files to be backed up by Duplicacy. If you are an IT administrator who uses Duplicacy to back up files for your colleagues, then each colleague will be counted in the user limit permitted by the license.
* Free for personal use or commercial trial
* Non-trial commercial use requires per-user licenses available from [duplicacy.com](https://duplicacy.com/customer) at a cost of $20 per year
* Commercial licenses are not required to restore or manage backups; only the backup command requires a valid commercial license
* Modification and redistribution are permitted, but commercial use of derivative works is subject to the same requirements of this license

View File

@@ -1,6 +1,6 @@
// Copyright (c) Acrosync LLC. All rights reserved.
// Licensed under the Fair Source License 0.9 (https://fair.io/)
// User Limitation: 5 users
// Free for personal use and commercial trial
// Commercial use requires per-user licenses available from https://duplicacy.com
package main
@@ -13,11 +13,13 @@ import (
"strings"
"strconv"
"os/exec"
"os/signal"
"encoding/json"
"github.com/gilbertchen/cli"
"github.com/gilbertchen/duplicacy/src"
"io/ioutil"
)
const (
@@ -36,14 +38,14 @@ func getRepositoryPreference(context *cli.Context, storageName string) (reposito
}
for {
stat, err := os.Stat(path.Join(repository, duplicacy.DUPLICACY_DIRECTORY))
stat, err := os.Stat(path.Join(repository, duplicacy.DUPLICACY_DIRECTORY)) //TOKEEP
if err != nil && !os.IsNotExist(err) {
duplicacy.LOG_ERROR("REPOSITORY_PATH", "Failed to retrieve the information about the directory %s: %v",
repository, err)
return "", nil
}
if stat != nil && stat.IsDir() {
if stat != nil && (stat.IsDir() || stat.Mode().IsRegular()) {
break
}
@@ -54,10 +56,10 @@ func getRepositoryPreference(context *cli.Context, storageName string) (reposito
}
repository = parent
}
duplicacy.LoadPreferences(repository)
duplicacy.SetKeyringFile(path.Join(repository, duplicacy.DUPLICACY_DIRECTORY, "keyring"))
preferencePath := duplicacy.GetDuplicacyPreferencePath()
duplicacy.SetKeyringFile(path.Join(preferencePath, "keyring"))
if storageName == "" {
storageName = context.String("storage")
@@ -137,13 +139,14 @@ func setGlobalOptions(context *cli.Context) {
duplicacy.RunInBackground = context.GlobalBool("background")
}
func runScript(context *cli.Context, repository string, storageName string, phase string) bool {
func runScript(context *cli.Context, storageName string, phase string) bool {
if !ScriptEnabled {
return false
}
scriptDir, _ := filepath.Abs(path.Join(repository, duplicacy.DUPLICACY_DIRECTORY, "scripts"))
preferencePath := duplicacy.GetDuplicacyPreferencePath()
scriptDir, _ := filepath.Abs(path.Join(preferencePath, "scripts"))
scriptName := phase + "-" + context.Command.Name
script := path.Join(scriptDir, scriptName)
@@ -174,14 +177,14 @@ func runScript(context *cli.Context, repository string, storageName string, phas
}
func initRepository(context *cli.Context) {
configRespository(context, true)
configRepository(context, true)
}
func addStorage(context *cli.Context) {
configRespository(context, false)
configRepository(context, false)
}
func configRespository(context *cli.Context, init bool) {
func configRepository(context *cli.Context, init bool) {
setGlobalOptions(context)
defer duplicacy.CatchLogException()
@@ -220,21 +223,37 @@ func configRespository(context *cli.Context, init bool) {
duplicacy.LOG_ERROR("REPOSITORY_PATH", "Failed to retrieve the current working directory: %v", err)
return
}
duplicacyDirectory := path.Join(repository, duplicacy.DUPLICACY_DIRECTORY)
if stat, _ := os.Stat(path.Join(duplicacyDirectory, "preferences")); stat != nil {
preferencePath := context.String("pref-dir")
if preferencePath == "" {
preferencePath = path.Join(repository, duplicacy.DUPLICACY_DIRECTORY) // TOKEEP
}
if stat, _ := os.Stat(path.Join(preferencePath, "preferences")); stat != nil {
duplicacy.LOG_ERROR("REPOSITORY_INIT", "The repository %s has already been initialized", repository)
return
}
err = os.Mkdir(duplicacyDirectory, 0744)
err = os.Mkdir(preferencePath, 0744)
if err != nil && !os.IsExist(err) {
duplicacy.LOG_ERROR("REPOSITORY_INIT", "Failed to create the directory %s: %v",
duplicacy.DUPLICACY_DIRECTORY, err)
preferencePath, err)
return
}
duplicacy.SetKeyringFile(path.Join(duplicacyDirectory, "keyring"))
if context.String("pref-dir") != "" {
// out of tree preference file
// write real path into .duplicacy file inside repository
duplicacyFileName := path.Join(repository, duplicacy.DUPLICACY_FILE)
d1 := []byte(preferencePath)
err = ioutil.WriteFile(duplicacyFileName, d1, 0644)
if err != nil {
duplicacy.LOG_ERROR("REPOSITORY_PATH", "Failed to write %s file inside repository %v", duplicacyFileName, err)
return
}
}
duplicacy.SetDuplicacyPreferencePath(preferencePath)
duplicacy.SetKeyringFile(path.Join(preferencePath, "keyring"))
} else {
repository, _ = getRepositoryPreference(context, "")
@@ -251,7 +270,7 @@ func configRespository(context *cli.Context, init bool) {
Encrypted: context.Bool("encrypt"),
}
storage := duplicacy.CreateStorage(repository, preference, true, 1)
storage := duplicacy.CreateStorage(preference, true, 1)
storagePassword := ""
if preference.Encrypted {
prompt := fmt.Sprintf("Enter storage password for %s:", preference.StorageURL)
@@ -341,7 +360,7 @@ func configRespository(context *cli.Context, init bool) {
}
otherStorage := duplicacy.CreateStorage(repository, *otherPreference, false, 1)
otherStorage := duplicacy.CreateStorage(*otherPreference, false, 1)
otherPassword := ""
if otherPreference.Encrypted {
@@ -368,7 +387,7 @@ func configRespository(context *cli.Context, init bool) {
duplicacy.Preferences = append(duplicacy.Preferences, preference)
duplicacy.SavePreferences(repository)
duplicacy.SavePreferences()
duplicacy.LOG_INFO("REPOSITORY_INIT", "%s will be backed up to %s with id %s",
repository, preference.StorageURL, preference.SnapshotID)
@@ -489,7 +508,7 @@ func setPreference(context *cli.Context) {
oldPreference.StorageURL)
} else {
*oldPreference = newPreference
duplicacy.SavePreferences(repository)
duplicacy.SavePreferences()
duplicacy.LOG_INFO("STORAGE_SET", "New options for storage %s have been saved", oldPreference.StorageURL)
}
}
@@ -506,16 +525,18 @@ func changePassword(context *cli.Context) {
os.Exit(ArgumentExitCode)
}
repository, preference := getRepositoryPreference(context, "")
_, preference := getRepositoryPreference(context, "")
storage := duplicacy.CreateStorage(repository, *preference, false, 1)
storage := duplicacy.CreateStorage(*preference, false, 1)
if storage == nil {
return
}
password := ""
if preference.Encrypted {
password = duplicacy.GetPassword(*preference, "password", "Enter old password for storage %s:", false, true)
password = duplicacy.GetPassword(*preference, "password",
fmt.Sprintf("Enter old password for storage %s:", preference.StorageURL),
false, true)
}
config, _, err := duplicacy.DownloadConfig(storage, password)
@@ -547,7 +568,6 @@ func changePassword(context *cli.Context) {
duplicacy.LOG_INFO("STORAGE_SET", "The password for storage %s has been changed", preference.StorageURL)
}
func backupRepository(context *cli.Context) {
setGlobalOptions(context)
defer duplicacy.CatchLogException()
@@ -566,7 +586,7 @@ func backupRepository(context *cli.Context) {
return
}
runScript(context, repository, preference.Name, "pre")
runScript(context, preference.Name, "pre")
threads := context.Int("threads")
if threads < 1 {
@@ -574,7 +594,7 @@ func backupRepository(context *cli.Context) {
}
duplicacy.LOG_INFO("STORAGE_SET", "Storage set to %s", preference.StorageURL)
storage := duplicacy.CreateStorage(repository, *preference, false, threads)
storage := duplicacy.CreateStorage(*preference, false, threads)
if storage == nil {
return
}
@@ -598,10 +618,10 @@ func backupRepository(context *cli.Context) {
backupManager := duplicacy.CreateBackupManager(preference.SnapshotID, storage, repository, password)
duplicacy.SavePassword(*preference, "password", password)
backupManager.SetupSnapshotCache(repository, preference.Name)
backupManager.SetupSnapshotCache(preference.Name)
backupManager.Backup(repository, quickMode, threads, context.String("t"), showStatistics, enableVSS)
runScript(context, repository, preference.Name, "post")
runScript(context, preference.Name, "post")
}
func restoreRepository(context *cli.Context) {
@@ -623,7 +643,7 @@ func restoreRepository(context *cli.Context) {
return
}
runScript(context, repository, preference.Name, "pre")
runScript(context, preference.Name, "pre")
threads := context.Int("threads")
if threads < 1 {
@@ -631,7 +651,7 @@ func restoreRepository(context *cli.Context) {
}
duplicacy.LOG_INFO("STORAGE_SET", "Storage set to %s", preference.StorageURL)
storage := duplicacy.CreateStorage(repository, *preference, false, threads)
storage := duplicacy.CreateStorage(*preference, false, threads)
if storage == nil {
return
}
@@ -673,10 +693,10 @@ func restoreRepository(context *cli.Context) {
backupManager := duplicacy.CreateBackupManager(preference.SnapshotID, storage, repository, password)
duplicacy.SavePassword(*preference, "password", password)
backupManager.SetupSnapshotCache(repository, preference.Name)
backupManager.SetupSnapshotCache(preference.Name)
backupManager.Restore(repository, revision, true, quickMode, threads, overwrite, deleteMode, showStatistics, patterns)
runScript(context, repository, preference.Name, "post")
runScript(context, preference.Name, "post")
}
func listSnapshots(context *cli.Context) {
@@ -693,10 +713,10 @@ func listSnapshots(context *cli.Context) {
duplicacy.LOG_INFO("STORAGE_SET", "Storage set to %s", preference.StorageURL)
runScript(context, repository, preference.Name, "pre")
runScript(context, preference.Name, "pre")
resetPassword := context.Bool("reset-passwords")
storage := duplicacy.CreateStorage(repository, *preference, resetPassword, 1)
storage := duplicacy.CreateStorage(*preference, resetPassword, 1)
if storage == nil {
return
}
@@ -723,10 +743,10 @@ func listSnapshots(context *cli.Context) {
showFiles := context.Bool("files")
showChunks := context.Bool("chunks")
backupManager.SetupSnapshotCache(repository, preference.Name)
backupManager.SetupSnapshotCache(preference.Name)
backupManager.SnapshotManager.ListSnapshots(id, revisions, tag, showFiles, showChunks)
runScript(context, repository, preference.Name, "post")
runScript(context, preference.Name, "post")
}
func checkSnapshots(context *cli.Context) {
@@ -743,9 +763,9 @@ func checkSnapshots(context *cli.Context) {
duplicacy.LOG_INFO("STORAGE_SET", "Storage set to %s", preference.StorageURL)
runScript(context, repository, preference.Name, "pre")
runScript(context, preference.Name, "pre")
storage := duplicacy.CreateStorage(repository, *preference, false, 1)
storage := duplicacy.CreateStorage(*preference, false, 1)
if storage == nil {
return
}
@@ -773,10 +793,10 @@ func checkSnapshots(context *cli.Context) {
searchFossils := context.Bool("fossils")
resurrect := context.Bool("resurrect")
backupManager.SetupSnapshotCache(repository, preference.Name)
backupManager.SetupSnapshotCache(preference.Name)
backupManager.SnapshotManager.CheckSnapshots(id, revisions, tag, showStatistics, checkFiles, searchFossils, resurrect)
runScript(context, repository, preference.Name, "post")
runScript(context, preference.Name, "post")
}
func printFile(context *cli.Context) {
@@ -791,11 +811,11 @@ func printFile(context *cli.Context) {
repository, preference := getRepositoryPreference(context, "")
runScript(context, repository, preference.Name, "pre")
runScript(context, preference.Name, "pre")
// Do not print out storage for this command
//duplicacy.LOG_INFO("STORAGE_SET", "Storage set to %s", preference.StorageURL)
storage := duplicacy.CreateStorage(repository, *preference, false, 1)
storage := duplicacy.CreateStorage(*preference, false, 1)
if storage == nil {
return
}
@@ -815,7 +835,7 @@ func printFile(context *cli.Context) {
backupManager := duplicacy.CreateBackupManager(preference.SnapshotID, storage, repository, password)
duplicacy.SavePassword(*preference, "password", password)
backupManager.SetupSnapshotCache(repository, preference.Name)
backupManager.SetupSnapshotCache(preference.Name)
file := ""
if len(context.Args()) > 0 {
@@ -823,7 +843,7 @@ func printFile(context *cli.Context) {
}
backupManager.SnapshotManager.PrintFile(snapshotID, revision, file)
runScript(context, repository, preference.Name, "post")
runScript(context, preference.Name, "post")
}
func diff(context *cli.Context) {
@@ -838,10 +858,10 @@ func diff(context *cli.Context) {
repository, preference := getRepositoryPreference(context, "")
runScript(context, repository, preference.Name, "pre")
runScript(context, preference.Name, "pre")
duplicacy.LOG_INFO("STORAGE_SET", "Storage set to %s", preference.StorageURL)
storage := duplicacy.CreateStorage(repository, *preference, false, 1)
storage := duplicacy.CreateStorage(*preference, false, 1)
if storage == nil {
return
}
@@ -872,10 +892,10 @@ func diff(context *cli.Context) {
backupManager := duplicacy.CreateBackupManager(preference.SnapshotID, storage, repository, password)
duplicacy.SavePassword(*preference, "password", password)
backupManager.SetupSnapshotCache(repository, preference.Name)
backupManager.SetupSnapshotCache(preference.Name)
backupManager.SnapshotManager.Diff(repository, snapshotID, revisions, path, compareByHash)
runScript(context, repository, preference.Name, "post")
runScript(context, preference.Name, "post")
}
func showHistory(context *cli.Context) {
@@ -890,10 +910,10 @@ func showHistory(context *cli.Context) {
repository, preference := getRepositoryPreference(context, "")
runScript(context, repository, preference.Name, "pre")
runScript(context, preference.Name, "pre")
duplicacy.LOG_INFO("STORAGE_SET", "Storage set to %s", preference.StorageURL)
storage := duplicacy.CreateStorage(repository, *preference, false, 1)
storage := duplicacy.CreateStorage(*preference, false, 1)
if storage == nil {
return
}
@@ -915,10 +935,10 @@ func showHistory(context *cli.Context) {
backupManager := duplicacy.CreateBackupManager(preference.SnapshotID, storage, repository, password)
duplicacy.SavePassword(*preference, "password", password)
backupManager.SetupSnapshotCache(repository, preference.Name)
backupManager.SetupSnapshotCache(preference.Name)
backupManager.SnapshotManager.ShowHistory(repository, snapshotID, revisions, path, showLocalHash)
runScript(context, repository, preference.Name, "post")
runScript(context, preference.Name, "post")
}
func pruneSnapshots(context *cli.Context) {
@@ -933,10 +953,10 @@ func pruneSnapshots(context *cli.Context) {
repository, preference := getRepositoryPreference(context, "")
runScript(context, repository, preference.Name, "pre")
runScript(context, preference.Name, "pre")
duplicacy.LOG_INFO("STORAGE_SET", "Storage set to %s", preference.StorageURL)
storage := duplicacy.CreateStorage(repository, *preference, false, 1)
storage := duplicacy.CreateStorage(*preference, false, 1)
if storage == nil {
return
}
@@ -973,11 +993,11 @@ func pruneSnapshots(context *cli.Context) {
backupManager := duplicacy.CreateBackupManager(preference.SnapshotID, storage, repository, password)
duplicacy.SavePassword(*preference, "password", password)
backupManager.SetupSnapshotCache(repository, preference.Name)
backupManager.SnapshotManager.PruneSnapshots(repository, selfID, snapshotID, revisions, tags, retentions,
backupManager.SetupSnapshotCache(preference.Name)
backupManager.SnapshotManager.PruneSnapshots(selfID, snapshotID, revisions, tags, retentions,
exhaustive, exclusive, ignoredIDs, dryRun, deleteOnly, collectOnly)
runScript(context, repository, preference.Name, "post")
runScript(context, preference.Name, "post")
}
func copySnapshots(context *cli.Context) {
@@ -992,10 +1012,10 @@ func copySnapshots(context *cli.Context) {
repository, source := getRepositoryPreference(context, context.String("from"))
runScript(context, repository, source.Name, "pre")
runScript(context, source.Name, "pre")
duplicacy.LOG_INFO("STORAGE_SET", "Source storage set to %s", source.StorageURL)
sourceStorage := duplicacy.CreateStorage(repository, *source, false, 1)
sourceStorage := duplicacy.CreateStorage(*source, false, 1)
if sourceStorage == nil {
return
}
@@ -1006,7 +1026,7 @@ func copySnapshots(context *cli.Context) {
}
sourceManager := duplicacy.CreateBackupManager(source.SnapshotID, sourceStorage, repository, sourcePassword)
sourceManager.SetupSnapshotCache(repository, source.Name)
sourceManager.SetupSnapshotCache(source.Name)
duplicacy.SavePassword(*source, "password", sourcePassword)
@@ -1025,7 +1045,7 @@ func copySnapshots(context *cli.Context) {
duplicacy.LOG_INFO("STORAGE_SET", "Destination storage set to %s", destination.StorageURL)
destinationStorage := duplicacy.CreateStorage(repository, *destination, false, 1)
destinationStorage := duplicacy.CreateStorage(*destination, false, 1)
if destinationStorage == nil {
return
}
@@ -1042,7 +1062,7 @@ func copySnapshots(context *cli.Context) {
destinationManager := duplicacy.CreateBackupManager(destination.SnapshotID, destinationStorage, repository,
destinationPassword)
duplicacy.SavePassword(*destination, "password", destinationPassword)
destinationManager.SetupSnapshotCache(repository, destination.Name)
destinationManager.SetupSnapshotCache(destination.Name)
revisions := getRevisions(context)
snapshotID := ""
@@ -1056,7 +1076,7 @@ func copySnapshots(context *cli.Context) {
}
sourceManager.CopySnapshots(destinationManager, snapshotID, revisions, threads)
runScript(context, repository, source.Name, "post")
runScript(context, source.Name, "post")
}
func infoStorage(context *cli.Context) {
@@ -1071,7 +1091,9 @@ func infoStorage(context *cli.Context) {
repository := context.String("repository")
if repository != "" {
duplicacy.SetKeyringFile(path.Join(repository, duplicacy.DUPLICACY_DIRECTORY, "keyring"))
preferencePath := path.Join(repository, duplicacy.DUPLICACY_DIRECTORY)
duplicacy.SetDuplicacyPreferencePath(preferencePath)
duplicacy.SetKeyringFile(path.Join(preferencePath, "keyring"))
}
isEncrypted := context.Bool("e")
@@ -1088,7 +1110,7 @@ func infoStorage(context *cli.Context) {
password = duplicacy.GetPassword(preference, "password", "Enter the storage password:", false, false)
}
storage := duplicacy.CreateStorage("", preference, context.Bool("reset-passwords"), 1)
storage := duplicacy.CreateStorage(preference, context.Bool("reset-passwords"), 1)
config, isStorageEncrypted, err := duplicacy.DownloadConfig(storage, password)
if isStorageEncrypted {
@@ -1132,6 +1154,11 @@ func main() {
Usage: "the minimum size of chunks (defaults to chunk-size / 4)",
Argument: "1M",
},
cli.StringFlag{
Name: "pref-dir",
Usage: "Specify alternate location for .duplicacy preferences directory (absolute or relative to current directory)",
Argument: "<preferences directory path>",
},
},
Usage: "Initialize the storage if necessary and the current directory as the repository",
ArgsUsage: "<snapshot id> <storage url>",
@@ -1658,7 +1685,18 @@ func main() {
app.Name = "duplicacy"
app.HelpName = "duplicacy"
app.Usage = "A new generation cloud backup tool based on lock-free deduplication"
app.Version = "2.0.2"
app.Version = "2.0.7"
// If the program is interrupted, call the RunAtError function.
c := make(chan os.Signal, 1)
signal.Notify(c, os.Interrupt)
go func() {
for _ = range c {
duplicacy.RunAtError()
os.Exit(1)
}
}()
err := app.Run(os.Args)
if err != nil {
os.Exit(2)

18
integration_tests/fixed_test.sh Executable file
View File

@@ -0,0 +1,18 @@
#!/bin/bash
# Sanity test for the fixed-size chunking algorithm
. ./test_functions.sh
fixture
pushd ${TEST_REPO}
${DUPLICACY} init integration-tests $TEST_STORAGE -c 64 -max 64 -min 64
add_file file3
add_file file4
${DUPLICACY} backup
${DUPLICACY} check --files -stats
popd

View File

@@ -0,0 +1,38 @@
#!/bin/bash
. ./test_functions.sh
fixture
pushd ${TEST_REPO}
${DUPLICACY} init integration-tests $TEST_STORAGE -c 4
# Create 10 small files
add_file file1 20
add_file file2 20
rm file3; touch file3
add_file file4 20
chmod u-r file4
add_file file5 20
add_file file6 20
add_file file7 20
add_file file8 20
add_file file9 20
add_file file10 20
# Fail at the 10th chunk
env DUPLICACY_FAIL_CHUNK=10 ${DUPLICACY} backup
# Try it again to test the multiple-resume case
env DUPLICACY_FAIL_CHUNK=5 ${DUPLICACY} backup
add_file file1 20
add_file file2 20
# Fail the backup before uploading the snapshot
env DUPLICACY_FAIL_SNAPSHOT=true ${DUPLICACY} backup
# Now complete the backup
${DUPLICACY} backup
${DUPLICACY} check --files
popd

View File

@@ -0,0 +1,28 @@
#!/bin/bash
# Testing backup and restore of sparse files
. ./test_functions.sh
fixture
pushd ${TEST_REPO}
${DUPLICACY} init integration-tests $TEST_STORAGE -c 1m
for i in `seq 1 10`; do
dd if=/dev/urandom of=file3 bs=1000 count=1000 seek=$((100000 * $i))
done
ls -lsh file3
${DUPLICACY} backup
${DUPLICACY} check --files -stats
rm file1 file3
${DUPLICACY} restore -r 1
${DUPLICACY} -v restore -r 1 -overwrite -stats -hash
ls -lsh file3
popd

18
integration_tests/test.sh Executable file
View File

@@ -0,0 +1,18 @@
#!/bin/bash
. ./test_functions.sh
fixture
init_repo_pref_dir
backup
add_file file3
backup
add_file file4
chmod u-r ${TEST_REPO}/file4
backup
add_file file5
restore
check

View File

@@ -0,0 +1,123 @@
#!/bin/bash
get_abs_filename() {
# $1 : relative filename
echo "$(cd "$(dirname "$1")" && pwd)/$(basename "$1")"
}
pushd () {
command pushd "$@" > /dev/null
}
popd () {
command popd "$@" > /dev/null
}
# Functions used to create integration tests suite
DUPLICACY=$(get_abs_filename ../duplicacy_main)
# Base directory where test repositories will be created
TEST_ZONE=$HOME/DUPLICACY_TEST_ZONE
# Test Repository
TEST_REPO=$TEST_ZONE/TEST_REPO
# Storage for test ( For now, only local path storage is supported by test suite)
TEST_STORAGE=$TEST_ZONE/TEST_STORAGE
# Extra storage for copy operation
SECONDARY_STORAGE=$TEST_ZONE/SECONDARY_STORAGE
# Preference directory ( for testing the -pref-dir option)
DUPLICACY_PREF_DIR=$TEST_ZONE/TEST_DUPLICACY_PREF_DIR
# Scratch pad for testing restore
TEST_RESTORE_POINT=$TEST_ZONE/RESTORE_POINT
# Make sure $TEST_ZONE is in a known state
function fixture()
{
# clean TEST_RESTORE_POINT
rm -rf $TEST_RESTORE_POINT
mkdir -p $TEST_RESTORE_POINT
# clean TEST_STORAGE
rm -rf $TEST_STORAGE
mkdir -p $TEST_STORAGE
# clean SECONDARY_STORAGE
rm -rf $SECONDARY_STORAGE
mkdir -p $SECONDARY_STORAGE
# clean DUPLICACY_PREF_DIR
rm -rf $DUPLICACY_PREF_DIR
mkdir -p $DUPLICACY_PREF_DIR
# Create test repository
rm -rf ${TEST_REPO}
mkdir -p ${TEST_REPO}
pushd ${TEST_REPO}
echo "file1" > file1
mkdir dir1
echo "file2" > dir1/file2
popd
}
function init_repo()
{
pushd ${TEST_REPO}
${DUPLICACY} init integration-tests $TEST_STORAGE
${DUPLICACY} add -copy default secondary integration-tests $SECONDARY_STORAGE
${DUPLICACY} backup
popd
}
function init_repo_pref_dir()
{
pushd ${TEST_REPO}
${DUPLICACY} init -pref-dir "${DUPLICACY_PREF_DIR}" integration-tests ${TEST_STORAGE}
${DUPLICACY} add -copy default secondary integration-tests $SECONDARY_STORAGE
${DUPLICACY} backup
popd
}
function add_file()
{
FILE_NAME=$1
FILE_SIZE=${2:-20000000}
pushd ${TEST_REPO}
dd if=/dev/urandom of=${FILE_NAME} bs=1 count=$(($RANDOM % ${FILE_SIZE})) &> /dev/null
popd
}
function backup()
{
pushd ${TEST_REPO}
${DUPLICACY} backup
${DUPLICACY} copy -from default -to secondary
popd
}
function restore()
{
pushd ${TEST_REPO}
${DUPLICACY} restore -r 2 -delete
popd
}
function check()
{
pushd ${TEST_REPO}
${DUPLICACY} check -files
${DUPLICACY} check -storage secondary -files
popd
}

View File

@@ -0,0 +1,17 @@
#!/bin/bash
. ./test_functions.sh
fixture
pushd ${TEST_REPO}
${DUPLICACY} init integration-tests $TEST_STORAGE -c 1k
add_file file3
add_file file4
${DUPLICACY} backup -threads 16
${DUPLICACY} check --files -stats
popd

View File

@@ -1,6 +1,6 @@
// Copyright (c) Acrosync LLC. All rights reserved.
// Licensed under the Fair Source License 0.9 (https://fair.io/)
// User Limitation: 5 users
// Free for personal use and commercial trial
// Commercial use requires per-user licenses available from https://duplicacy.com
package duplicacy

View File

@@ -1,6 +1,6 @@
// Copyright (c) Acrosync LLC. All rights reserved.
// Licensed under the Fair Source License 0.9 (https://fair.io/)
// User Limitation: 5 users
// Free for personal use and commercial trial
// Commercial use requires per-user licenses available from https://duplicacy.com
package duplicacy

View File

@@ -1,6 +1,6 @@
// Copyright (c) Acrosync LLC. All rights reserved.
// Licensed under the Fair Source License 0.9 (https://fair.io/)
// User Limitation: 5 users
// Free for personal use and commercial trial
// Commercial use requires per-user licenses available from https://duplicacy.com
package duplicacy

View File

@@ -1,6 +1,6 @@
// Copyright (c) Acrosync LLC. All rights reserved.
// Licensed under the Fair Source License 0.9 (https://fair.io/)
// User Limitation: 5 users
// Free for personal use and commercial trial
// Commercial use requires per-user licenses available from https://duplicacy.com
package duplicacy
@@ -14,14 +14,13 @@ import (
type AzureStorage struct {
RateLimitedStorage
clients []*storage.BlobStorageClient
container string
containers []*storage.Container
}
func CreateAzureStorage(accountName string, accountKey string,
container string, threads int) (azureStorage *AzureStorage, err error) {
containerName string, threads int) (azureStorage *AzureStorage, err error) {
var clients []*storage.BlobStorageClient
var containers []*storage.Container
for i := 0; i < threads; i++ {
client, err := storage.NewBasicClient(accountName, accountKey)
@@ -31,21 +30,21 @@ func CreateAzureStorage(accountName string, accountKey string,
}
blobService := client.GetBlobService()
clients = append(clients, &blobService)
container := blobService.GetContainerReference(containerName)
containers = append(containers, container)
}
exist, err := clients[0].ContainerExists(container)
exist, err := containers[0].Exists()
if err != nil {
return nil, err
}
if !exist {
return nil, fmt.Errorf("container %s does not exist", container)
return nil, fmt.Errorf("container %s does not exist", containerName)
}
azureStorage = &AzureStorage {
clients: clients,
container: container,
containers: containers,
}
return
@@ -77,7 +76,7 @@ func (azureStorage *AzureStorage) ListFiles(threadIndex int, dir string) (files
for {
results, err := azureStorage.clients[threadIndex].ListBlobs(azureStorage.container, parameters)
results, err := azureStorage.containers[threadIndex].ListBlobs(parameters)
if err != nil {
return nil, nil, err
}
@@ -115,14 +114,15 @@ func (azureStorage *AzureStorage) ListFiles(threadIndex int, dir string) (files
// DeleteFile deletes the file or directory at 'filePath'.
func (storage *AzureStorage) DeleteFile(threadIndex int, filePath string) (err error) {
_, err = storage.clients[threadIndex].DeleteBlobIfExists(storage.container, filePath)
_, err = storage.containers[threadIndex].GetBlobReference(filePath).DeleteIfExists(nil)
return err
}
// MoveFile renames the file.
func (storage *AzureStorage) MoveFile(threadIndex int, from string, to string) (err error) {
source := storage.clients[threadIndex].GetBlobURL(storage.container, from)
err = storage.clients[threadIndex].CopyBlob(storage.container, to, source)
source := storage.containers[threadIndex].GetBlobReference(from)
destination := storage.containers[threadIndex].GetBlobReference(to)
err = destination.Copy(source.GetURL(), nil)
if err != nil {
return err
}
@@ -136,7 +136,8 @@ func (storage *AzureStorage) CreateDirectory(threadIndex int, dir string) (err e
// GetFileInfo returns the information about the file or directory at 'filePath'.
func (storage *AzureStorage) GetFileInfo(threadIndex int, filePath string) (exist bool, isDir bool, size int64, err error) {
properties, err := storage.clients[threadIndex].GetBlobProperties(storage.container, filePath)
blob := storage.containers[threadIndex].GetBlobReference(filePath)
err = blob.GetProperties(nil)
if err != nil {
if strings.Contains(err.Error(), "404") {
return false, false, 0, nil
@@ -145,7 +146,7 @@ func (storage *AzureStorage) GetFileInfo(threadIndex int, filePath string) (exis
}
}
return true, false, properties.ContentLength, nil
return true, false, blob.Properties.ContentLength, nil
}
// FindChunk finds the chunk with the specified id. If 'isFossil' is true, it will search for chunk files with
@@ -167,21 +168,22 @@ func (storage *AzureStorage) FindChunk(threadIndex int, chunkID string, isFossil
// DownloadFile reads the file at 'filePath' into the chunk.
func (storage *AzureStorage) DownloadFile(threadIndex int, filePath string, chunk *Chunk) (err error) {
readCloser, err := storage.clients[threadIndex].GetBlob(storage.container, filePath)
readCloser, err := storage.containers[threadIndex].GetBlobReference(filePath).Get(nil)
if err != nil {
return err
}
defer readCloser.Close()
_, err = RateLimitedCopy(chunk, readCloser, storage.DownloadRateLimit / len(storage.clients))
_, err = RateLimitedCopy(chunk, readCloser, storage.DownloadRateLimit / len(storage.containers))
return err
}
// UploadFile writes 'content' to the file at 'filePath'.
func (storage *AzureStorage) UploadFile(threadIndex int, filePath string, content []byte) (err error) {
reader := CreateRateLimitedReader(content, storage.UploadRateLimit / len(storage.clients))
return storage.clients[threadIndex].CreateBlockBlobFromReader(storage.container, filePath, uint64(len(content)), reader, nil)
reader := CreateRateLimitedReader(content, storage.UploadRateLimit / len(storage.containers))
blob := storage.containers[threadIndex].GetBlobReference(filePath)
return blob.CreateBlockBlobFromReader(reader, nil)
}
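
This hunk migrates from BlobStorageClient calls that take a container name to per-container and per-blob references. A minimal sketch of the new call pattern, assuming the gilbertchen/azure-sdk-for-go fork named in the commit messages; the import path, account name, key and container name are placeholders:

package main

import (
    "bytes"
    "fmt"
    "log"

    // Import path assumed from the fork referenced in the commit messages.
    "github.com/gilbertchen/azure-sdk-for-go/storage"
)

func main() {
    // Placeholder account credentials.
    client, err := storage.NewBasicClient("myaccount", "bXlrZXk=")
    if err != nil {
        log.Fatal(err)
    }
    blobService := client.GetBlobService()

    // Operate on a container reference instead of passing the container
    // name to every BlobStorageClient call.
    container := blobService.GetContainerReference("mycontainer")
    exist, err := container.Exists()
    if err != nil || !exist {
        log.Fatalf("container check failed: exist=%v, err=%v", exist, err)
    }

    // Upload, stat and delete a blob through blob references.
    blob := container.GetBlobReference("chunks/example")
    content := []byte("hello")
    if err := blob.CreateBlockBlobFromReader(bytes.NewReader(content), nil); err != nil {
        log.Fatal(err)
    }
    if err := blob.GetProperties(nil); err != nil {
        log.Fatal(err)
    }
    fmt.Println("size:", blob.Properties.ContentLength)
    if _, err := blob.DeleteIfExists(nil); err != nil {
        log.Fatal(err)
    }
}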

View File

@@ -1,6 +1,6 @@
// Copyright (c) Acrosync LLC. All rights reserved.
// Licensed under the Fair Source License 0.9 (https://fair.io/)
// User Limitation: 5 users
// Free for personal use and commercial trial
// Commercial use requires per-user licenses available from https://duplicacy.com
package duplicacy
@@ -135,7 +135,7 @@ func (client *B2Client) call(url string, input interface{}) (io.ReadCloser, int6
return nil, 0, err
}
if response.StatusCode < 400 {
if response.StatusCode < 300 {
return response.Body, response.ContentLength, nil
}
@@ -160,6 +160,10 @@ func (client *B2Client) call(url string, input interface{}) (io.ReadCloser, int6
} else if response.StatusCode >= 500 && response.StatusCode <= 599 {
backoff = client.retry(backoff, response)
continue
} else {
LOG_INFO("BACKBLAZE_CALL", "URL request '%s' returned status code %d", url, response.StatusCode)
backoff = client.retry(backoff, response)
continue
}
defer response.Body.Close()
@@ -487,7 +491,7 @@ func (client *B2Client) UploadFile(filePath string, content []byte, rateLimit in
io.Copy(ioutil.Discard, response.Body)
response.Body.Close()
if response.StatusCode < 400 {
if response.StatusCode < 300 {
return nil
}
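
The change above treats only responses below 300 as success and retries everything else (after re-authorizing on 401) with jittered exponential backoff. A generic sketch of that classification using net/http; it is not the B2Client code, and the URL and backoff cap are placeholders:

package main

import (
    "fmt"
    "math/rand"
    "net/http"
    "time"
)

// fetchWithRetry keeps issuing GET requests until a response below 300
// arrives or the backoff cap is exceeded. A 401 is returned to the caller so
// it can re-authorize; anything else is retried with jittered backoff.
func fetchWithRetry(url string, maxBackoff int) (*http.Response, error) {
    backoff := 1
    for {
        response, err := http.Get(url)
        if err == nil {
            if response.StatusCode < 300 {
                return response, nil // success; the caller closes the body
            }
            response.Body.Close()
            if response.StatusCode == 401 {
                return nil, fmt.Errorf("authorization required")
            }
        }
        if backoff > maxBackoff {
            return nil, fmt.Errorf("giving up after repeated failures")
        }
        // Jittered exponential backoff, in milliseconds.
        delay := time.Duration(rand.Float32()*1000.0*float32(backoff)) * time.Millisecond
        time.Sleep(delay)
        backoff *= 2
    }
}

func main() {
    if response, err := fetchWithRetry("https://example.com/", 64); err != nil {
        fmt.Println(err)
    } else {
        response.Body.Close()
    }
}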

View File

@@ -1,6 +1,6 @@
// Copyright (c) Acrosync LLC. All rights reserved.
// Licensed under the Fair Source License 0.9 (https://fair.io/)
// User Limitation: 5 users
// Free for personal use and commercial trial
// Commercial use requires per-user licenses available from https://duplicacy.com
package duplicacy

View File

@@ -1,6 +1,6 @@
// Copyright (c) Acrosync LLC. All rights reserved.
// Licensed under the Fair Source License 0.9 (https://fair.io/)
// User Limitation: 5 users
// Free for personal use and commercial trial
// Commercial use requires per-user licenses available from https://duplicacy.com
package duplicacy

View File

@@ -1,6 +1,6 @@
// Copyright (c) Acrosync LLC. All rights reserved.
// Licensed under the Fair Source License 0.9 (https://fair.io/)
// User Limitation: 5 users
// Free for personal use and commercial trial
// Commercial use requires per-user licenses available from https://duplicacy.com
package duplicacy
@@ -13,6 +13,7 @@ import (
"path"
"time"
"sort"
"sync"
"sync/atomic"
"strings"
"strconv"
@@ -70,11 +71,12 @@ func CreateBackupManager(snapshotID string, storage Storage, top string, passwor
// SetupSnapshotCache creates the snapshot cache, which is merely a local storage under the default .duplicacy
// directory
func (manager *BackupManager) SetupSnapshotCache(top string, storageName string) bool {
func (manager *BackupManager) SetupSnapshotCache(storageName string) bool {
preferencePath := GetDuplicacyPreferencePath()
cacheDir := path.Join(preferencePath, "cache", storageName)
cacheDir := path.Join(top, DUPLICACY_DIRECTORY, "cache", storageName)
storage, err := CreateFileStorage(cacheDir, 1)
storage, err := CreateFileStorage(cacheDir, 2, false, 1)
if err != nil {
LOG_ERROR("BACKUP_CACHE", "Failed to create the snapshot cache dir: %v", err)
return false
@@ -93,11 +95,19 @@ func (manager *BackupManager) SetupSnapshotCache(top string, storageName string)
return true
}
// setEntryContent sets the 4 content pointers for each entry in 'entries'. 'offset' indicates the value
// to be added to the StartChunk and EndChunk pointers, used when intending to append 'entries' to the
// original unchanged entry list.
//
// This function assumes the Size field of each entry is equal to the length of the chunk content that belongs
// to the file.
func setEntryContent(entries[] *Entry, chunkLengths[]int, offset int) {
if len(entries) == 0 {
return
}
// The following code works by iterating over 'entries' and 'chunkLengths' and keeping track of the
// accumulated total file size and the accumulated total chunk size.
i := 0
totalChunkSize := int64(0)
totalFileSize := entries[i].Size
@@ -114,6 +124,8 @@ func setEntryContent(entries[] *Entry, chunkLengths[]int, offset int) {
break
}
// If the current file ends at the end of the current chunk, the next file will
// start at the next chunk
if totalChunkSize + int64(length) == totalFileSize {
entries[i].StartChunk = j + 1 + offset
entries[i].StartOffset = 0
@@ -125,8 +137,17 @@ func setEntryContent(entries[] *Entry, chunkLengths[]int, offset int) {
totalFileSize += entries[i].Size
}
if i >= len(entries) {
break
}
totalChunkSize += int64(length)
}
// If there are some unvisited entries (which happens when saving an incomplete snapshot),
// set their sizes to -1 so they won't be saved to the incomplete snapshot
for j := i; j < len(entries); j++ {
entries[j].Size = -1
}
}
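
To make the StartChunk:StartOffset:EndChunk:EndOffset layout described above concrete: with chunk lengths 4, 4 and 4, two files of 5 and 7 bytes packed back to back get the pointers 0:0:1:1 and 1:1:2:4 (EndOffset is exclusive). A small stand-alone sketch with simplified stand-in types, not the real setEntryContent, assuming non-empty files:

package main

import "fmt"

// filePointers is a simplified stand-in for the four content pointers stored
// in an Entry ("StartChunk:StartOffset:EndChunk:EndOffset").
type filePointers struct {
    StartChunk, StartOffset, EndChunk, EndOffset int
}

// locate maps an absolute byte offset to (chunk index, offset within chunk),
// given the chunk lengths laid out back to back.
func locate(offset int64, chunkLengths []int) (int, int) {
    for i, length := range chunkLengths {
        if offset < int64(length) {
            return i, int(offset)
        }
        offset -= int64(length)
    }
    // Past the last chunk: report the end of the final chunk.
    last := len(chunkLengths) - 1
    return last, chunkLengths[last]
}

func main() {
    chunkLengths := []int{4, 4, 4} // three chunks, 12 bytes in total
    fileSizes := []int64{5, 7}     // two non-empty files packed back to back

    start := int64(0)
    for i, size := range fileSizes {
        sc, so := locate(start, chunkLengths)
        ec, eo := locate(start+size-1, chunkLengths) // last byte of the file
        p := filePointers{sc, so, ec, eo + 1}        // EndOffset is exclusive
        fmt.Printf("file %d -> %d:%d:%d:%d\n", i, p.StartChunk, p.StartOffset, p.EndChunk, p.EndOffset)
        start += size
    }
    // Expected output:
    // file 0 -> 0:0:1:1
    // file 1 -> 1:1:2:4
}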
// Backup creates a snapshot for the repository 'top'. If 'quickMode' is true, only files with different sizes
@@ -149,7 +170,6 @@ func (manager *BackupManager) Backup(top string, quickMode bool, threads int, ta
remoteSnapshot := manager.SnapshotManager.downloadLatestSnapshot(manager.snapshotID)
if remoteSnapshot == nil {
quickMode = false
remoteSnapshot = CreateEmptySnapshot(manager.snapshotID)
LOG_INFO("BACKUP_START", "No previous backup found")
} else {
@@ -170,35 +190,79 @@ func (manager *BackupManager) Backup(top string, quickMode bool, threads int, ta
// UploadChunk.
chunkCache := make(map[string]bool)
var incompleteSnapshot *Snapshot
// A revision number of 0 means this is the initial backup
if remoteSnapshot.Revision > 0 {
// Add all chunks in the last snapshot to the
// Add all chunks in the last snapshot to the cache
for _, chunkID := range manager.SnapshotManager.GetSnapshotChunks(remoteSnapshot) {
chunkCache[chunkID] = true
}
} else if manager.storage.IsFastListing() {
// If the listing operation is fast, list all chunks and put them in the cache.
LOG_INFO("BACKUP_LIST", "Listing all chunks")
allChunks, _ := manager.SnapshotManager.ListAllFiles(manager.storage, "chunks/")
for _, chunk := range allChunks {
if len(chunk) == 0 || chunk[len(chunk) - 1] == '/' {
continue
}
if strings.HasSuffix(chunk, ".fsl") {
continue
}
chunk = strings.Replace(chunk, "/", "", -1)
chunkCache[chunk] = true
} else {
// In quick mode, attempt to load the incomplete snapshot from last incomplete backup if there is one.
if quickMode {
incompleteSnapshot = LoadIncompleteSnapshot()
}
// If the listing operation is fast or there is an incomplete snapshot, list all chunks and
// put them in the cache.
if manager.storage.IsFastListing() || incompleteSnapshot != nil {
LOG_INFO("BACKUP_LIST", "Listing all chunks")
allChunks, _ := manager.SnapshotManager.ListAllFiles(manager.storage, "chunks/")
for _, chunk := range allChunks {
if len(chunk) == 0 || chunk[len(chunk) - 1] == '/' {
continue
}
if strings.HasSuffix(chunk, ".fsl") {
continue
}
chunk = strings.Replace(chunk, "/", "", -1)
chunkCache[chunk] = true
}
}
if incompleteSnapshot != nil {
// This is the last chunk from the incomplete snapshot that can be found in the cache
lastCompleteChunk := -1
for i, chunkHash := range incompleteSnapshot.ChunkHashes {
chunkID := manager.config.GetChunkIDFromHash(chunkHash)
if _, ok := chunkCache[chunkID]; ok {
lastCompleteChunk = i
} else {
break
}
}
// Only keep those files whose chunks exist in the cache
var files []*Entry
for _, file := range incompleteSnapshot.Files {
if file.StartChunk <= lastCompleteChunk && file.EndChunk <= lastCompleteChunk {
files = append(files, file)
} else {
break
}
}
incompleteSnapshot.Files = files
// Remove incomplete chunks (they may not have been uploaded)
incompleteSnapshot.ChunkHashes = incompleteSnapshot.ChunkHashes[:lastCompleteChunk + 1]
incompleteSnapshot.ChunkLengths = incompleteSnapshot.ChunkLengths[:lastCompleteChunk + 1]
remoteSnapshot = incompleteSnapshot
LOG_INFO("FILE_SKIP", "Skipped %d files from previous incomplete backup", len(files))
}
}
var numberOfNewFileChunks int // number of new file chunks
var numberOfNewFileChunks int64 // number of new file chunks
var totalUploadedFileChunkLength int64 // total length of uploaded file chunks
var totalUploadedFileChunkBytes int64 // how many actual bytes have been uploaded
var numberOfNewSnapshotChunks int // number of new snapshot chunks
var totalUploadedSnapshotChunkLength int64 // size of uploaded snapshot chunks
var totalUploadedSnapshotChunkBytes int64 // how many actual bytes have been uploaded
@@ -210,10 +274,11 @@ func (manager *BackupManager) Backup(top string, quickMode bool, threads int, ta
var modifiedEntries [] *Entry // Files that have been modified or newly created
var preservedEntries [] *Entry // Files that are unchanged
// If the quick mode is enabled, we simply treat all files as if they were new, and break them into chunks.
// If the quick mode is disabled and there isn't an incomplete snapshot from the last (failed) backup,
// we simply treat all files as if they were new, and break them into chunks.
// Otherwise, we need to find those that are new or recently modified
if !quickMode {
if remoteSnapshot.Revision == 0 && incompleteSnapshot == nil {
modifiedEntries = localSnapshot.Files
for _, entry := range modifiedEntries {
totalModifiedFileSize += entry.Size
@@ -267,7 +332,7 @@ func (manager *BackupManager) Backup(top string, quickMode bool, threads int, ta
var preservedChunkHashes []string
var preservedChunkLengths []int
// For each preserved file, adjust the indices StartChunk and EndChunk. This is done by finding gaps
// For each preserved file, adjust the StartChunk and EndChunk pointers. This is done by finding gaps
// between these indices and subtracting the number of deleted chunks.
last := -1
deletedChunks := 0
@@ -294,6 +359,13 @@ func (manager *BackupManager) Backup(top string, quickMode bool, threads int, ta
var uploadedEntries [] *Entry
var uploadedChunkHashes []string
var uploadedChunkLengths []int
var uploadedChunkLock = &sync.Mutex{}
// Set all file sizes to -1 to indicate they haven't been processed. This must be done before creating the file
// reader because the file reader may skip inaccessible files on construction.
for _, entry := range modifiedEntries {
entry.Size = -1
}
// The file reader implements the Reader interface. When an EOF is encountered, it opens the next file unless it
// is the last file.
@@ -314,9 +386,48 @@ func (manager *BackupManager) Backup(top string, quickMode bool, threads int, ta
keepUploadAlive = int64(value)
}
// Fail at the chunk specified by DUPLICACY_FAIL_CHUNK to simulate a backup error
chunkToFail := -1
if value, found := os.LookupEnv("DUPLICACY_FAIL_CHUNK"); found {
chunkToFail, _ = strconv.Atoi(value)
LOG_INFO("SNAPSHOT_FAIL", "Will abort the backup on chunk %d", chunkToFail)
}
chunkMaker := CreateChunkMaker(manager.config, false)
chunkUploader := CreateChunkUploader(manager.config, manager.storage, nil, threads, nil)
localSnapshotReady := false
var once sync.Once
if remoteSnapshot.Revision == 0 {
// In case an error occurs during the initial backup, save the incomplete snapshot
RunAtError = func() {
once.Do(
func() {
if !localSnapshotReady {
// Lock it to gain exclusive access to uploadedChunkHashes and uploadedChunkLengths
uploadedChunkLock.Lock()
setEntryContent(uploadedEntries, uploadedChunkLengths, len(preservedChunkHashes))
if len(preservedChunkHashes) > 0 {
//localSnapshot.Files = preservedEntries
//localSnapshot.Files = append(preservedEntries, uploadedEntries...)
localSnapshot.ChunkHashes = preservedChunkHashes
localSnapshot.ChunkHashes = append(localSnapshot.ChunkHashes, uploadedChunkHashes...)
localSnapshot.ChunkLengths = preservedChunkLengths
localSnapshot.ChunkLengths = append(localSnapshot.ChunkLengths, uploadedChunkLengths...)
} else {
//localSnapshot.Files = uploadedEntries
localSnapshot.ChunkHashes = uploadedChunkHashes
localSnapshot.ChunkLengths = uploadedChunkLengths
}
uploadedChunkLock.Unlock()
}
SaveIncompleteSnapshot(localSnapshot)
})
}
}
if fileReader.CurrentFile != nil {
LOG_TRACE("PACK_START", "Packing %s", fileReader.CurrentEntry.Path)
@@ -337,16 +448,16 @@ func (manager *BackupManager) Backup(top string, quickMode bool, threads int, ta
LOG_DEBUG("CHUNK_CACHE", "Skipped chunk %s in cache", chunk.GetID())
} else {
if uploadSize > 0 {
numberOfNewFileChunks++
totalUploadedFileChunkLength += int64(chunkSize)
totalUploadedFileChunkBytes += int64(uploadSize)
atomic.AddInt64(&numberOfNewFileChunks, 1)
atomic.AddInt64(&totalUploadedFileChunkLength, int64(chunkSize))
atomic.AddInt64(&totalUploadedFileChunkBytes, int64(uploadSize))
action = "Uploaded"
} else {
LOG_DEBUG("CHUNK_EXIST", "Skipped chunk %s in the storage", chunk.GetID())
}
}
uploadedModifiedFileSize += int64(chunkSize)
uploadedModifiedFileSize := atomic.AddInt64(&uploadedModifiedFileSize, int64(chunkSize))
if IsTracing() || showStatistics {
now := time.Now().Unix()
@@ -397,24 +508,33 @@ func (manager *BackupManager) Backup(top string, quickMode bool, threads int, ta
chunkUploader.StartChunk(chunk, chunkIndex)
}
// Must lock it because the RunAtError function called by other threads may access these two slices
uploadedChunkLock.Lock()
uploadedChunkHashes = append(uploadedChunkHashes, hash)
uploadedChunkLengths = append(uploadedChunkLengths, chunkSize)
uploadedChunkLock.Unlock()
if len(uploadedChunkHashes) == chunkToFail {
LOG_ERROR("SNAPSHOT_FAIL", "Artificially fail the chunk %d for testing purposes", chunkToFail)
}
},
func (fileSize int64, hash string) (io.Reader, bool) {
// Must lock here because the RunAtError function called by other threads may access uploadedEntries
uploadedChunkLock.Lock()
defer uploadedChunkLock.Unlock()
// This function is called when a new file is needed
entry := fileReader.CurrentEntry
entry.Hash = hash
if entry.Size != fileSize {
totalModifiedFileSize += fileSize - entry.Size
entry.Size = fileSize
}
entry.Size = fileSize
uploadedEntries = append(uploadedEntries, entry)
if !showStatistics || IsTracing() || RunInBackground {
LOG_INFO("PACK_END", "Packed %s (%d)", entry.Path, entry.Size)
}
fileReader.NextFile()
if fileReader.CurrentFile != nil {
@@ -444,20 +564,28 @@ func (manager *BackupManager) Backup(top string, quickMode bool, threads int, ta
localSnapshot.ChunkLengths = uploadedChunkLengths
}
localSnapshotReady = true
localSnapshot.EndTime = time.Now().Unix()
err = manager.SnapshotManager.CheckSnapshot(localSnapshot)
if err != nil {
RunAtError = func() {} // Don't save the incomplete snapshot
LOG_ERROR("SNAPSHOT_CHECK", "The snapshot contains an error: %v", err)
return false
}
localSnapshot.Tag = tag
localSnapshot.Options = ""
if !quickMode {
if !quickMode || remoteSnapshot.Revision == 0 {
localSnapshot.Options = "-hash"
}
if _, found := os.LookupEnv("DUPLICACY_FAIL_SNAPSHOT"); found {
LOG_ERROR("SNAPSHOT_FAIL", "Artificially fail the backup for testing purposes")
return false
}
if shadowCopy {
if localSnapshot.Options == "" {
localSnapshot.Options = "-vss"
@@ -504,6 +632,8 @@ func (manager *BackupManager) Backup(top string, quickMode bool, threads int, ta
manager.SnapshotManager.CleanSnapshotCache(localSnapshot, nil)
LOG_INFO("BACKUP_END", "Backup for %s at revision %d completed", top, localSnapshot.Revision)
RunAtError = func() {}
RemoveIncompleteSnapshot()
totalSnapshotChunks := len(localSnapshot.FileSequence) + len(localSnapshot.ChunkSequence) +
len(localSnapshot.LengthSequence)
@@ -527,7 +657,7 @@ func (manager *BackupManager) Backup(top string, quickMode bool, threads int, ta
LOG_INFO("BACKUP_STATS", "All chunks: %d total, %s bytes; %d new, %s bytes, %s bytes uploaded",
len(localSnapshot.ChunkHashes) + totalSnapshotChunks,
PrettyNumber(totalFileChunkLength + totalSnapshotChunkLength),
numberOfNewFileChunks + numberOfNewSnapshotChunks,
int(numberOfNewFileChunks) + numberOfNewSnapshotChunks,
PrettyNumber(totalUploadedFileChunkLength + totalUploadedSnapshotChunkLength),
PrettyNumber(totalUploadedFileChunkBytes + totalUploadedSnapshotChunkBytes))
@@ -585,6 +715,11 @@ func (manager *BackupManager) Restore(top string, revision int, inPlace bool, qu
LOG_DEBUG("RESTORE_PARAMETERS", "top: %s, revision: %d, in-place: %t, quick: %t, delete: %t",
top, revision, inPlace, quickMode, deleteMode)
if !strings.HasPrefix(GetDuplicacyPreferencePath(), top) {
LOG_INFO("RESTORE_INPLACE", "Forcing in-place mode with a non-default preference path")
inPlace = true
}
if len(patterns) > 0 {
for _, pattern := range patterns {
LOG_TRACE("RESTORE_PATTERN", "%s", pattern)
@@ -600,6 +735,7 @@ func (manager *BackupManager) Restore(top string, revision int, inPlace bool, qu
}
}
// How will restore behave when the repository was created using -repo-dir?
err = os.Mkdir(path.Join(top, DUPLICACY_DIRECTORY), 0744)
if err != nil && !os.IsExist(err) {
LOG_ERROR("RESTORE_MKDIR", "Failed to create the preference directory: %v", err)
@@ -645,6 +781,7 @@ func (manager *BackupManager) Restore(top string, revision int, inPlace bool, qu
i := 0
for _, entry := range remoteSnapshot.Files {
skipped := false
// Find local files that don't exist in the remote snapshot
for i < len(localSnapshot.Files) {
local := localSnapshot.Files[i]
@@ -656,11 +793,18 @@ func (manager *BackupManager) Restore(top string, revision int, inPlace bool, qu
} else {
if compare == 0 {
i++
if quickMode && local.IsSameAs(entry) {
skipped = true
}
}
break
}
}
if skipped {
continue
}
fullPath := joinPath(top, entry.Path)
if entry.IsLink() {
stat, err := os.Lstat(fullPath)
@@ -774,7 +918,9 @@ func (manager *BackupManager) Restore(top string, revision int, inPlace bool, qu
if deleteMode && len(patterns) == 0 {
for _, file := range extraFiles {
// Reverse the order to make sure directories are empty before being deleted
for i := range extraFiles {
file := extraFiles[len(extraFiles) - 1 - i]
fullPath := joinPath(top, file)
os.Remove(fullPath)
LOG_INFO("RESTORE_DELETE", "Deleted %s", file)
@@ -788,8 +934,6 @@ func (manager *BackupManager) Restore(top string, revision int, inPlace bool, qu
}
}
RemoveEmptyDirectories(top)
if showStatistics {
for _, file := range downloadedFiles {
LOG_INFO("DOWNLOAD_DONE", "Downloaded %s (%d)", file.Path, file.Size)
@@ -978,8 +1122,9 @@ func (manager *BackupManager) RestoreFile(chunkDownloader *ChunkDownloader, chun
var existingFile, newFile *os.File
var err error
temporaryPath := path.Join(top, DUPLICACY_DIRECTORY, "temporary")
preferencePath := GetDuplicacyPreferencePath()
temporaryPath := path.Join(preferencePath, "temporary")
fullPath := joinPath(top, entry.Path)
defer func() {
@@ -1005,33 +1150,36 @@ func (manager *BackupManager) RestoreFile(chunkDownloader *ChunkDownloader, chun
var offset int64
existingFile, err = os.Open(fullPath)
if err != nil && !os.IsNotExist(err) {
LOG_TRACE("DOWNLOAD_OPEN", "Can't open the existing file: %v", err)
}
fileHash := ""
if existingFile != nil {
// Break existing file into chunks.
chunkMaker.ForEachChunk(
existingFile,
func (chunk *Chunk, final bool) {
hash := chunk.GetHash()
chunkSize := chunk.GetLength()
existingChunks = append(existingChunks, hash)
existingLengths = append(existingLengths, chunkSize)
offsetMap[hash] = offset
lengthMap[hash] = chunkSize
offset += int64(chunkSize)
},
func (fileSize int64, hash string) (io.Reader, bool) {
fileHash = hash
return nil, false
})
if fileHash == entry.Hash {
LOG_TRACE("DOWNLOAD_SKIP", "File %s unchanged (by hash)", entry.Path)
return false
if err != nil {
if os.IsNotExist(err) {
if inPlace && entry.Size > 100 * 1024 * 1024 {
// Create an empty sparse file
existingFile, err = os.OpenFile(fullPath, os.O_WRONLY | os.O_CREATE | os.O_TRUNC, 0600)
if err != nil {
LOG_ERROR("DOWNLOAD_CREATE", "Failed to create the file %s for in-place writing", fullPath)
return false
}
_, err = existingFile.Seek(entry.Size - 1, 0)
if err != nil {
LOG_ERROR("DOWNLOAD_CREATE", "Failed to resize the initial file %s for in-place writing", fullPath)
return false
}
_, err = existingFile.Write([]byte("\x00"))
if err != nil {
LOG_ERROR("DOWNLOAD_CREATE", "Failed to initialize the sparse file %s for in-place writing", fullPath)
return false
}
existingFile.Close()
existingFile, err = os.Open(fullPath)
if err != nil {
LOG_ERROR("DOWNLOAD_OPEN", "Can't reopen the initial file just created: %v", err)
return false
}
}
} else {
LOG_TRACE("DOWNLOAD_OPEN", "Can't open the existing file: %v", err)
}
} else {
if !overwrite {
LOG_ERROR("DOWNLOAD_OVERWRITE",
"File %s already exists. Please specify the -overwrite option to continue", entry.Path)
@@ -1039,9 +1187,83 @@ func (manager *BackupManager) RestoreFile(chunkDownloader *ChunkDownloader, chun
}
}
if inPlace {
if existingFile == nil {
inPlace = false
fileHash := ""
if existingFile != nil {
if inPlace {
// In in-place mode, we only consider chunks in the existing file at the same offsets, so we
// break the original file at the offsets recorded in the backup
fileHasher := manager.config.NewFileHasher()
buffer := make([]byte, 64 * 1024)
err = nil
// We read one more byte so that the file hash will differ if the file to be restored is a
// truncated portion of the existing file
for i := entry.StartChunk; i <= entry.EndChunk + 1; i++ {
hasher := manager.config.NewKeyedHasher(manager.config.HashKey)
chunkSize := 1 // the size of the extra chunk beyond EndChunk
if i == entry.StartChunk {
chunkSize -= entry.StartOffset
} else if i == entry.EndChunk {
chunkSize = entry.EndOffset
} else if i > entry.StartChunk && i < entry.EndChunk {
chunkSize = chunkDownloader.taskList[i].chunkLength
}
count := 0
for count < chunkSize {
n := chunkSize - count
if n > cap(buffer) {
n = cap(buffer)
}
n, err := existingFile.Read(buffer[:n])
if n > 0 {
hasher.Write(buffer[:n])
fileHasher.Write(buffer[:n])
count += n
}
if err == io.EOF {
break
}
if err != nil {
LOG_ERROR("DOWNLOAD_SPLIT", "Failed to read existing file: %v", err)
return false
}
}
if count > 0 {
hash := string(hasher.Sum(nil))
existingChunks = append(existingChunks, hash)
existingLengths = append(existingLengths, chunkSize)
offsetMap[hash] = offset
lengthMap[hash] = chunkSize
offset += int64(chunkSize)
}
if err == io.EOF {
break
}
}
fileHash = hex.EncodeToString(fileHasher.Sum(nil))
} else {
// If it is not in-place, we want to reuse any chunks in the existing file regardless of their offsets, so
// we run the chunk maker to split the original file.
chunkMaker.ForEachChunk(
existingFile,
func (chunk *Chunk, final bool) {
hash := chunk.GetHash()
chunkSize := chunk.GetLength()
existingChunks = append(existingChunks, hash)
existingLengths = append(existingLengths, chunkSize)
offsetMap[hash] = offset
lengthMap[hash] = chunkSize
offset += int64(chunkSize)
},
func (fileSize int64, hash string) (io.Reader, bool) {
fileHash = hash
return nil, false
})
}
if fileHash == entry.Hash && fileHash != "" {
LOG_TRACE("DOWNLOAD_SKIP", "File %s unchanged (by hash)", entry.Path)
return false
}
}
@@ -1057,11 +1279,20 @@ func (manager *BackupManager) RestoreFile(chunkDownloader *ChunkDownloader, chun
LOG_TRACE("DOWNLOAD_INPLACE", "Updating %s in place", fullPath)
existingFile.Close()
existingFile, err = os.OpenFile(fullPath, os.O_RDWR, 0)
if err != nil {
LOG_ERROR("DOWNLOAD_OPEN", "Failed to open the file %s for in-place writing", fullPath)
return false
if existingFile == nil {
// Create an empty file
existingFile, err = os.OpenFile(fullPath, os.O_WRONLY | os.O_CREATE | os.O_TRUNC, 0600)
if err != nil {
LOG_ERROR("DOWNLOAD_CREATE", "Failed to create the file %s for in-place writing", fullPath)
}
} else {
// Close and reopen in a different mode
existingFile.Close()
existingFile, err = os.OpenFile(fullPath, os.O_RDWR, 0)
if err != nil {
LOG_ERROR("DOWNLOAD_OPEN", "Failed to open the file %s for in-place writing", fullPath)
return false
}
}
existingFile.Seek(0, 0)
@@ -1128,7 +1359,7 @@ func (manager *BackupManager) RestoreFile(chunkDownloader *ChunkDownloader, chun
// Verify the download by hash
hash := hex.EncodeToString(hasher.Sum(nil))
if hash != entry.Hash {
if hash != entry.Hash && hash != "" && entry.Hash != "" && !strings.HasPrefix(entry.Hash, "#") {
LOG_ERROR("DOWNLOAD_HASH", "File %s has a mismatched hash: %s instead of %s (in-place)",
fullPath, "", entry.Hash)
return false
@@ -1201,7 +1432,7 @@ func (manager *BackupManager) RestoreFile(chunkDownloader *ChunkDownloader, chun
}
hash := hex.EncodeToString(hasher.Sum(nil))
if hash != entry.Hash {
if hash != entry.Hash && hash != "" && entry.Hash != "" && !strings.HasPrefix(entry.Hash, "#") {
LOG_ERROR("DOWNLOAD_HASH", "File %s has a mismatched hash: %s instead of %s",
entry.Path, hash, entry.Hash)
return false
@@ -1334,6 +1565,7 @@ func (manager *BackupManager) CopySnapshots(otherManager *BackupManager, snapsho
} else {
LOG_INFO("SNAPSHOT_COPY", "Copied chunk %s (%d/%d)", chunk.GetID(), chunkIndex, len(chunks))
}
otherManager.config.PutChunk(chunk)
})
chunkUploader.Start()
@@ -1347,7 +1579,10 @@ func (manager *BackupManager) CopySnapshots(otherManager *BackupManager, snapsho
i := chunkDownloader.AddChunk(chunkHash)
chunk := chunkDownloader.WaitForChunk(i)
chunkUploader.StartChunk(chunk, chunkIndex)
newChunk := otherManager.config.GetChunk()
newChunk.Reset(true)
newChunk.Write(chunk.GetBytes())
chunkUploader.StartChunk(newChunk, chunkIndex)
}
chunkDownloader.Stop()
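
The initial-backup path above installs a RunAtError hook, guarded by sync.Once, that saves an incomplete snapshot when an error or an interrupt aborts the run, and disarms it on success. A generic sketch of that hook pattern; the names and the saved state are illustrative, not Duplicacy's API:

package main

import (
    "fmt"
    "os"
    "os/signal"
    "sync"
)

// RunAtError is invoked before the process exits on an error or an
// interrupt, mirroring the hook used above.
var RunAtError = func() {}

func main() {
    var once sync.Once
    progress := []string{} // stand-in for uploaded chunk hashes/lengths
    var progressLock sync.Mutex

    RunAtError = func() {
        once.Do(func() {
            progressLock.Lock()
            defer progressLock.Unlock()
            // Stand-in for SaveIncompleteSnapshot(localSnapshot).
            fmt.Printf("saving incomplete state with %d items\n", len(progress))
        })
    }

    // Interrupts trigger the same hook before exiting, as in the CLI's main().
    c := make(chan os.Signal, 1)
    signal.Notify(c, os.Interrupt)
    go func() {
        for range c {
            RunAtError()
            os.Exit(1)
        }
    }()

    // ... do work, recording progress under progressLock ...
    progressLock.Lock()
    progress = append(progress, "chunk1")
    progressLock.Unlock()

    // On success, disarm the hook so no incomplete state is written.
    RunAtError = func() {}
    fmt.Println("completed")
}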

View File

@@ -1,6 +1,6 @@
// Copyright (c) Acrosync LLC. All rights reserved.
// Licensed under the Fair Source License 0.9 (https://fair.io/)
// User Limitation: 5 users
// Free for personal use and commercial trial
// Commercial use requires per-user licenses available from https://duplicacy.com
package duplicacy
@@ -104,6 +104,27 @@ func modifyFile(path string, portion float32) {
}
}
func checkExistence(t *testing.T, path string, exists bool, isDir bool) {
stat, err := os.Stat(path)
if exists {
if err != nil {
t.Errorf("%s does not exist: %v", path, err)
} else if isDir {
if !stat.Mode().IsDir() {
t.Errorf("%s is not a directory", path)
}
} else {
if stat.Mode().IsDir() {
t.Errorf("%s is not a file", path)
}
}
} else {
if err == nil || !os.IsNotExist(err) {
t.Errorf("%s may exist: %v", path, err)
}
}
}
func truncateFile(path string) {
file, err := os.OpenFile(path, os.O_WRONLY, 0644)
if err != nil {
@@ -173,6 +194,9 @@ func TestBackupManager(t *testing.T) {
os.Mkdir(testDir + "/repository1", 0700)
os.Mkdir(testDir + "/repository1/dir1", 0700)
os.Mkdir(testDir + "/repository1/.duplicacy", 0700)
os.Mkdir(testDir + "/repository2", 0700)
os.Mkdir(testDir + "/repository2/.duplicacy", 0700)
maxFileSize := 1000000
//maxFileSize := 200000
@@ -215,11 +239,14 @@ func TestBackupManager(t *testing.T) {
time.Sleep(time.Duration(delay) * time.Second)
SetDuplicacyPreferencePath(testDir + "/repository1/.duplicacy")
backupManager := CreateBackupManager("host1", storage, testDir, password)
backupManager.SetupSnapshotCache(testDir + "/repository1", "default")
backupManager.SetupSnapshotCache("default")
SetDuplicacyPreferencePath(testDir + "/repository1/.duplicacy")
backupManager.Backup(testDir + "/repository1", /*quickMode=*/true, threads, "first", false, false)
time.Sleep(time.Duration(delay) * time.Second)
SetDuplicacyPreferencePath(testDir + "/repository2/.duplicacy")
backupManager.Restore(testDir + "/repository2", threads, /*inPlace=*/false, /*quickMode=*/false, threads, /*overwrite=*/true,
/*deleteMode=*/false, /*showStatistics=*/false, /*patterns=*/nil)
@@ -240,8 +267,10 @@ func TestBackupManager(t *testing.T) {
modifyFile(testDir + "/repository1/file2", 0.2)
modifyFile(testDir + "/repository1/dir1/file3", 0.3)
SetDuplicacyPreferencePath(testDir + "/repository1/.duplicacy")
backupManager.Backup(testDir + "/repository1", /*quickMode=*/true, threads, "second", false, false)
time.Sleep(time.Duration(delay) * time.Second)
SetDuplicacyPreferencePath(testDir + "/repository2/.duplicacy")
backupManager.Restore(testDir + "/repository2", 2, /*inPlace=*/true, /*quickMode=*/true, threads, /*overwrite=*/true,
/*deleteMode=*/false, /*showStatistics=*/false, /*patterns=*/nil)
@@ -253,11 +282,25 @@ func TestBackupManager(t *testing.T) {
}
}
// Truncate file2 and add a few empty directories
truncateFile(testDir + "/repository1/file2")
os.Mkdir(testDir + "/repository1/dir2", 0700)
os.Mkdir(testDir + "/repository1/dir2/dir3", 0700)
os.Mkdir(testDir + "/repository1/dir4", 0700)
SetDuplicacyPreferencePath(testDir + "/repository1/.duplicacy")
backupManager.Backup(testDir + "/repository1", /*quickMode=*/false, threads, "third", false, false)
time.Sleep(time.Duration(delay) * time.Second)
// Create some directories and files under repository2 that will be deleted during restore
os.Mkdir(testDir + "/repository2/dir5", 0700)
os.Mkdir(testDir + "/repository2/dir5/dir6", 0700)
os.Mkdir(testDir + "/repository2/dir7", 0700)
createRandomFile(testDir + "/repository2/file4", 100)
createRandomFile(testDir + "/repository2/dir5/file5", 100)
SetDuplicacyPreferencePath(testDir + "/repository2/.duplicacy")
backupManager.Restore(testDir + "/repository2", 3, /*inPlace=*/true, /*quickMode=*/false, threads, /*overwrite=*/true,
/*deleteMode=*/false, /*showStatistics=*/false, /*patterns=*/nil)
/*deleteMode=*/true, /*showStatistics=*/false, /*patterns=*/nil)
for _, f := range []string{ "file1", "file2", "dir1/file3" } {
hash1 := getFileHash(testDir + "/repository1/" + f)
@@ -267,9 +310,22 @@ func TestBackupManager(t *testing.T) {
}
}
// These files/dirs should not exist because deleteMode == true
checkExistence(t, testDir + "/repository2/dir5", false, false);
checkExistence(t, testDir + "/repository2/dir5/dir6", false, false);
checkExistence(t, testDir + "/repository2/dir7", false, false);
checkExistence(t, testDir + "/repository2/file4", false, false);
checkExistence(t, testDir + "/repository2/dir5/file5", false, false);
// These empty dirs should exist
checkExistence(t, testDir + "/repository2/dir2", true, true);
checkExistence(t, testDir + "/repository2/dir2/dir3", true, true);
checkExistence(t, testDir + "/repository2/dir4", true, true);
// Remove file2 and dir1/file3 and restore them from revision 3
os.Remove(testDir + "/repository1/file2")
os.Remove(testDir + "/repository1/dir1/file3")
SetDuplicacyPreferencePath(testDir + "/repository1/.duplicacy")
backupManager.Restore(testDir + "/repository1", 3, /*inPlace=*/true, /*quickMode=*/false, threads, /*overwrite=*/true,
/*deleteMode=*/false, /*showStatistics=*/false, /*patterns=*/[]string{"+file2", "+dir1/file3", "-*"})

View File

@@ -1,6 +1,6 @@
// Copyright (c) Acrosync LLC. All rights reserved.
// Licensed under the Fair Source License 0.9 (https://fair.io/)
// User Limitation: 5 users
// Free for personal use and commercial trial
// Commercial use requires per-user licenses available from https://duplicacy.com
package duplicacy
@@ -154,6 +154,18 @@ func (chunk *Chunk) GetID() string {
return chunk.id
}
func (chunk *Chunk) VerifyID() {
hasher := chunk.config.NewKeyedHasher(chunk.config.HashKey)
hasher.Write(chunk.buffer.Bytes())
hash := hasher.Sum(nil)
hasher = chunk.config.NewKeyedHasher(chunk.config.IDKey)
hasher.Write([]byte(hash))
chunkID := hex.EncodeToString(hasher.Sum(nil))
if chunkID != chunk.GetID() {
LOG_ERROR("CHUNK_ID", "The chunk id should be %s instead of %s, length: %d", chunkID, chunk.GetID(), len(chunk.buffer.Bytes()))
}
}
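
VerifyID recomputes the chunk ID in two steps: hash the chunk content with HashKey, then hash that digest with IDKey and hex-encode the result. A stand-alone sketch of the derivation, using HMAC-SHA256 purely as a stand-in for config.NewKeyedHasher (the real keyed hasher depends on the repository configuration) and placeholder keys:

package main

import (
    "crypto/hmac"
    "crypto/sha256"
    "encoding/hex"
    "fmt"
)

// keyedHash is a stand-in for config.NewKeyedHasher; the real implementation
// depends on the repository configuration.
func keyedHash(key, data []byte) []byte {
    h := hmac.New(sha256.New, key)
    h.Write(data)
    return h.Sum(nil)
}

func main() {
    hashKey := []byte("hash-key") // placeholders for config.HashKey / config.IDKey
    idKey := []byte("id-key")
    content := []byte("chunk content")

    // Same two-step derivation that VerifyID recomputes: hash the content
    // with HashKey, then hash that digest with IDKey to get the chunk ID.
    contentHash := keyedHash(hashKey, content)
    chunkID := hex.EncodeToString(keyedHash(idKey, contentHash))
    fmt.Println("chunk id:", chunkID)
}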
// Encrypt encrypts the plain data stored in the chunk buffer. If derivationKey is not nil, the actual
// encryption key will be HMAC-SHA256(encryptionKey, derivationKey).
func (chunk *Chunk) Encrypt(encryptionKey []byte, derivationKey string) (err error) {

View File

@@ -1,6 +1,6 @@
// Copyright (c) Acrosync LLC. All rights reserved.
// Licensed under the Fair Source License 0.9 (https://fair.io/)
// User Limitation: 5 users
// Free for personal use and commercial trial
// Commercial use requires per-user licenses available from https://duplicacy.com
package duplicacy

View File

@@ -1,6 +1,6 @@
// Copyright (c) Acrosync LLC. All rights reserved.
// Licensed under the Fair Source License 0.9 (https://fair.io/)
// User Limitation: 5 users
// Free for personal use and commercial trial
// Commercial use requires per-user licenses available from https://duplicacy.com
package duplicacy
@@ -45,8 +45,8 @@ type ChunkDownloader struct {
completionChannel chan ChunkDownloadCompletion // A downloading goroutine sends back the chunk via this channel after downloading
startTime int64 // The time it starts downloading
totalFileSize int64 // Total file size
downloadedFileSize int64 // Downloaded file size
totalChunkSize int64 // Total chunk size
downloadedChunkSize int64 // Downloaded chunk size
numberOfDownloadedChunks int // The number of chunks that have been downloaded
numberOfDownloadingChunks int // The number of chunks still being downloaded
numberOfActiveChunks int // The number of chunks that are being downloaded or have been downloaded but not reclaimed
@@ -95,7 +95,7 @@ func (downloader *ChunkDownloader) AddFiles(snapshot *Snapshot, files [] *Entry)
downloader.taskList = nil
lastChunkIndex := -1
maximumChunks := 0
downloader.totalFileSize = 0
downloader.totalChunkSize = 0
for _, file := range files {
if file.Size == 0 {
continue
@@ -109,6 +109,7 @@ func (downloader *ChunkDownloader) AddFiles(snapshot *Snapshot, files [] *Entry)
needed: false,
}
downloader.taskList = append(downloader.taskList, task)
downloader.totalChunkSize += int64(snapshot.ChunkLengths[i])
} else {
downloader.taskList[len(downloader.taskList) - 1].needed = true
}
@@ -119,7 +120,6 @@ func (downloader *ChunkDownloader) AddFiles(snapshot *Snapshot, files [] *Entry)
if file.EndChunk - file.StartChunk > maximumChunks {
maximumChunks = file.EndChunk - file.StartChunk
}
downloader.totalFileSize += file.Size
}
}
@@ -177,12 +177,6 @@ func (downloader *ChunkDownloader) Reclaim(chunkIndex int) {
return
}
for i := downloader.lastChunkIndex; i < chunkIndex; i++ {
if !downloader.taskList[i].isDownloading {
atomic.AddInt64(&downloader.downloadedFileSize, int64(downloader.taskList[i].chunkLength))
}
}
for i, _ := range downloader.completedTasks {
if i < chunkIndex && downloader.taskList[i].chunk != nil {
downloader.config.PutChunk(downloader.taskList[i].chunk)
@@ -320,7 +314,11 @@ func (downloader *ChunkDownloader) Download(threadIndex int, task ChunkDownloadT
if !exist {
// A chunk is not found. This is a serious error and hopefully it will never happen.
LOG_FATAL("DOWNLOAD_CHUNK", "Chunk %s can't be found", chunkID)
if err != nil {
LOG_FATAL("DOWNLOAD_CHUNK", "Chunk %s can't be found: %v", chunkID, err)
} else {
LOG_FATAL("DOWNLOAD_CHUNK", "Chunk %s can't be found", chunkID)
}
return false
}
LOG_DEBUG("CHUNK_FOSSIL", "Chunk %s has been marked as a fossil", chunkID)
@@ -353,21 +351,20 @@ func (downloader *ChunkDownloader) Download(threadIndex int, task ChunkDownloadT
}
}
if (downloader.showStatistics || IsTracing()) && downloader.totalFileSize > 0 {
downloadedChunkSize := atomic.AddInt64(&downloader.downloadedChunkSize, int64(chunk.GetLength()))
atomic.AddInt64(&downloader.downloadedFileSize, int64(chunk.GetLength()))
downloadFileSize := atomic.LoadInt64(&downloader.downloadedFileSize)
if (downloader.showStatistics || IsTracing()) && downloader.totalChunkSize > 0 {
now := time.Now().Unix()
if now <= downloader.startTime {
now = downloader.startTime + 1
}
speed := downloadFileSize / (now - downloader.startTime)
speed := downloadedChunkSize / (now - downloader.startTime)
remainingTime := int64(0)
if speed > 0 {
remainingTime = (downloader.totalFileSize - downloadFileSize) / speed + 1
remainingTime = (downloader.totalChunkSize - downloadedChunkSize) / speed + 1
}
percentage := float32(downloadFileSize * 1000 / downloader.totalFileSize)
percentage := float32(downloadedChunkSize * 1000 / downloader.totalChunkSize)
LOG_INFO("DOWNLOAD_PROGRESS", "Downloaded chunk %d size %d, %sB/s %s %.1f%%",
task.chunkIndex + 1, chunk.GetLength(),
PrettySize(speed), PrettyTime(remainingTime), percentage / 10)
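
The progress line above derives speed, remaining time and percentage from the accumulated chunk sizes rather than file sizes. A small sketch of the same arithmetic with example numbers (300 MB of 1 GB downloaded in 60 seconds):

package main

import "fmt"

// progress reports download speed, estimated remaining time and percentage
// from accumulated chunk sizes, mirroring the arithmetic above.
func progress(downloaded, total, startTime, now int64) (speed, remaining int64, percent float32) {
    if now <= startTime {
        now = startTime + 1 // avoid division by zero right after the start
    }
    speed = downloaded / (now - startTime)
    if speed > 0 {
        remaining = (total-downloaded)/speed + 1
    }
    percent = float32(downloaded*1000/total) / 10
    return
}

func main() {
    speed, remaining, percent := progress(300<<20, 1<<30, 0, 60)
    fmt.Printf("%d B/s, ~%d s left, %.1f%%\n", speed, remaining, percent)
    // Prints: 5242880 B/s, ~145 s left, 29.2%
}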

View File

@@ -1,6 +1,6 @@
// Copyright (c) Acrosync LLC. All rights reserved.
// Licensed under the Fair Source License 0.9 (https://fair.io/)
// User Limitation: 5 users
// Free for personal use and commercial trial
// Commercial use requires per-user licenses available from https://duplicacy.com
package duplicacy
@@ -146,7 +146,6 @@ func (maker *ChunkMaker) ForEachChunk(reader io.Reader, endOfChunk func(chunk *C
}
for {
startNewChunk()
maker.bufferStart = 0
for maker.bufferStart < maker.minimumChunkSize && !isEOF {
count, err := reader.Read(maker.buffer[maker.bufferStart : maker.minimumChunkSize])
@@ -174,10 +173,14 @@ func (maker *ChunkMaker) ForEachChunk(reader io.Reader, endOfChunk func(chunk *C
return
} else {
endOfChunk(chunk, false)
startNewChunk()
fileSize = 0
fileHasher = maker.config.NewFileHasher()
isEOF = false
}
} else {
endOfChunk(chunk, false)
startNewChunk()
}
}

View File

@@ -1,6 +1,6 @@
// Copyright (c) Acrosync LLC. All rights reserved.
// Licensed under the Fair Source License 0.9 (https://fair.io/)
// User Limitation: 5 users
// Free for personal use and commercial trial
// Commercial use requires per-user licenses available from https://duplicacy.com
package duplicacy

View File

@@ -1,6 +1,6 @@
// Copyright (c) Acrosync LLC. All rights reserved.
// Licensed under the Fair Source License 0.9 (https://fair.io/)
// User Limitation: 5 users
// Free for personal use and commercial trial
// Commercial use requires per-user licenses available from https://duplicacy.com
package duplicacy
@@ -92,6 +92,11 @@ func (uploader *ChunkUploader) Upload(threadIndex int, task ChunkUploadTask) boo
chunkSize := chunk.GetLength()
chunkID := chunk.GetID()
// For a snapshot chunk, verify that its chunk id is correct
if uploader.snapshotCache != nil {
chunk.VerifyID()
}
if uploader.snapshotCache != nil && uploader.storage.IsCacheNeeded() {
// Save a copy to the local snapshot.
chunkPath, exist, _, err := uploader.snapshotCache.FindChunk(threadIndex, chunkID, false)

View File

@@ -1,6 +1,6 @@
// Copyright (c) Acrosync LLC. All rights reserved.
// Licensed under the Fair Source License 0.9 (https://fair.io/)
// User Limitation: 5 users
// Free for personal use and commercial trial
// Commercial use requires per-user licenses available from https://duplicacy.com
package duplicacy
@@ -104,7 +104,7 @@ func TestUploaderAndDownloader(t *testing.T) {
chunkDownloader := CreateChunkDownloader(config, storage, nil, true, testThreads)
chunkDownloader.totalFileSize = int64(totalFileSize)
chunkDownloader.totalChunkSize = int64(totalFileSize)
for _, chunk := range chunks {
chunkDownloader.AddChunk(chunk.GetHash())

View File

@@ -1,6 +1,6 @@
// Copyright (c) Acrosync LLC. All rights reserved.
// Licensed under the Fair Source License 0.9 (https://fair.io/)
// User Limitation: 5 users
// Free for personal use and commercial trial
// Commercial use requires per-user licenses available from https://duplicacy.com
package duplicacy
@@ -225,8 +225,41 @@ func (config *Config) NewKeyedHasher(key []byte) hash.Hash {
}
}
var SkipFileHash = false
func init() {
if value, found := os.LookupEnv("DUPLICACY_SKIP_FILE_HASH"); found && value != "" && value != "0" {
SkipFileHash = true
}
}
// Implement a dummy hasher to be used when SkipFileHash is true.
type DummyHasher struct {
}
func (hasher *DummyHasher) Write(p []byte) (int, error) {
return len(p), nil
}
func (hasher *DummyHasher) Sum(b []byte) []byte {
return []byte("")
}
func (hasher *DummyHasher) Reset() {
}
func (hasher *DummyHasher) Size() int {
return 0
}
func (hasher *DummyHasher) BlockSize() int {
return 0
}
func (config *Config) NewFileHasher() hash.Hash {
if config.CompressionLevel == DEFAULT_COMPRESSION_LEVEL {
if SkipFileHash {
return &DummyHasher {}
} else if config.CompressionLevel == DEFAULT_COMPRESSION_LEVEL {
hasher, _ := blake2.New(&blake2.Config{ Size: 32 })
return hasher
} else {
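
SkipFileHash above swaps in a hasher that satisfies hash.Hash but does no work, so callers keep a single Write/Sum code path. A sketch of that idea; newFileHasher and sha256 are stand-ins, not the real NewFileHasher, which uses blake2 at the default compression level:

package main

import (
    "crypto/sha256"
    "encoding/hex"
    "fmt"
    "hash"
    "io"
    "os"
    "strings"
)

// nopHasher satisfies hash.Hash but does no work, in the spirit of the
// DummyHasher above; callers keep using the same Write/Sum code path.
type nopHasher struct{}

func (nopHasher) Write(p []byte) (int, error) { return len(p), nil }
func (nopHasher) Sum(b []byte) []byte         { return b }
func (nopHasher) Reset()                      {}
func (nopHasher) Size() int                   { return 0 }
func (nopHasher) BlockSize() int              { return 0 }

// newFileHasher is a stand-in selector: a real hasher by default, or a no-op
// one when DUPLICACY_SKIP_FILE_HASH is set (mirroring the env check above).
func newFileHasher() hash.Hash {
    if v := os.Getenv("DUPLICACY_SKIP_FILE_HASH"); v != "" && v != "0" {
        return nopHasher{}
    }
    return sha256.New() // stand-in; the real code uses blake2
}

func main() {
    h := newFileHasher()
    if _, err := io.Copy(h, strings.NewReader("file contents")); err != nil {
        fmt.Println(err)
        return
    }
    fmt.Println("file hash:", hex.EncodeToString(h.Sum(nil)))
}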

View File

@@ -1,6 +1,6 @@
// Copyright (c) Acrosync LLC. All rights reserved.
// Licensed under the Fair Source License 0.9 (https://fair.io/)
// User Limitation: 5 users
// Free for personal use and commercial trial
// Commercial use requires per-user licenses available from https://duplicacy.com
package duplicacy

View File

@@ -1,7 +1,6 @@
// Copyright (c) Acrosync LLC. All rights reserved.
// Licensed under the Fair Source License 0.9 (https://fair.io/)
// User Limitation: 5 users
// Free for personal use and commercial trial
// Commercial use requires per-user licenses available from https://duplicacy.com
package duplicacy
import (
@@ -22,6 +21,7 @@ import (
// This is the hidden directory in the repository for storing various files.
var DUPLICACY_DIRECTORY = ".duplicacy"
var DUPLICACY_FILE = ".duplicacy"
// Regex for matching 'StartChunk:StartOffset:EndChunk:EndOffset'
var contentRegex = regexp.MustCompile(`^([0-9]+):([0-9]+):([0-9]+):([0-9]+)`)

View File

@@ -1,6 +1,6 @@
// Copyright (c) Acrosync LLC. All rights reserved.
// Licensed under the Fair Source License 0.9 (https://fair.io/)
// User Limitation: 5 users
// Free for personal use and commercial trial
// Commercial use requires per-user licenses available from https://duplicacy.com
package duplicacy

View File

@@ -1,6 +1,6 @@
// Copyright (c) Acrosync LLC. All rights reserved.
// Licensed under the Fair Source License 0.9 (https://fair.io/)
// User Limitation: 5 users
// Free for personal use and commercial trial
// Commercial use requires per-user licenses available from https://duplicacy.com
package duplicacy

View File

@@ -1,6 +1,6 @@
// Copyright (c) Acrosync LLC. All rights reserved.
// Licensed under the Fair Source License 0.9 (https://fair.io/)
// User Limitation: 5 users
// Free for personal use and commercial trial
// Commercial use requires per-user licenses available from https://duplicacy.com
package duplicacy
@@ -18,19 +18,25 @@ import (
type FileStorage struct {
RateLimitedStorage
minimumLevel int // The minimum level of directories to dive into before searching for the chunk file.
isCacheNeeded bool // Network storages require caching
storageDir string
numberOfThreads int
}
// CreateFileStorage creates a file storage.
func CreateFileStorage(storageDir string, threads int) (storage *FileStorage, err error) {
func CreateFileStorage(storageDir string, minimumLevel int, isCacheNeeded bool, threads int) (storage *FileStorage, err error) {
var stat os.FileInfo
stat, err = os.Stat(storageDir)
if os.IsNotExist(err) {
err = os.MkdirAll(storageDir, 0744)
if err != nil {
if err != nil {
if os.IsNotExist(err) {
err = os.MkdirAll(storageDir, 0744)
if err != nil {
return nil, err
}
} else {
return nil, err
}
} else {
@@ -45,6 +51,8 @@ func CreateFileStorage(storageDir string, threads int) (storage *FileStorage, er
storage = &FileStorage {
storageDir : storageDir,
minimumLevel: minimumLevel,
isCacheNeeded: isCacheNeeded,
numberOfThreads: threads,
}
@@ -128,16 +136,18 @@ func (storage *FileStorage) FindChunk(threadIndex int, chunkID string, isFossil
suffix = ".fsl"
}
// The minimum level of directories to dive into before searching for the chunk file.
minimumLevel := 2
for level := 0; level * 2 < len(chunkID); level ++ {
if level >= minimumLevel {
if level >= storage.minimumLevel {
filePath = path.Join(dir, chunkID[2 * level:]) + suffix
if stat, err := os.Stat(filePath); err == nil && !stat.IsDir() {
// Use Lstat() instead of Stat() since 1) Stat() doesn't work for deduplicated disks on Windows and 2) there isn't
// really a need to follow the link if filePath is a link.
stat, err := os.Lstat(filePath)
if err != nil {
LOG_DEBUG("FS_FIND", "File %s can't be found: %v", filePath, err)
} else if stat.IsDir() {
return filePath[len(storage.storageDir) + 1:], false, 0, fmt.Errorf("The path %s is a directory", filePath)
} else {
return filePath[len(storage.storageDir) + 1:], true, stat.Size(), nil
} else if err == nil && stat.IsDir() {
return filePath[len(storage.storageDir) + 1:], true, 0, fmt.Errorf("The path %s is a directory", filePath)
}
}
@@ -149,7 +159,7 @@ func (storage *FileStorage) FindChunk(threadIndex int, chunkID string, isFossil
continue
}
if level < minimumLevel {
if level < storage.minimumLevel {
// Create the subdirectory if it doesn't exist.
if err == nil && !stat.IsDir() {
@@ -158,9 +168,12 @@ func (storage *FileStorage) FindChunk(threadIndex int, chunkID string, isFossil
err = os.Mkdir(subDir, 0744)
if err != nil {
return "", false, 0, err
// The directory may have been created by other threads so check it again.
stat, _ := os.Stat(subDir)
if stat == nil || !stat.IsDir() {
return "", false, 0, err
}
}
dir = subDir
continue
}
@@ -170,9 +183,7 @@ func (storage *FileStorage) FindChunk(threadIndex int, chunkID string, isFossil
}
LOG_FATAL("CHUNK_FIND", "Chunk %s is still not found after having searched a maximum level of directories",
chunkID)
return "", false, 0, nil
return "", false, 0, fmt.Errorf("The maximum level of directories searched")
}
@@ -237,7 +248,7 @@ func (storage *FileStorage) UploadFile(threadIndex int, filePath string, content
// If a local snapshot cache is needed for the storage to avoid downloading/uploading chunks too often when
// managing snapshots.
func (storage *FileStorage) IsCacheNeeded () (bool) { return false }
func (storage *FileStorage) IsCacheNeeded () (bool) { return storage.isCacheNeeded }
// If the 'MoveFile' method is implemented.
func (storage *FileStorage) IsMoveFileImplemented() (bool) { return true }
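
FindChunk nests chunk files under directories named by two hex characters of the chunk ID per level, down to storage.minimumLevel, with a .fsl suffix for fossils. A sketch that only shows how that nesting maps an ID to a path; chunkPath is a made-up helper, and the real FindChunk also creates missing levels and keeps descending when deeper directories already exist:

package main

import (
    "fmt"
    "path"
)

// chunkPath builds the nested path used for a chunk file: one directory per
// two hex characters of the ID, down to minimumLevel, then the remainder as
// the file name. A ".fsl" suffix marks a fossil.
func chunkPath(chunkID string, minimumLevel int, isFossil bool) string {
    dir := "chunks"
    for level := 0; level < minimumLevel && 2*level < len(chunkID); level++ {
        dir = path.Join(dir, chunkID[2*level:2*level+2])
    }
    name := chunkID[2*minimumLevel:]
    if isFossil {
        name += ".fsl"
    }
    return path.Join(dir, name)
}

func main() {
    id := "abcdef0123456789"
    fmt.Println(chunkPath(id, 1, false)) // chunks/ab/cdef0123456789
    fmt.Println(chunkPath(id, 2, false)) // chunks/ab/cd/ef0123456789
    fmt.Println(chunkPath(id, 2, true))  // chunks/ab/cd/ef0123456789.fsl
}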

View File

@@ -1,6 +1,6 @@
// Copyright (c) Acrosync LLC. All rights reserved.
// Licensed under the Fair Source License 0.9 (https://fair.io/)
// User Limitation: 5 users
// Free for personal use and commercial trial
// Commercial use requires per-user licenses available from https://duplicacy.com
package duplicacy
@@ -32,6 +32,7 @@ type GCDStorage struct {
idCacheLock *sync.Mutex
backoff int
isConnected bool
numberOfThreads int
TestMode bool
@@ -64,6 +65,12 @@ func (storage *GCDStorage) shouldRetry(err error) (bool, error) {
// User Rate Limit Exceeded
message = "User Rate Limit Exceeded"
retry = true
} else if e.Code == 401 {
// Only retry on authorization error when storage has been connected before
if storage.isConnected {
message = "Authorization Error"
retry = true
}
}
} else if e, ok := err.(*url.Error); ok {
message = e.Error()
@@ -295,6 +302,8 @@ func CreateGCDStorage(tokenFile string, storagePath string, threads int) (storag
}
}
storage.isConnected = true
return storage, nil
}

View File

@@ -1,6 +1,6 @@
// Copyright (c) Acrosync LLC. All rights reserved.
// Licensed under the Fair Source License 0.9 (https://fair.io/)
// User Limitation: 5 users
// Free for personal use and commercial trial
// Commercial use requires per-user licenses available from https://duplicacy.com
package duplicacy

View File

@@ -1,11 +1,12 @@
// Copyright (c) Acrosync LLC. All rights reserved.
// Licensed under the Fair Source License 0.9 (https://fair.io/)
// User Limitation: 5 users
// Free for personal use and commercial trial
// Commercial use requires per-user licenses available from https://duplicacy.com
package duplicacy
import (
"fmt"
"net"
"time"
"sync"
"bytes"
@@ -64,7 +65,17 @@ func NewHubicClient(tokenFile string) (*HubicClient, error) {
}
client := &HubicClient{
HTTPClient: http.DefaultClient,
HTTPClient: &http.Client {
Transport: &http.Transport {
Dial: (&net.Dialer{
Timeout: 30 * time.Second,
KeepAlive: 30 * time.Second,
}).Dial,
TLSHandshakeTimeout: 60 * time.Second,
ResponseHeaderTimeout: 30 * time.Second,
ExpectContinueTimeout: 10 * time.Second,
},
},
TokenFile: tokenFile,
Token: token,
TokenLock: &sync.Mutex{},

View File

@@ -1,6 +1,6 @@
// Copyright (c) Acrosync LLC. All rights reserved.
// Licensed under the Fair Source License 0.9 (https://fair.io/)
// User Limitation: 5 users
// Free for personal use and commercial trial
// Commercial use requires per-user licenses available from https://duplicacy.com
package duplicacy

View File

@@ -1,6 +1,6 @@
// Copyright (c) Acrosync LLC. All rights reserved.
// Licensed under the Fair Source License 0.9 (https://fair.io/)
// User Limitation: 5 users
// Free for personal use and commercial trial
// Commercial use requires per-user licenses available from https://duplicacy.com
package duplicacy

View File

@@ -1,6 +1,6 @@
// Copyright (c) Acrosync LLC. All rights reserved.
// Licensed under the Fair Source License 0.9 (https://fair.io/)
// User Limitation: 5 users
// Free for personal use and commercial trial
// Commercial use requires per-user licenses available from https://duplicacy.com
// +build !windows

View File

@@ -1,6 +1,6 @@
// Copyright (c) Acrosync LLC. All rights reserved.
// Licensed under the Fair Source License 0.9 (https://fair.io/)
// User Limitation: 5 users
// Free for personal use and commercial trial
// Commercial use requires per-user licenses available from https://duplicacy.com
package duplicacy

View File

@@ -1,6 +1,6 @@
// Copyright (c) Acrosync LLC. All rights reserved.
// Licensed under the Fair Source License 0.9 (https://fair.io/)
// User Limitation: 5 users
// Free for personal use and commercial trial
// Commercial use requires per-user licenses available from https://duplicacy.com
package duplicacy
@@ -160,6 +160,9 @@ const (
otherExitCode = 101
)
// This is the function to be called before exiting when an error occurs.
var RunAtError func() = func() {}
func CatchLogException() {
if r := recover(); r != nil {
switch e := r.(type) {
@@ -167,10 +170,12 @@ func CatchLogException() {
if printStackTrace {
debug.PrintStack()
}
RunAtError()
os.Exit(duplicacyExitCode)
default:
fmt.Fprintf(os.Stderr, "%v\n", e)
debug.PrintStack()
RunAtError()
os.Exit(otherExitCode)
}
}

View File

@@ -1,6 +1,6 @@
// Copyright (c) Acrosync LLC. All rights reserved.
// Licensed under the Fair Source License 0.9 (https://fair.io/)
// User Limitation: 5 users
// Free for personal use and commercial trial
// Commercial use requires per-user licenses available from https://duplicacy.com
package duplicacy
@@ -9,6 +9,7 @@ import (
"time"
"sync"
"bytes"
"strings"
"io/ioutil"
"encoding/json"
"io"
@@ -41,6 +42,7 @@ type OneDriveClient struct {
Token *oauth2.Token
TokenLock *sync.Mutex
IsConnected bool
TestMode bool
}
@@ -115,9 +117,27 @@ func (client *OneDriveClient) call(url string, method string, input interface{},
response, err = client.HTTPClient.Do(request)
if err != nil {
if client.IsConnected {
if strings.Contains(err.Error(), "TLS handshake timeout") {
// Give a long timeout regardless of backoff when a TLS timeout happens, hoping that
// idle connections will not be reused on reconnect.
retryAfter := time.Duration(rand.Float32() * 60000 + 180000)
LOG_INFO("ONEDRIVE_RETRY", "TLS handshake timeout; retry after %d milliseconds", retryAfter)
time.Sleep(retryAfter * time.Millisecond)
} else {
// For all other errors just blindly retry until the maximum is reached
retryAfter := time.Duration(rand.Float32() * 1000.0 * float32(backoff))
LOG_INFO("ONEDRIVE_RETRY", "%v; retry after %d milliseconds", err, retryAfter)
time.Sleep(retryAfter * time.Millisecond)
}
backoff *= 2
continue
}
return nil, 0, err
}
client.IsConnected = true
if response.StatusCode < 400 {
return response.Body, response.ContentLength, nil
}
@@ -128,12 +148,6 @@ func (client *OneDriveClient) call(url string, method string, input interface{},
Error: OneDriveError { Status: response.StatusCode },
}
if err := json.NewDecoder(response.Body).Decode(errorResponse); err != nil {
return nil, 0, OneDriveError { Status: response.StatusCode, Message: fmt.Sprintf("Unexpected response"), }
}
errorResponse.Error.Status = response.StatusCode
if response.StatusCode == 401 {
if url == OneDriveRefreshTokenURL {
@@ -145,13 +159,18 @@ func (client *OneDriveClient) call(url string, method string, input interface{},
return nil, 0, err
}
continue
} else if response.StatusCode == 500 || response.StatusCode == 503 || response.StatusCode == 509 {
} else if response.StatusCode > 401 && response.StatusCode != 404 {
retryAfter := time.Duration(rand.Float32() * 1000.0 * float32(backoff))
LOG_INFO("ONEDRIVE_RETRY", "Response status: %d; retry after %d milliseconds", response.StatusCode, retryAfter)
LOG_INFO("ONEDRIVE_RETRY", "Response code: %d; retry after %d milliseconds", response.StatusCode, retryAfter)
time.Sleep(retryAfter * time.Millisecond)
backoff *= 2
continue
} else {
if err := json.NewDecoder(response.Body).Decode(errorResponse); err != nil {
return nil, 0, OneDriveError { Status: response.StatusCode, Message: fmt.Sprintf("Unexpected response"), }
}
errorResponse.Error.Status = response.StatusCode
return nil, 0, errorResponse.Error
}
}
@@ -169,7 +188,7 @@ func (client *OneDriveClient) RefreshToken() (err error) {
readCloser, _, err := client.call(OneDriveRefreshTokenURL, "POST", client.Token, "")
if err != nil {
return err
return fmt.Errorf("failed to refresh the access token: %v", err)
}
defer readCloser.Close()
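The retry loop above doubles the backoff after each failure and sleeps for a random fraction of it, which spreads retries out over time. A self-contained sketch of that backoff-with-jitter idea (retryWithBackoff and its limits are illustrative, not part of the OneDrive client):

package main

import (
	"errors"
	"fmt"
	"math/rand"
	"time"
)

// retryWithBackoff retries op until it succeeds or the backoff exceeds maxBackoff,
// sleeping a random duration of up to 'backoff' seconds between attempts.
func retryWithBackoff(op func() error, maxBackoff int) error {
	backoff := 1
	for {
		err := op()
		if err == nil {
			return nil
		}
		if backoff > maxBackoff {
			return err
		}
		retryAfter := time.Duration(rand.Float32() * 1000.0 * float32(backoff))
		fmt.Printf("%v; retry after %d milliseconds\n", err, retryAfter)
		time.Sleep(retryAfter * time.Millisecond)
		backoff *= 2
	}
}

func main() {
	attempts := 0
	err := retryWithBackoff(func() error {
		attempts++
		if attempts < 3 {
			return errors.New("transient error")
		}
		return nil
	}, 8)
	fmt.Println("attempts:", attempts, "err:", err)
}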

View File

@@ -1,6 +1,6 @@
// Copyright (c) Acrosync LLC. All rights reserved.
// Licensed under the Fair Source License 0.9 (https://fair.io/)
// User Limitation: 5 users
// Free for personal use and commercial trial
// Commercial use requires per-user licenses available from https://duplicacy.com
package duplicacy

View File

@@ -1,6 +1,6 @@
// Copyright (c) Acrosync LLC. All rights reserved.
// Licensed under the Fair Source License 0.9 (https://fair.io/)
// User Limitation: 5 users
// Free for personal use and commercial trial
// Commercial use requires per-user licenses available from https://duplicacy.com
package duplicacy

View File

@@ -1,6 +1,6 @@
// Copyright (c) Acrosync LLC. All rights reserved.
// Licensed under the Fair Source License 0.9 (https://fair.io/)
// User Limitation: 5 users
// Free for personal use and commercial trial
// Commercial use requires per-user licenses available from https://duplicacy.com
package duplicacy
@@ -9,6 +9,7 @@ import (
"path"
"io/ioutil"
"reflect"
"os"
)
// Preference stores options for each storage.
@@ -23,11 +24,39 @@ type Preference struct {
Keys map[string]string `json:"keys"`
}
var preferencePath string
var Preferences [] Preference
func LoadPreferences(repository string) (bool) {
func LoadPreferences(repository string) bool {
preferencePath = path.Join(repository, DUPLICACY_DIRECTORY)
stat, err := os.Stat(preferencePath)
if err != nil {
LOG_ERROR("PREFERENCE_PATH", "Failed to retrieve the information about the directory %s: %v", repository, err)
return false
}
if !stat.IsDir() {
content, err := ioutil.ReadFile(preferencePath)
if err != nil {
LOG_ERROR("DOT_DUPLICACY_PATH", "Failed to locate the preference path: %v", err)
return false
}
realPreferencePath := string(content)
stat, err := os.Stat(realPreferencePath)
if err != nil {
LOG_ERROR("PREFERENCE_PATH", "Failed to retrieve the information about the directory %s: %v", content, err)
return false
}
if !stat.IsDir() {
LOG_ERROR("PREFERENCE_PATH", "The preference path %s is not a directory", realPreferencePath)
}
description, err := ioutil.ReadFile(path.Join(repository, DUPLICACY_DIRECTORY, "preferences"))
preferencePath = realPreferencePath
}
description, err := ioutil.ReadFile(path.Join(preferencePath, "preferences"))
if err != nil {
LOG_ERROR("PREFERENCE_OPEN", "Failed to read the preference file from repository %s: %v", repository, err)
return false
@@ -47,14 +76,28 @@ func LoadPreferences(repository string) (bool) {
return true
}
func SavePreferences(repository string) (bool) {
func GetDuplicacyPreferencePath() string {
if preferencePath == "" {
LOG_ERROR("PREFERENCE_PATH", "The preference path has not been set")
return ""
}
return preferencePath
}
// Normally 'preferencePath' is set in LoadPreferences; however, if LoadPreferences is not called, this function
// provides another chance to set 'preferencePath'.
func SetDuplicacyPreferencePath(p string) {
preferencePath = p
}
func SavePreferences() (bool) {
description, err := json.MarshalIndent(Preferences, "", " ")
if err != nil {
LOG_ERROR("PREFERENCE_MARSHAL", "Failed to marshal the repository preferences: %v", err)
return false
}
preferenceFile := path.Join(repository, DUPLICACY_DIRECTORY, "/preferences")
preferenceFile := path.Join(GetDuplicacyPreferencePath(), "preferences")
err = ioutil.WriteFile(preferenceFile, description, 0644)
if err != nil {
LOG_ERROR("PREFERENCE_WRITE", "Failed to save the preference file %s: %v", preferenceFile, err)
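With this change the .duplicacy entry in a repository can be either the preference directory itself or a plain file whose content points at the real preference directory. A small sketch of that redirect lookup (resolvePreferencePath is a hypothetical helper, and the TrimSpace is an added nicety not present in the original):

package main

import (
	"fmt"
	"io/ioutil"
	"os"
	"path"
	"strings"
)

// resolvePreferencePath returns dotPath if it is a directory; if it is a regular file,
// the file content is treated as the path of the real preference directory.
func resolvePreferencePath(dotPath string) (string, error) {
	stat, err := os.Stat(dotPath)
	if err != nil {
		return "", err
	}
	if stat.IsDir() {
		return dotPath, nil
	}
	content, err := ioutil.ReadFile(dotPath)
	if err != nil {
		return "", err
	}
	real := strings.TrimSpace(string(content))
	if stat, err := os.Stat(real); err != nil || !stat.IsDir() {
		return "", fmt.Errorf("the preference path %s is not a directory", real)
	}
	return real, nil
}

func main() {
	repository := os.TempDir()
	p, err := resolvePreferencePath(path.Join(repository, ".duplicacy"))
	fmt.Println(p, err)
}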

212
src/duplicacy_s3cstorage.go Normal file
View File

@@ -0,0 +1,212 @@
// Copyright (c) Acrosync LLC. All rights reserved.
// Free for personal use and commercial trial
// Commercial use requires per-user licenses available from https://duplicacy.com
package duplicacy
import (
"time"
"github.com/gilbertchen/goamz/aws"
"github.com/gilbertchen/goamz/s3"
)
// S3CStorage is a storage backend for S3-compatible storage services that require V2 signing.
type S3CStorage struct {
RateLimitedStorage
buckets []*s3.Bucket
storageDir string
}
// CreateS3CStorage creates an S3-compatible storage object.
func CreateS3CStorage(regionName string, endpoint string, bucketName string, storageDir string,
accessKey string, secretKey string, threads int) (storage *S3CStorage, err error) {
var region aws.Region
if endpoint == "" {
if regionName == "" {
regionName = "us-east-1"
}
region = aws.Regions[regionName]
} else {
region = aws.Region { Name: regionName, S3Endpoint:"https://" + endpoint }
}
auth := aws.Auth{ AccessKey: accessKey, SecretKey: secretKey }
var buckets []*s3.Bucket
for i := 0; i < threads; i++ {
s3Client := s3.New(auth, region)
s3Client.AttemptStrategy = aws.AttemptStrategy{
Min: 8,
Total: 300 * time.Second,
Delay: 1000 * time.Millisecond,
}
bucket := s3Client.Bucket(bucketName)
buckets = append(buckets, bucket)
}
if len(storageDir) > 0 && storageDir[len(storageDir) - 1] != '/' {
storageDir += "/"
}
storage = &S3CStorage {
buckets: buckets,
storageDir: storageDir,
}
return storage, nil
}
// ListFiles returns the list of files and subdirectories under 'dir' (non-recursively)
func (storage *S3CStorage) ListFiles(threadIndex int, dir string) (files []string, sizes []int64, err error) {
if len(dir) > 0 && dir[len(dir) - 1] != '/' {
dir += "/"
}
dirLength := len(storage.storageDir + dir)
if dir == "snapshots/" {
results, err := storage.buckets[threadIndex].List(storage.storageDir + dir, "/", "", 100)
if err != nil {
return nil, nil, err
}
for _, subDir := range results.CommonPrefixes {
files = append(files, subDir[dirLength:])
}
return files, nil, nil
} else if dir == "chunks/" {
marker := ""
for {
results, err := storage.buckets[threadIndex].List(storage.storageDir + dir, "", marker, 1000)
if err != nil {
return nil, nil, err
}
for _, object := range results.Contents {
files = append(files, object.Key[dirLength:])
sizes = append(sizes, object.Size)
}
if !results.IsTruncated {
break
}
marker = results.Contents[len(results.Contents) - 1].Key
}
return files, sizes, nil
} else {
results, err := storage.buckets[threadIndex].List(storage.storageDir + dir, "", "", 1000)
if err != nil {
return nil, nil, err
}
for _, object := range results.Contents {
files = append(files, object.Key[dirLength:])
}
return files, nil, nil
}
}
// DeleteFile deletes the file or directory at 'filePath'.
func (storage *S3CStorage) DeleteFile(threadIndex int, filePath string) (err error) {
return storage.buckets[threadIndex].Del(storage.storageDir + filePath)
}
// MoveFile renames the file.
func (storage *S3CStorage) MoveFile(threadIndex int, from string, to string) (err error) {
options := s3.CopyOptions { ContentType: "application/duplicacy" }
_, err = storage.buckets[threadIndex].PutCopy(storage.storageDir + to, s3.Private, options, storage.buckets[threadIndex].Name + "/" + storage.storageDir + from)
if err != nil {
return nil
}
return storage.DeleteFile(threadIndex, from)
}
// CreateDirectory creates a new directory.
func (storage *S3CStorage) CreateDirectory(threadIndex int, dir string) (err error) {
return nil
}
// GetFileInfo returns the information about the file or directory at 'filePath'.
func (storage *S3CStorage) GetFileInfo(threadIndex int, filePath string) (exist bool, isDir bool, size int64, err error) {
response, err := storage.buckets[threadIndex].Head(storage.storageDir + filePath, nil)
if err != nil {
if e, ok := err.(*s3.Error); ok && (e.StatusCode == 403 || e.StatusCode == 404) {
return false, false, 0, nil
} else {
return false, false, 0, err
}
}
if response.StatusCode == 403 || response.StatusCode == 404 {
return false, false, 0, nil
} else {
return true, false, response.ContentLength, nil
}
}
// FindChunk finds the chunk with the specified id. If 'isFossil' is true, it will search for chunk files with
// the suffix '.fsl'.
func (storage *S3CStorage) FindChunk(threadIndex int, chunkID string, isFossil bool) (filePath string, exist bool, size int64, err error) {
filePath = "chunks/" + chunkID
if isFossil {
filePath += ".fsl"
}
exist, _, size, err = storage.GetFileInfo(threadIndex, filePath)
if err != nil {
return "", false, 0, err
} else {
return filePath, exist, size, err
}
}
// DownloadFile reads the file at 'filePath' into the chunk.
func (storage *S3CStorage) DownloadFile(threadIndex int, filePath string, chunk *Chunk) (err error) {
readCloser, err := storage.buckets[threadIndex].GetReader(storage.storageDir + filePath)
if err != nil {
return err
}
defer readCloser.Close()
_, err = RateLimitedCopy(chunk, readCloser, storage.DownloadRateLimit / len(storage.buckets))
return err
}
// UploadFile writes 'content' to the file at 'filePath'.
func (storage *S3CStorage) UploadFile(threadIndex int, filePath string, content []byte) (err error) {
options := s3.Options { }
reader := CreateRateLimitedReader(content, storage.UploadRateLimit / len(storage.buckets))
return storage.buckets[threadIndex].PutReader(storage.storageDir + filePath, reader, int64(len(content)), "application/duplicacy", s3.Private, options)
}
// If a local snapshot cache is needed for the storage to avoid downloading/uploading chunks too often when
// managing snapshots.
func (storage *S3CStorage) IsCacheNeeded () (bool) { return true }
// If the 'MoveFile' method is implemented.
func (storage *S3CStorage) IsMoveFileImplemented() (bool) { return true }
// If the storage can guarantee strong consistency.
func (storage *S3CStorage) IsStrongConsistent() (bool) { return false }
// If the storage supports fast listing of files names.
func (storage *S3CStorage) IsFastListing() (bool) { return true }
// Enable the test mode.
func (storage *S3CStorage) EnableTestMode() {}
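A brief usage sketch of the new backend from inside the same package; the region, endpoint, bucket, and credentials below are placeholders, not a working configuration:

package duplicacy

// listS3CSnapshotIDs is a hypothetical helper showing how the new V2-signing backend
// could be constructed and queried; every argument below is a placeholder value.
func listS3CSnapshotIDs() ([]string, error) {
	storage, err := CreateS3CStorage("us-east-1", "s3.example.com", "example-bucket", "backups",
		"ACCESS_KEY_PLACEHOLDER", "SECRET_KEY_PLACEHOLDER", 1)
	if err != nil {
		return nil, err
	}
	// "snapshots/" is listed with a delimiter, so each entry is a snapshot ID subdirectory.
	files, _, err := storage.ListFiles(0, "snapshots/")
	return files, err
}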

View File

@@ -1,61 +1,74 @@
// Copyright (c) Acrosync LLC. All rights reserved.
// Licensed under the Fair Source License 0.9 (https://fair.io/)
// User Limitation: 5 users
// Free for personal use and commercial trial
// Commercial use requires per-user licenses available from https://duplicacy.com
package duplicacy
import (
"time"
"github.com/gilbertchen/goamz/aws"
"github.com/gilbertchen/goamz/s3"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/aws/credentials"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/s3"
)
type S3Storage struct {
RateLimitedStorage
buckets []*s3.Bucket
client *s3.S3
bucket string
storageDir string
numberOfThreads int
}
// CreateS3Storage creates an Amazon S3 storage object.
func CreateS3Storage(regionName string, endpoint string, bucketName string, storageDir string,
accessKey string, secretKey string, threads int) (storage *S3Storage, err error) {
accessKey string, secretKey string, threads int,
isSSLSupported bool, isMinioCompatible bool) (storage *S3Storage, err error) {
var region aws.Region
token := ""
auth := credentials.NewStaticCredentials(accessKey, secretKey, token)
if endpoint == "" {
if regionName == "" {
regionName = "us-east-1"
if regionName == "" && endpoint == "" {
defaultRegionConfig := &aws.Config {
Region: aws.String("us-east-1"),
Credentials: auth,
}
region = aws.Regions[regionName]
} else {
region = aws.Region { Name: regionName, S3Endpoint:"https://" + endpoint }
}
s3Client := s3.New(session.New(defaultRegionConfig))
auth := aws.Auth{ AccessKey: accessKey, SecretKey: secretKey }
response, err := s3Client.GetBucketLocation(&s3.GetBucketLocationInput{Bucket: aws.String(bucketName)})
var buckets []*s3.Bucket
for i := 0; i < threads; i++ {
s3Client := s3.New(auth, region)
s3Client.AttemptStrategy = aws.AttemptStrategy{
Min: 8,
Total: 300 * time.Second,
Delay: 1000 * time.Millisecond,
if err != nil {
return nil, err
}
regionName = "us-east-1"
if response.LocationConstraint != nil {
regionName = *response.LocationConstraint
}
bucket := s3Client.Bucket(bucketName)
buckets = append(buckets, bucket)
}
config := &aws.Config {
Region: aws.String(regionName),
Credentials: auth,
Endpoint: aws.String(endpoint),
S3ForcePathStyle: aws.Bool(isMinioCompatible),
DisableSSL: aws.Bool(!isSSLSupported),
}
if len(storageDir) > 0 && storageDir[len(storageDir) - 1] != '/' {
storageDir += "/"
}
storage = &S3Storage {
buckets: buckets,
client: s3.New(session.New(config)),
bucket: bucketName,
storageDir: storageDir,
numberOfThreads: threads,
}
return storage, nil
}
@@ -65,67 +78,82 @@ func (storage *S3Storage) ListFiles(threadIndex int, dir string) (files []string
dir += "/"
}
dirLength := len(storage.storageDir + dir)
if dir == "snapshots/" {
results, err := storage.buckets[threadIndex].List(storage.storageDir + dir, "/", "", 100)
dir = storage.storageDir + dir
input := s3.ListObjectsInput {
Bucket: aws.String(storage.bucket),
Prefix: aws.String(dir),
Delimiter: aws.String("/"),
MaxKeys: aws.Int64(1000),
}
output, err := storage.client.ListObjects(&input)
if err != nil {
return nil, nil, err
}
for _, subDir := range results.CommonPrefixes {
files = append(files, subDir[dirLength:])
for _, subDir := range output.CommonPrefixes {
files = append(files, (*subDir.Prefix)[len(dir):])
}
return files, nil, nil
} else if dir == "chunks/" {
} else {
dir = storage.storageDir + dir
marker := ""
for {
results, err := storage.buckets[threadIndex].List(storage.storageDir + dir, "", marker, 1000)
input := s3.ListObjectsInput {
Bucket: aws.String(storage.bucket),
Prefix: aws.String(dir),
MaxKeys: aws.Int64(1000),
Marker: aws.String(marker),
}
output, err := storage.client.ListObjects(&input)
if err != nil {
return nil, nil, err
}
for _, object := range results.Contents {
files = append(files, object.Key[dirLength:])
sizes = append(sizes, object.Size)
for _, object := range output.Contents {
files = append(files, (*object.Key)[len(dir):])
sizes = append(sizes, *object.Size)
}
if !results.IsTruncated {
if !*output.IsTruncated {
break
}
marker = results.Contents[len(results.Contents) - 1].Key
marker = *output.Contents[len(output.Contents) - 1].Key
}
return files, sizes, nil
}
} else {
results, err := storage.buckets[threadIndex].List(storage.storageDir + dir, "", "", 1000)
if err != nil {
return nil, nil, err
}
for _, object := range results.Contents {
files = append(files, object.Key[dirLength:])
}
return files, nil, nil
}
}
// DeleteFile deletes the file or directory at 'filePath'.
func (storage *S3Storage) DeleteFile(threadIndex int, filePath string) (err error) {
return storage.buckets[threadIndex].Del(storage.storageDir + filePath)
input := &s3.DeleteObjectInput {
Bucket: aws.String(storage.bucket),
Key: aws.String(storage.storageDir + filePath),
}
_, err = storage.client.DeleteObject(input)
return err
}
// MoveFile renames the file.
func (storage *S3Storage) MoveFile(threadIndex int, from string, to string) (err error) {
options := s3.CopyOptions { ContentType: "application/duplicacy" }
_, err = storage.buckets[threadIndex].PutCopy(storage.storageDir + to, s3.Private, options, storage.buckets[threadIndex].Name + "/" + storage.storageDir + from)
if err != nil {
return nil
input := &s3.CopyObjectInput {
Bucket: aws.String(storage.bucket),
CopySource: aws.String(storage.bucket + "/" + storage.storageDir + from),
Key: aws.String(storage.storageDir + to),
}
_, err = storage.client.CopyObject(input)
if err != nil {
return err
}
return storage.DeleteFile(threadIndex, from)
}
// CreateDirectory creates a new directory.
@@ -136,19 +164,24 @@ func (storage *S3Storage) CreateDirectory(threadIndex int, dir string) (err erro
// GetFileInfo returns the information about the file or directory at 'filePath'.
func (storage *S3Storage) GetFileInfo(threadIndex int, filePath string) (exist bool, isDir bool, size int64, err error) {
response, err := storage.buckets[threadIndex].Head(storage.storageDir + filePath, nil)
input := &s3.HeadObjectInput {
Bucket: aws.String(storage.bucket),
Key: aws.String(storage.storageDir + filePath),
}
output, err := storage.client.HeadObject(input)
if err != nil {
if e, ok := err.(*s3.Error); ok && (e.StatusCode == 403 || e.StatusCode == 404) {
if e, ok := err.(awserr.RequestFailure); ok && (e.StatusCode() == 403 || e.StatusCode() == 404) {
return false, false, 0, nil
} else {
return false, false, 0, err
}
}
}
if response.StatusCode == 403 || response.StatusCode == 404 {
if output == nil || output.ContentLength == nil {
return false, false, 0, nil
} else {
return true, false, response.ContentLength, nil
return true, false, *output.ContentLength, nil
}
}
@@ -174,14 +207,19 @@ func (storage *S3Storage) FindChunk(threadIndex int, chunkID string, isFossil bo
// DownloadFile reads the file at 'filePath' into the chunk.
func (storage *S3Storage) DownloadFile(threadIndex int, filePath string, chunk *Chunk) (err error) {
readCloser, err := storage.buckets[threadIndex].GetReader(storage.storageDir + filePath)
input := &s3.GetObjectInput {
Bucket: aws.String(storage.bucket),
Key: aws.String(storage.storageDir + filePath),
}
output, err := storage.client.GetObject(input)
if err != nil {
return err
}
defer readCloser.Close()
_, err = RateLimitedCopy(chunk, readCloser, storage.DownloadRateLimit / len(storage.buckets))
defer output.Body.Close()
_, err = RateLimitedCopy(chunk, output.Body, storage.DownloadRateLimit / storage.numberOfThreads)
return err
}
@@ -189,9 +227,16 @@ func (storage *S3Storage) DownloadFile(threadIndex int, filePath string, chunk *
// UploadFile writes 'content' to the file at 'filePath'.
func (storage *S3Storage) UploadFile(threadIndex int, filePath string, content []byte) (err error) {
options := s3.Options { }
reader := CreateRateLimitedReader(content, storage.UploadRateLimit / len(storage.buckets))
return storage.buckets[threadIndex].PutReader(storage.storageDir + filePath, reader, int64(len(content)), "application/duplicacy", s3.Private, options)
input := &s3.PutObjectInput {
Bucket: aws.String(storage.bucket),
Key: aws.String(storage.storageDir + filePath),
ACL: aws.String(s3.ObjectCannedACLPrivate),
Body: CreateRateLimitedReader(content, storage.UploadRateLimit / storage.numberOfThreads),
ContentType: aws.String("application/duplicacy"),
}
_, err = storage.client.PutObject(input)
return err
}
// If a local snapshot cache is needed for the storage to avoid downloading/uploading chunks too often when
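The rewrite above moves the S3 backend from goamz to the official aws-sdk-go and, when neither a region nor an endpoint is given, discovers the bucket's region with GetBucketLocation. A self-contained sketch of that lookup (bucket name and credentials are placeholders):

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/credentials"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

// discoverBucketRegion queries us-east-1 for the bucket's location constraint and
// falls back to us-east-1 when none is returned, mirroring the lookup above.
func discoverBucketRegion(bucket, accessKey, secretKey string) (string, error) {
	auth := credentials.NewStaticCredentials(accessKey, secretKey, "")
	client := s3.New(session.New(&aws.Config{Region: aws.String("us-east-1"), Credentials: auth}))
	response, err := client.GetBucketLocation(&s3.GetBucketLocationInput{Bucket: aws.String(bucket)})
	if err != nil {
		return "", err
	}
	region := "us-east-1"
	if response.LocationConstraint != nil {
		region = *response.LocationConstraint
	}
	return region, nil
}

func main() {
	region, err := discoverBucketRegion("example-bucket", "ACCESS_KEY_PLACEHOLDER", "SECRET_KEY_PLACEHOLDER")
	fmt.Println(region, err)
}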

View File

@@ -1,6 +1,6 @@
// Copyright (c) Acrosync LLC. All rights reserved.
// Licensed under the Fair Source License 0.9 (https://fair.io/)
// User Limitation: 5 users
// Free for personal use and commercial trial
// Commercial use requires per-user licenses available from https://duplicacy.com
package duplicacy
@@ -215,7 +215,11 @@ func (storage *SFTPStorage) FindChunk(threadIndex int, chunkID string, isFossil
err = storage.client.Mkdir(subDir)
if err != nil {
return "", false, 0, fmt.Errorf("Failed to create the directory %s: %v", subDir, err)
// The directory may have been created by other threads so check it again.
stat, _ := storage.client.Stat(subDir)
if stat == nil || !stat.IsDir() {
return "", false, 0, fmt.Errorf("Failed to create the directory %s: %v", subDir, err)
}
}
dir = subDir
@@ -281,7 +285,7 @@ func (storage *SFTPStorage) UploadFile(threadIndex int, filePath string, content
storage.client.Remove(temporaryFile)
return nil
} else {
return fmt.Errorf("Uploaded file but failed to store it at %s", fullPath)
return fmt.Errorf("Uploaded file but failed to store it at %s: %v", fullPath, err)
}
}
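The FindChunk change above tolerates a failed Mkdir when another thread has created the directory in the meantime, by re-checking with Stat before reporting an error. The same pattern against a local filesystem, as a small sketch with the standard os package:

package main

import (
	"fmt"
	"os"
)

// ensureDirectory creates dir and, when the creation fails, only reports an error
// if the directory still does not exist (another goroutine may have created it).
func ensureDirectory(dir string) error {
	if err := os.Mkdir(dir, 0700); err != nil {
		stat, statErr := os.Stat(dir)
		if statErr != nil || !stat.IsDir() {
			return fmt.Errorf("failed to create the directory %s: %v", dir, err)
		}
	}
	return nil
}

func main() {
	dir := os.TempDir() + "/duplicacy_example_dir"
	fmt.Println(ensureDirectory(dir))
	fmt.Println(ensureDirectory(dir)) // the second call succeeds because the directory already exists
}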

View File

@@ -1,6 +1,6 @@
// Copyright (c) Acrosync LLC. All rights reserved.
// Licensed under the Fair Source License 0.9 (https://fair.io/)
// User Limitation: 5 users
// Free for personal use and commercial trial
// Commercial use requires per-user licenses available from https://duplicacy.com
// +build !windows

View File

@@ -1,6 +1,6 @@
// Copyright (c) Acrosync LLC. All rights reserved.
// Licensed under the Fair Source License 0.9 (https://fair.io/)
// User Limitation: 5 users
// Free for personal use and commercial trial
// Commercial use requires per-user licenses available from https://duplicacy.com
package duplicacy
@@ -9,7 +9,6 @@ import (
"unsafe"
"time"
"os"
"path"
"runtime"
ole "github.com/gilbertchen/go-ole"
@@ -509,8 +508,9 @@ func CreateShadowCopy(top string, shadowCopy bool) (shadowTop string) {
LOG_INFO("VSS_DONE", "Shadow copy %s created", SnapshotIDString)
snapshotPath := uint16ArrayToString(properties.SnapshotDeviceObject)
shadowLink = path.Join(top, DUPLICACY_DIRECTORY) + "\\shadow"
preferencePath := GetDuplicacyPreferencePath()
shadowLink = preferencePath + "\\shadow"
os.Remove(shadowLink)
err = os.Symlink(snapshotPath + "\\", shadowLink)
if err != nil {

View File

@@ -1,6 +1,6 @@
// Copyright (c) Acrosync LLC. All rights reserved.
// Licensed under the Fair Source License 0.9 (https://fair.io/)
// User Limitation: 5 users
// Free for personal use and commercial trial
// Commercial use requires per-user licenses available from https://duplicacy.com
package duplicacy
@@ -67,7 +67,8 @@ func CreateSnapshotFromDirectory(id string, top string) (snapshot *Snapshot, ski
}
var patterns []string
patternFile, err := ioutil.ReadFile(path.Join(top, DUPLICACY_DIRECTORY, "filters"))
patternFile, err := ioutil.ReadFile(path.Join(GetDuplicacyPreferencePath(), "filters"))
if err == nil {
for _, pattern := range strings.Split(string(patternFile), "\n") {
pattern = strings.TrimSpace(pattern)
@@ -136,6 +137,100 @@ func CreateSnapshotFromDirectory(id string, top string) (snapshot *Snapshot, ski
return snapshot, skippedDirectories, skippedFiles, nil
}
// This is the struct used to save/load incomplete snapshots
type IncompleteSnapshot struct {
Files [] *Entry
ChunkHashes []string
ChunkLengths [] int
}
// LoadIncompleteSnapshot loads the incomplete snapshot if it exists
func LoadIncompleteSnapshot() (snapshot *Snapshot) {
snapshotFile := path.Join(GetDuplicacyPreferencePath(), "incomplete")
description, err := ioutil.ReadFile(snapshotFile)
if err != nil {
LOG_DEBUG("INCOMPLETE_LOCATE", "Failed to locate incomplete snapshot: %v", err)
return nil
}
var incompleteSnapshot IncompleteSnapshot
err = json.Unmarshal(description, &incompleteSnapshot)
if err != nil {
LOG_DEBUG("INCOMPLETE_PARSE", "Failed to parse incomplete snapshot: %v", err)
return nil
}
var chunkHashes []string
for _, chunkHash := range incompleteSnapshot.ChunkHashes {
hash, err := hex.DecodeString(chunkHash)
if err != nil {
LOG_DEBUG("INCOMPLETE_DECODE", "Failed to decode incomplete snapshot: %v", err)
return nil
}
chunkHashes = append(chunkHashes, string(hash))
}
snapshot = &Snapshot {
Files: incompleteSnapshot.Files,
ChunkHashes: chunkHashes,
ChunkLengths: incompleteSnapshot.ChunkLengths,
}
LOG_INFO("INCOMPLETE_LOAD", "Incomplete snapshot loaded from %s", snapshotFile)
return snapshot
}
// SaveIncompleteSnapshot saves the incomplete snapshot under the preference directory
func SaveIncompleteSnapshot(snapshot *Snapshot) {
var files []*Entry
for _, file := range snapshot.Files {
// All unprocessed files will have a size of -1
if file.Size >= 0 {
file.Attributes = nil
files = append(files, file)
} else {
break
}
}
var chunkHashes []string
for _, chunkHash := range snapshot.ChunkHashes {
chunkHashes = append(chunkHashes, hex.EncodeToString([]byte(chunkHash)))
}
incompleteSnapshot := IncompleteSnapshot {
Files: files,
ChunkHashes: chunkHashes,
ChunkLengths: snapshot.ChunkLengths,
}
description, err := json.MarshalIndent(incompleteSnapshot, "", " ")
if err != nil {
LOG_WARN("INCOMPLETE_ENCODE", "Failed to encode the incomplete snapshot: %v", err)
return
}
snapshotFile := path.Join(GetDuplicacyPreferencePath(), "incomplete")
err = ioutil.WriteFile(snapshotFile, description, 0644)
if err != nil {
LOG_WARN("INCOMPLETE_WRITE", "Failed to save the incomplete snapshot: %v", err)
return
}
LOG_INFO("INCOMPLETE_SAVE", "Incomplete snapshot saved to %s", snapshotFile)
}
func RemoveIncompleteSnapshot() {
snapshotFile := path.Join(GetDuplicacyPreferencePath(), "incomplete")
if stat, err := os.Stat(snapshotFile); err == nil && !stat.IsDir() {
err = os.Remove(snapshotFile)
if err != nil {
LOG_INFO("INCOMPLETE_SAVE", "Failed to remove the incomplete snapshot: %v", err)
} else {
LOG_INFO("INCOMPLETE_SAVE", "Removed incomplete snapshot %s", snapshotFile)
}
}
}
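Chunk hashes are raw bytes in memory, so the incomplete snapshot stores them hex-encoded to keep the JSON printable. A standalone sketch of that encode/decode round trip (the struct and values are simplified stand-ins):

package main

import (
	"encoding/hex"
	"encoding/json"
	"fmt"
)

// incompleteState is a simplified stand-in for the IncompleteSnapshot struct above.
type incompleteState struct {
	ChunkHashes  []string
	ChunkLengths []int
}

func main() {
	rawHashes := [][]byte{{0xde, 0xad, 0xbe, 0xef}, {0x01, 0x02}}

	// Encode: hex-encode each raw hash so the JSON stays printable.
	state := incompleteState{ChunkLengths: []int{4194304, 1048576}}
	for _, hash := range rawHashes {
		state.ChunkHashes = append(state.ChunkHashes, hex.EncodeToString(hash))
	}
	encoded, _ := json.MarshalIndent(state, "", "    ")
	fmt.Println(string(encoded))

	// Decode: parse the JSON and convert each hash back to raw bytes.
	var loaded incompleteState
	if err := json.Unmarshal(encoded, &loaded); err != nil {
		panic(err)
	}
	for _, chunkHash := range loaded.ChunkHashes {
		hash, err := hex.DecodeString(chunkHash)
		if err != nil {
			panic(err)
		}
		fmt.Printf("%x (%d bytes)\n", hash, len(hash))
	}
}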
// CreateSnapshotFromDescription creates a snapshot from a json description.
func CreateSnapshotFromDescription(description []byte) (snapshot *Snapshot, err error) {

View File

@@ -1,6 +1,6 @@
// Copyright (c) Acrosync LLC. All rights reserved.
// Licensed under the Fair Source License 0.9 (https://fair.io/)
// User Limitation: 5 users
// Free for personal use and commercial trial
// Commercial use requires per-user licenses available from https://duplicacy.com
package duplicacy
@@ -1084,7 +1084,7 @@ func (manager *SnapshotManager) RetrieveFile(snapshot *Snapshot, file *Entry, ou
if alternateHash {
fileHash = "#" + fileHash
}
if strings.ToLower(fileHash) != strings.ToLower(file.Hash) {
if strings.ToLower(fileHash) != strings.ToLower(file.Hash) && !SkipFileHash {
LOG_WARN("SNAPSHOT_HASH", "File %s has mismatched hashes: %s vs %s", file.Path, file.Hash, fileHash)
return false
}
@@ -1092,15 +1092,18 @@ func (manager *SnapshotManager) RetrieveFile(snapshot *Snapshot, file *Entry, ou
}
// FindFile returns the file entry that has the given file name.
func (manager *SnapshotManager) FindFile(snapshot *Snapshot, filePath string) (*Entry) {
func (manager *SnapshotManager) FindFile(snapshot *Snapshot, filePath string, suppressError bool) (*Entry) {
for _, entry := range snapshot.Files {
if entry.Path == filePath {
return entry
}
}
LOG_ERROR("SNAPSHOT_FIND", "No file %s found in snapshot %s at revision %d",
filePath, snapshot.ID, snapshot.Revision)
if !suppressError {
LOG_ERROR("SNAPSHOT_FIND", "No file %s found in snapshot %s at revision %d",
filePath, snapshot.ID, snapshot.Revision)
}
return nil
}
@@ -1139,7 +1142,7 @@ func (manager *SnapshotManager) PrintFile(snapshotID string, revision int, path
return true
}
file := manager.FindFile(snapshot, path)
file := manager.FindFile(snapshot, path, false)
var content [] byte
if !manager.RetrieveFile(snapshot, file, func(chunk []byte) { content = append(content, chunk...) }) {
LOG_ERROR("SNAPSHOT_RETRIEVE", "File %s is corrupted in snapshot %s at revision %d",
@@ -1197,7 +1200,7 @@ func (manager *SnapshotManager) Diff(top string, snapshotID string, revisions []
}
var leftFile []byte
if !manager.RetrieveFile(leftSnapshot, manager.FindFile(leftSnapshot, filePath), func(content []byte) {
if !manager.RetrieveFile(leftSnapshot, manager.FindFile(leftSnapshot, filePath, false), func(content []byte) {
leftFile = append(leftFile, content...)
}) {
LOG_ERROR("SNAPSHOT_DIFF", "File %s is corrupted in snapshot %s at revision %d",
@@ -1207,7 +1210,7 @@ func (manager *SnapshotManager) Diff(top string, snapshotID string, revisions []
var rightFile []byte
if rightSnapshot != nil {
if !manager.RetrieveFile(rightSnapshot, manager.FindFile(rightSnapshot, filePath), func(content []byte) {
if !manager.RetrieveFile(rightSnapshot, manager.FindFile(rightSnapshot, filePath, false), func(content []byte) {
rightFile = append(rightFile, content...)
}) {
LOG_ERROR("SNAPSHOT_DIFF", "File %s is corrupted in snapshot %s at revision %d",
@@ -1376,7 +1379,7 @@ func (manager *SnapshotManager) ShowHistory(top string, snapshotID string, revis
for _, revision := range revisions {
snapshot := manager.DownloadSnapshot(snapshotID, revision)
manager.DownloadSnapshotFileSequence(snapshot, nil)
file := manager.FindFile(snapshot, filePath)
file := manager.FindFile(snapshot, filePath, true)
if file != nil {
@@ -1496,7 +1499,7 @@ func (manager *SnapshotManager) resurrectChunk(fossilPath string, chunkID string
// Note that a snapshot being created when step 2 is in progress may reference a fossil. To avoid this
// problem, never remove the latest revision (unless exclusive is true), and only cache chunks referenced
// by the latest revision.
func (manager *SnapshotManager) PruneSnapshots(top string, selfID string, snapshotID string, revisionsToBeDeleted []int,
func (manager *SnapshotManager) PruneSnapshots(selfID string, snapshotID string, revisionsToBeDeleted []int,
tags []string, retentions []string,
exhaustive bool, exclusive bool, ignoredIDs []string,
dryRun bool, deleteOnly bool, collectOnly bool) bool {
@@ -1510,8 +1513,9 @@ func (manager *SnapshotManager) PruneSnapshots(top string, selfID string, snapsh
if len(revisionsToBeDeleted) > 0 && (len(tags) > 0 || len(retentions) > 0) {
LOG_WARN("DELETE_OPTIONS", "Tags or retention policy will be ignored if at least one revision is specified")
}
logDir := path.Join(top, DUPLICACY_DIRECTORY, "logs")
preferencePath := GetDuplicacyPreferencePath()
logDir := path.Join(preferencePath, "logs")
os.Mkdir(logDir, 0700)
logFileName := path.Join(logDir, time.Now().Format("prune-log-20060102-150405"))
logFile, err := os.OpenFile(logFileName, os.O_WRONLY | os.O_CREATE | os.O_TRUNC, 0600)
@@ -2180,7 +2184,9 @@ func (manager *SnapshotManager) CheckSnapshot(snapshot *Snapshot) (err error) {
if len(entries) > 0 && entries[0].StartChunk != 0 {
return fmt.Errorf("The first file starts at chunk %d", entries[0].StartChunk )
}
if lastChunk < numberOfChunks - 1 {
// There may be a last chunk whose size is 0 so we allow this to happen
if lastChunk < numberOfChunks - 2 {
return fmt.Errorf("The last file ends at chunk %d but the number of chunks is %d", lastChunk, numberOfChunks)
}

View File

@@ -1,6 +1,6 @@
// Copyright (c) Acrosync LLC. All rights reserved.
// Licensed under the Fair Source License 0.9 (https://fair.io/)
// User Limitation: 5 users
// Free for personal use and commercial trial
// Commercial use requires per-user licenses available from https://duplicacy.com
package duplicacy
@@ -95,14 +95,14 @@ func createTestSnapshotManager(testDir string) *SnapshotManager {
os.RemoveAll(testDir)
os.MkdirAll(testDir, 0700)
storage, _ := CreateFileStorage(testDir, 1)
storage, _ := CreateFileStorage(testDir, 2, false, 1)
storage.CreateDirectory(0, "chunks")
storage.CreateDirectory(0, "snapshots")
config := CreateConfig()
snapshotManager := CreateSnapshotManager(config, storage)
cacheDir := path.Join(testDir, "cache")
snapshotCache, _ := CreateFileStorage(cacheDir, 1)
snapshotCache, _ := CreateFileStorage(cacheDir, 2, false, 1)
snapshotCache.CreateDirectory(0, "chunks")
snapshotCache.CreateDirectory(0, "snapshots")
@@ -248,11 +248,11 @@ func TestSingleRepositoryPrune(t *testing.T) {
checkTestSnapshots(snapshotManager, 3, 0)
t.Logf("Removing snapshot repository1 revision 1 with --exclusive")
snapshotManager.PruneSnapshots(testDir, "repository1", "repository1", []int{1}, []string{}, []string{}, false, true, []string{}, false, false, false)
snapshotManager.PruneSnapshots("repository1", "repository1", []int{1}, []string{}, []string{}, false, true, []string{}, false, false, false)
checkTestSnapshots(snapshotManager, 2, 0)
t.Logf("Removing snapshot repository1 revision 2 without --exclusive")
snapshotManager.PruneSnapshots(testDir, "repository1", "repository1", []int{2}, []string{}, []string{}, false, false, []string{}, false, false, false)
snapshotManager.PruneSnapshots("repository1", "repository1", []int{2}, []string{}, []string{}, false, false, []string{}, false, false, false)
checkTestSnapshots(snapshotManager, 1, 2)
t.Logf("Creating 1 snapshot")
@@ -261,7 +261,7 @@ func TestSingleRepositoryPrune(t *testing.T) {
checkTestSnapshots(snapshotManager, 2, 2)
t.Logf("Prune without removing any snapshots -- fossils will be deleted")
snapshotManager.PruneSnapshots(testDir, "repository1", "repository1", []int{}, []string{}, []string{}, false, false, []string{}, false, false, false)
snapshotManager.PruneSnapshots("repository1", "repository1", []int{}, []string{}, []string{}, false, false, []string{}, false, false, false)
checkTestSnapshots(snapshotManager, 2, 0)
}
@@ -288,11 +288,11 @@ func TestSingleHostPrune(t *testing.T) {
checkTestSnapshots(snapshotManager, 3, 0)
t.Logf("Removing snapshot vm1@host1 revision 1 without --exclusive")
snapshotManager.PruneSnapshots(testDir, "vm1@host1", "vm1@host1", []int{1}, []string{}, []string{}, false, false, []string{}, false, false, false)
snapshotManager.PruneSnapshots("vm1@host1", "vm1@host1", []int{1}, []string{}, []string{}, false, false, []string{}, false, false, false)
checkTestSnapshots(snapshotManager, 2, 2)
t.Logf("Prune without removing any snapshots -- no fossils will be deleted")
snapshotManager.PruneSnapshots(testDir, "vm1@host1", "vm1@host1", []int{}, []string{}, []string{}, false, false, []string{}, false, false, false)
snapshotManager.PruneSnapshots("vm1@host1", "vm1@host1", []int{}, []string{}, []string{}, false, false, []string{}, false, false, false)
checkTestSnapshots(snapshotManager, 2, 2)
t.Logf("Creating 1 snapshot")
@@ -301,7 +301,7 @@ func TestSingleHostPrune(t *testing.T) {
checkTestSnapshots(snapshotManager, 3, 2)
t.Logf("Prune without removing any snapshots -- fossils will be deleted")
snapshotManager.PruneSnapshots(testDir, "vm1@host1", "vm1@host1", []int{}, []string{}, []string{}, false, false, []string{}, false, false, false)
snapshotManager.PruneSnapshots("vm1@host1", "vm1@host1", []int{}, []string{}, []string{}, false, false, []string{}, false, false, false)
checkTestSnapshots(snapshotManager, 3, 0)
}
@@ -329,11 +329,11 @@ func TestMultipleHostPrune(t *testing.T) {
checkTestSnapshots(snapshotManager, 3, 0)
t.Logf("Removing snapshot vm1@host1 revision 1 without --exclusive")
snapshotManager.PruneSnapshots(testDir, "vm1@host1", "vm1@host1", []int{1}, []string{}, []string{}, false, false, []string{}, false, false, false)
snapshotManager.PruneSnapshots("vm1@host1", "vm1@host1", []int{1}, []string{}, []string{}, false, false, []string{}, false, false, false)
checkTestSnapshots(snapshotManager, 2, 2)
t.Logf("Prune without removing any snapshots -- no fossils will be deleted")
snapshotManager.PruneSnapshots(testDir, "vm1@host1", "vm1@host1", []int{}, []string{}, []string{}, false, false, []string{}, false, false, false)
snapshotManager.PruneSnapshots("vm1@host1", "vm1@host1", []int{}, []string{}, []string{}, false, false, []string{}, false, false, false)
checkTestSnapshots(snapshotManager, 2, 2)
t.Logf("Creating 1 snapshot")
@@ -342,7 +342,7 @@ func TestMultipleHostPrune(t *testing.T) {
checkTestSnapshots(snapshotManager, 3, 2)
t.Logf("Prune without removing any snapshots -- no fossils will be deleted")
snapshotManager.PruneSnapshots(testDir, "vm1@host1", "vm1@host1", []int{}, []string{}, []string{}, false, false, []string{}, false, false, false)
snapshotManager.PruneSnapshots("vm1@host1", "vm1@host1", []int{}, []string{}, []string{}, false, false, []string{}, false, false, false)
checkTestSnapshots(snapshotManager, 3, 2)
t.Logf("Creating 1 snapshot")
@@ -351,7 +351,7 @@ func TestMultipleHostPrune(t *testing.T) {
checkTestSnapshots(snapshotManager, 4, 2)
t.Logf("Prune without removing any snapshots -- fossils will be deleted")
snapshotManager.PruneSnapshots(testDir, "vm1@host1", "vm1@host1", []int{}, []string{}, []string{}, false, false, []string{}, false, false, false)
snapshotManager.PruneSnapshots("vm1@host1", "vm1@host1", []int{}, []string{}, []string{}, false, false, []string{}, false, false, false)
checkTestSnapshots(snapshotManager, 4, 0)
}
@@ -376,7 +376,7 @@ func TestPruneAndResurrect(t *testing.T) {
checkTestSnapshots(snapshotManager, 2, 0)
t.Logf("Removing snapshot vm1@host1 revision 1 without --exclusive")
snapshotManager.PruneSnapshots(testDir, "vm1@host1", "vm1@host1", []int{1}, []string{}, []string{}, false, false, []string{}, false, false, false)
snapshotManager.PruneSnapshots("vm1@host1", "vm1@host1", []int{1}, []string{}, []string{}, false, false, []string{}, false, false, false)
checkTestSnapshots(snapshotManager, 1, 2)
t.Logf("Creating 1 snapshot")
@@ -385,7 +385,7 @@ func TestPruneAndResurrect(t *testing.T) {
checkTestSnapshots(snapshotManager, 2, 2)
t.Logf("Prune without removing any snapshots -- one fossil will be resurrected")
snapshotManager.PruneSnapshots(testDir, "vm1@host1", "vm1@host1", []int{}, []string{}, []string{}, false, false, []string{}, false, false, false)
snapshotManager.PruneSnapshots("vm1@host1", "vm1@host1", []int{}, []string{}, []string{}, false, false, []string{}, false, false, false)
checkTestSnapshots(snapshotManager, 2, 0)
}
@@ -413,11 +413,11 @@ func TestInactiveHostPrune(t *testing.T) {
checkTestSnapshots(snapshotManager, 3, 0)
t.Logf("Removing snapshot vm1@host1 revision 1")
snapshotManager.PruneSnapshots(testDir, "vm1@host1", "vm1@host1", []int{1}, []string{}, []string{}, false, false, []string{}, false, false, false)
snapshotManager.PruneSnapshots("vm1@host1", "vm1@host1", []int{1}, []string{}, []string{}, false, false, []string{}, false, false, false)
checkTestSnapshots(snapshotManager, 2, 2)
t.Logf("Prune without removing any snapshots -- no fossils will be deleted")
snapshotManager.PruneSnapshots(testDir, "vm1@host1", "vm1@host1", []int{}, []string{}, []string{}, false, false, []string{}, false, false, false)
snapshotManager.PruneSnapshots("vm1@host1", "vm1@host1", []int{}, []string{}, []string{}, false, false, []string{}, false, false, false)
checkTestSnapshots(snapshotManager, 2, 2)
t.Logf("Creating 1 snapshot")
@@ -426,7 +426,7 @@ func TestInactiveHostPrune(t *testing.T) {
checkTestSnapshots(snapshotManager, 3, 2)
t.Logf("Prune without removing any snapshots -- fossils will be deleted")
snapshotManager.PruneSnapshots(testDir, "vm1@host1", "vm1@host1", []int{}, []string{}, []string{}, false, false, []string{}, false, false, false)
snapshotManager.PruneSnapshots("vm1@host1", "vm1@host1", []int{}, []string{}, []string{}, false, false, []string{}, false, false, false)
checkTestSnapshots(snapshotManager, 3, 0)
}
@@ -454,14 +454,14 @@ func TestRetentionPolicy(t *testing.T) {
checkTestSnapshots(snapshotManager, 30, 0)
t.Logf("Removing snapshot vm1@host1 0:20 with --exclusive")
snapshotManager.PruneSnapshots(testDir, "vm1@host1", "vm1@host1", []int{}, []string{}, []string{"0:20"}, false, true, []string{}, false, false, false)
snapshotManager.PruneSnapshots("vm1@host1", "vm1@host1", []int{}, []string{}, []string{"0:20"}, false, true, []string{}, false, false, false)
checkTestSnapshots(snapshotManager, 19, 0)
t.Logf("Removing snapshot vm1@host1 -k 0:20 with --exclusive")
snapshotManager.PruneSnapshots(testDir, "vm1@host1", "vm1@host1", []int{}, []string{}, []string{"0:20"}, false, true, []string{}, false, false, false)
snapshotManager.PruneSnapshots("vm1@host1", "vm1@host1", []int{}, []string{}, []string{"0:20"}, false, true, []string{}, false, false, false)
checkTestSnapshots(snapshotManager, 19, 0)
t.Logf("Removing snapshot vm1@host1 -k 3:14 -k 2:7 with --exclusive")
snapshotManager.PruneSnapshots(testDir, "vm1@host1", "vm1@host1", []int{}, []string{}, []string{"3:14", "2:7"}, false, true, []string{}, false, false, false)
snapshotManager.PruneSnapshots("vm1@host1", "vm1@host1", []int{}, []string{}, []string{"3:14", "2:7"}, false, true, []string{}, false, false, false)
checkTestSnapshots(snapshotManager, 12, 0)
}

View File

@@ -1,6 +1,6 @@
// Copyright (c) Acrosync LLC. All rights reserved.
// Licensed under the Fair Source License 0.9 (https://fair.io/)
// User Limitation: 5 users
// Free for personal use and commercial trial
// Commercial use requires per-user licenses available from https://duplicacy.com
package duplicacy
@@ -75,14 +75,10 @@ func (storage *RateLimitedStorage) SetRateLimits(downloadRateLimit int, uploadRa
storage.UploadRateLimit = uploadRateLimit
}
func checkHostKey(repository string, hostname string, remote net.Addr, key ssh.PublicKey) error {
if len(repository) == 0 {
return nil
}
duplicacyDirectory := path.Join(repository, DUPLICACY_DIRECTORY)
hostFile := path.Join(duplicacyDirectory, "knowns_hosts")
func checkHostKey(hostname string, remote net.Addr, key ssh.PublicKey) error {
preferencePath := GetDuplicacyPreferencePath()
hostFile := path.Join(preferencePath, "known_hosts")
file, err := os.OpenFile(hostFile, os.O_RDWR | os.O_CREATE, 0600)
if err != nil {
return err
@@ -126,11 +122,12 @@ func checkHostKey(repository string, hostname string, remote net.Addr, key ssh.P
}
// CreateStorage creates a storage object based on the provide storage URL.
func CreateStorage(repository string, preference Preference, resetPassword bool, threads int) (storage Storage) {
func CreateStorage(preference Preference, resetPassword bool, threads int) (storage Storage) {
storageURL := preference.StorageURL
isFileStorage := false
isCacheNeeded := false
if strings.HasPrefix(storageURL, "/") {
isFileStorage = true
@@ -144,11 +141,30 @@ func CreateStorage(repository string, preference Preference, resetPassword bool,
if !isFileStorage && strings.HasPrefix(storageURL, `\\`) {
isFileStorage = true
isCacheNeeded = true
}
}
if isFileStorage {
fileStorage, err := CreateFileStorage(storageURL, threads)
fileStorage, err := CreateFileStorage(storageURL, 2, isCacheNeeded, threads)
if err != nil {
LOG_ERROR("STORAGE_CREATE", "Failed to load the file storage at %s: %v", storageURL, err)
return nil
}
return fileStorage
}
if strings.HasPrefix(storageURL, "flat://") {
fileStorage, err := CreateFileStorage(storageURL[7:], 0, false, threads)
if err != nil {
LOG_ERROR("STORAGE_CREATE", "Failed to load the file storage at %s: %v", storageURL, err)
return nil
}
return fileStorage
}
if strings.HasPrefix(storageURL, "samba://") {
fileStorage, err := CreateFileStorage(storageURL[8:], 2, true, threads)
if err != nil {
LOG_ERROR("STORAGE_CREATE", "Failed to load the file storage at %s: %v", storageURL, err)
return nil
@@ -282,7 +298,7 @@ func CreateStorage(repository string, preference Preference, resetPassword bool,
}
hostKeyChecker := func(hostname string, remote net.Addr, key ssh.PublicKey) error {
return checkHostKey(repository, hostname, remote, key)
return checkHostKey(hostname, remote, key)
}
sftpStorage, err := CreateSFTPStorage(server, port, username, storageDir, authMethods, hostKeyChecker, threads)
@@ -297,7 +313,7 @@ func CreateStorage(repository string, preference Preference, resetPassword bool,
SavePassword(preference, "ssh_password", password)
}
return sftpStorage
} else if matched[1] == "s3" {
} else if matched[1] == "s3" || matched[1] == "s3c" || matched[1] == "minio" || matched[1] == "minios" {
// urlRegex := regexp.MustCompile(`^(\w+)://([\w\-]+@)?([^/]+)(/(.+))?`)
@@ -323,15 +339,27 @@ func CreateStorage(repository string, preference Preference, resetPassword bool,
accessKey := GetPassword(preference, "s3_id", "Enter S3 Access Key ID:", true, resetPassword)
secretKey := GetPassword(preference, "s3_secret", "Enter S3 Secret Access Key:", true, resetPassword)
s3Storage, err := CreateS3Storage(region, endpoint, bucket, storageDir, accessKey, secretKey, threads)
if err != nil {
LOG_ERROR("STORAGE_CREATE", "Failed to load the S3 storage at %s: %v", storageURL, err)
return nil
var err error
if matched[1] == "s3c" {
storage, err = CreateS3CStorage(region, endpoint, bucket, storageDir, accessKey, secretKey, threads)
if err != nil {
LOG_ERROR("STORAGE_CREATE", "Failed to load the S3C storage at %s: %v", storageURL, err)
return nil
}
} else {
isMinioCompatible := (matched[1] == "minio" || matched[1] == "minios")
isSSLSupported := (matched[1] == "s3" || matched[1] == "minios")
storage, err = CreateS3Storage(region, endpoint, bucket, storageDir, accessKey, secretKey, threads, isSSLSupported, isMinioCompatible)
if err != nil {
LOG_ERROR("STORAGE_CREATE", "Failed to load the S3 storage at %s: %v", storageURL, err)
return nil
}
}
SavePassword(preference, "s3_id", accessKey)
SavePassword(preference, "s3_secret", secretKey)
return s3Storage
return storage
} else if matched[1] == "dropbox" {
storageDir := matched[3] + matched[5]
token := GetPassword(preference, "dropbox_token", "Enter Dropbox access token:", true, resetPassword)
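The dispatch above keys off the URL scheme to pick a backend along with its chunk-directory nesting and caching behaviour; the second argument to CreateFileStorage is treated here as the nesting level, which is an assumption based on the flat:// commit message. A condensed sketch of that prefix handling with stand-in types:

package main

import (
	"fmt"
	"strings"
)

// storageKind describes how a storage URL scheme maps onto backend behaviour.
type storageKind struct {
	backend      string
	nestingLevel int  // assumed: levels of chunk subdirectories (0 for flat://, 2 otherwise)
	cacheNeeded  bool // whether a local snapshot cache should be kept
}

// classifyStorageURL mirrors the scheme handling above for the file-based prefixes.
func classifyStorageURL(storageURL string) storageKind {
	switch {
	case strings.HasPrefix(storageURL, "flat://"):
		return storageKind{backend: "file", nestingLevel: 0, cacheNeeded: false}
	case strings.HasPrefix(storageURL, "samba://"):
		return storageKind{backend: "file", nestingLevel: 2, cacheNeeded: true}
	case strings.HasPrefix(storageURL, "/"), strings.HasPrefix(storageURL, `\\`):
		return storageKind{backend: "file", nestingLevel: 2, cacheNeeded: strings.HasPrefix(storageURL, `\\`)}
	default:
		return storageKind{backend: "remote"}
	}
}

func main() {
	for _, url := range []string{"flat:///backups", "samba://nas/backups", "/mnt/backups", "s3://bucket/path"} {
		fmt.Printf("%-24s -> %+v\n", url, classifyStorageURL(url))
	}
}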

View File

@@ -1,6 +1,6 @@
// Copyright (c) Acrosync LLC. All rights reserved.
// Licensed under the Fair Source License 0.9 (https://fair.io/)
// User Limitation: 5 users
// Free for personal use and commercial trial
// Commercial use requires per-user licenses available from https://duplicacy.com
package duplicacy
@@ -41,7 +41,7 @@ func init() {
func loadStorage(localStoragePath string, threads int) (Storage, error) {
if testStorageName == "" || testStorageName == "file" {
return CreateFileStorage(localStoragePath, threads)
return CreateFileStorage(localStoragePath, 2, false, threads)
}
config, err := ioutil.ReadFile("test_storage.conf")
@@ -61,17 +61,27 @@ func loadStorage(localStoragePath string, threads int) (Storage, error) {
return nil, fmt.Errorf("No storage named '%s' found", testStorageName)
}
if testStorageName == "sftp" {
if testStorageName == "flat" {
return CreateFileStorage(localStoragePath, 0, false, threads)
} else if testStorageName == "samba" {
return CreateFileStorage(localStoragePath, 2, true, threads)
} else if testStorageName == "sftp" {
port, _ := strconv.Atoi(storage["port"])
return CreateSFTPStorageWithPassword(storage["server"], port, storage["username"], storage["directory"], storage["password"], threads)
} else if testStorageName == "s3" {
return CreateS3Storage(storage["region"], storage["endpoint"], storage["bucket"], storage["directory"], storage["access_key"], storage["secret_key"], threads)
} else if testStorageName == "s3" || testStorageName == "wasabi" {
return CreateS3Storage(storage["region"], storage["endpoint"], storage["bucket"], storage["directory"], storage["access_key"], storage["secret_key"], threads, true, false)
} else if testStorageName == "s3c" {
return CreateS3CStorage(storage["region"], storage["endpoint"], storage["bucket"], storage["directory"], storage["access_key"], storage["secret_key"], threads)
} else if testStorageName == "minio" {
return CreateS3Storage(storage["region"], storage["endpoint"], storage["bucket"], storage["directory"], storage["access_key"], storage["secret_key"], threads, false, true)
} else if testStorageName == "minios" {
return CreateS3Storage(storage["region"], storage["endpoint"], storage["bucket"], storage["directory"], storage["access_key"], storage["secret_key"], threads, true, true)
} else if testStorageName == "dropbox" {
return CreateDropboxStorage(storage["token"], storage["directory"], threads)
} else if testStorageName == "b2" {
return CreateB2Storage(storage["account"], storage["key"], storage["bucket"], threads)
} else if testStorageName == "gcs-s3" {
return CreateS3Storage(storage["region"], storage["endpoint"], storage["bucket"], storage["directory"], storage["access_key"], storage["secret_key"], threads)
return CreateS3Storage(storage["region"], storage["endpoint"], storage["bucket"], storage["directory"], storage["access_key"], storage["secret_key"], threads, true, false)
} else if testStorageName == "gcs" {
return CreateGCSStorage(storage["token_file"], storage["bucket"], storage["directory"], threads)
} else if testStorageName == "gcs-sa" {
@@ -448,3 +458,64 @@ func TestStorage(t *testing.T) {
}
}
func TestCleanStorage(t *testing.T) {
setTestingT(t)
SetLoggingLevel(INFO)
defer func() {
if r := recover(); r != nil {
switch e := r.(type) {
case Exception:
t.Errorf("%s %s", e.LogID, e.Message)
debug.PrintStack()
default:
t.Errorf("%v", e)
debug.PrintStack()
}
}
} ()
testDir := path.Join(os.TempDir(), "duplicacy_test", "storage_test")
os.RemoveAll(testDir)
os.MkdirAll(testDir, 0700)
LOG_INFO("STORAGE_TEST", "storage: %s", testStorageName)
storage, err := loadStorage(testDir, 1)
if err != nil {
t.Errorf("Failed to create storage: %v", err)
return
}
directories := make([]string, 0, 1024)
directories = append(directories, "snapshots/")
directories = append(directories, "chunks/")
for len(directories) > 0 {
dir := directories[len(directories) - 1]
directories = directories[:len(directories) - 1]
LOG_INFO("LIST_FILES", "Listing %s", dir)
files, _, err := storage.ListFiles(0, dir)
if err != nil {
LOG_ERROR("LIST_FILES", "Failed to list the directory %s: %v", dir, err)
return
}
for _, file := range files {
if len(file) > 0 && file[len(file) - 1] == '/' {
directories = append(directories, dir + file)
} else {
storage.DeleteFile(0, dir + file)
LOG_INFO("DELETE_FILE", "Deleted file %s", file)
}
}
}
storage.DeleteFile(0, "config")
LOG_INFO("DELETE_FILE", "Deleted config")
}

View File

@@ -1,6 +1,6 @@
// Copyright (c) Acrosync LLC. All rights reserved.
// Licensed under the Fair Source License 0.9 (https://fair.io/)
// User Limitation: 5 users
// Free for personal use and commercial trial
// Commercial use requires per-user licenses available from https://duplicacy.com
package duplicacy
@@ -9,7 +9,6 @@ import (
"os"
"bufio"
"io"
"io/ioutil"
"time"
"path"
"path/filepath"
@@ -48,6 +47,16 @@ func (reader *RateLimitedReader) Reset() {
reader.Next = 0
}
func (reader *RateLimitedReader) Seek(offset int64, whence int) (int64, error) {
if whence == io.SeekStart {
reader.Next = int(offset)
} else if whence == io.SeekCurrent {
reader.Next += int(offset)
} else {
// io.SeekEnd: the offset is relative to the end of the content (typically zero or negative).
reader.Next = len(reader.Content) + int(offset)
}
return int64(reader.Next), nil
}
func (reader *RateLimitedReader) Read(p []byte) (n int, err error) {
@@ -180,54 +189,6 @@ func SavePassword(preference Preference, passwordType string, password string) {
keyringSet(passwordID, password)
}
// RemoveEmptyDirectories remove all empty subdirectoreies under top.
func RemoveEmptyDirectories(top string) {
stack := make([]string, 0, 256)
stack = append(stack, top)
for len(stack) > 0 {
dir := stack[len(stack) - 1]
stack = stack[:len(stack) - 1]
files, err := ioutil.ReadDir(dir)
if err != nil {
continue
}
for _, file := range files {
if file.IsDir() && file.Name()[0] != '.' {
stack = append(stack, path.Join(dir, file.Name()))
}
}
if len(files) == 0 {
if os.Remove(dir) != nil {
continue
}
dir = path.Dir(dir)
for (len(dir) > len(top)) {
files, err := ioutil.ReadDir(dir)
if err != nil {
break
}
if len(files) == 0 {
if os.Remove(dir) != nil {
break;
}
}
dir = path.Dir(dir)
}
}
}
}
// The following code was modified from the online article 'Matching Wildcards: An Algorithm', by Kirk J. Krauss,
// Dr. Dobb's, August 26, 2008. However, the version in the article doesn't handle cases like matching 'abcccd'
// against '*ccd', and the version here fixed that issue.

View File

@@ -1,6 +1,6 @@
// Copyright (c) Acrosync LLC. All rights reserved.
// Licensed under the Fair Source License 0.9 (https://fair.io/)
// User Limitation: 5 users
// Free for personal use and commercial trial
// Commercial use requires per-user licenses available from https://duplicacy.com
// +build !windows

View File

@@ -1,6 +1,6 @@
// Copyright (c) Acrosync LLC. All rights reserved.
// Licensed under the Fair Source License 0.9 (https://fair.io/)
// User Limitation: 5 users
// Free for personal use and commercial trial
// Commercial use requires per-user licenses available from https://duplicacy.com
package duplicacy

View File

@@ -1,6 +1,6 @@
// Copyright (c) Acrosync LLC. All rights reserved.
// Licensed under the Fair Source License 0.9 (https://fair.io/)
// User Limitation: 5 users
// Free for personal use and commercial trial
// Commercial use requires per-user licenses available from https://duplicacy.com
package duplicacy