Compare commits

132 Commits

Author SHA1 Message Date
Gilbert Chen
2e5cbc73b9 Bump version to 2.1.2 2018-11-03 11:45:50 -04:00
Gilbert Chen
21b3d9e57f Padding size was incorrect -- didn't pad to multiples of 256 2018-11-03 11:42:03 -04:00
Gilbert Chen
244b797a1c Print the number of files if available in the snapshot file 2018-11-03 10:38:35 -04:00
Gilbert Chen
073292018c Don't show snapshots whose tags don't match the given one 2018-10-28 23:30:22 -04:00
Gilbert Chen
15f15aa2ca Show more statistics in the check command 2018-10-28 23:27:36 -04:00
Gilbert Chen
d8e13d8d85 Benchmark may incorrectly list the chunks directory when looking for previous temporary files 2018-10-22 09:11:15 -04:00
Gilbert Chen
bfb4b44c0a Optimizing restore to avoid reading newly created sparse files 2018-10-21 22:43:24 -04:00
Gilbert Chen
22a0b222db Align snapshot times to the beginning of days when calculating the differences 2018-09-08 20:31:49 -04:00
Gilbert Chen
674d35e5ca Get accountID from b2_authorize_account and supply it to b2_list_buckets 2018-09-08 20:21:49 -04:00
Gilbert Chen
a7d2a941be Restore UID and GID of symlinks 2018-08-29 17:10:35 -04:00
Gilbert Chen
39d71a3256 Fixed a divide by zero bug when the repository has only zero-byte files 2018-08-10 12:17:40 -04:00
Gilbert Chen
9d10cc77fc Do not update the Windows keyring file if the password remains unchanged 2018-08-08 14:03:49 -04:00
Gilbert Chen
e8b8922754 Continue to check other snapshots when one snapshot has missing chunks 2018-08-06 21:20:04 -04:00
Gilbert Chen
93cc632021 Record deleted snapshots in the fossil collection and if any deleted snapshot still exists nuke the fossil collection 2018-08-04 22:59:25 -04:00
Gilbert Chen
48cc5eaedb Print git commit number 2018-08-03 23:45:23 -04:00
Gilbert Chen
f304b64b3f Removed a redundant call to manager.chunkOperator.Resurrect 2018-08-03 11:32:24 -04:00
Gilbert Chen
8ae7d2a97d Remove extra newline in the PRUNE_NEWSNAPSHOT log message 2018-07-26 21:24:33 -04:00
Gilbert Chen
fce4234861 Rearrange struct members to avoid 64-bit int alignment issues 2018-07-26 21:19:03 -04:00
gilbertchen
e499a24202 Merge pull request #459 from jtackaberry/master
Fix "Failed to fossilize chunk" errors in wasabi backend
2018-07-24 21:52:27 -04:00
Gilbert Chen
89769f3906 Add a -storage option to the benchmark command 2018-07-23 22:54:56 -04:00
Gilbert Chen
798cec0714 Bump version to 2.1.1 2018-07-23 22:10:12 -04:00
Gilbert Chen
72dfaa8b6b Fixed a bug causing a new snapshot to be not counted when deciding which fossils can be deleted 2018-07-23 22:08:08 -04:00
Jason Tackaberry
117cfd997f Make Wasabi double slash fix more idiomatic 2018-07-13 12:43:27 -04:00
Jason Tackaberry
84f7c513d5 Fix "Failed to fossilize chunk" errors in wasabi backend
Fixes #458
2018-07-11 22:55:04 -04:00
gilbertchen
dfdbfed64b Merge pull request #449 from gilbertchen/benchmark_command
Benchmark command
2018-07-02 16:26:28 -04:00
gilbertchen
d4a65ffbcf Merge pull request #441 from gilbertchen/threaded_prune
Implement multithreaded pruning
2018-07-02 16:20:41 -04:00
gilbertchen
736003323a Merge pull request #417 from amarcu5/fix-permissions
Fixed restoration of basic UNIX file permissions
2018-07-02 14:31:10 -04:00
gilbertchen
0af74616b7 Merge pull request #415 from amarcu5/master
Add APFS snapshot support
2018-07-02 12:41:19 -04:00
gilbertchen
0f552c8c50 Update ISSUE_TEMPLATE.md 2018-06-20 23:24:59 -04:00
gilbertchen
1adf92e879 Create ISSUE_TEMPLATE.md 2018-06-20 23:24:02 -04:00
Gilbert Chen
f92f1a728c Check in src/duplicacy_benchmark.go 2018-06-17 22:09:47 -04:00
Gilbert Chen
9a0dcdb0b2 Add a benchmark command to test disk and network speeds 2018-06-17 22:00:01 -04:00
Gilbert Chen
9ae306644d Avoid filepath.Dir as it returns paths with back slashes on Windows 2018-06-07 16:00:39 -04:00
Gilbert Chen
6f0166be6d Add a test for multi-threaded pruning 2018-06-06 13:11:19 -04:00
Gilbert Chen
f68eb13584 A few fixes for multi-threaded pruning 2018-06-05 16:09:12 -04:00
Gilbert Chen
dd53b4797e Implement multi-threaded pruning 2018-06-04 21:52:07 -04:00
Gilbert Chen
7e021f26d3 Don't show the nil error when failing to replace a file with a directory during restore 2018-05-30 13:23:35 -04:00
Gilbert Chen
0e585e4be4 Fixed a crashing bug when showing the history of excluded files 2018-05-30 12:05:40 -04:00
Gilbert Chen
e03cd2a880 Add unreferenced fossils to the fossil collection instead of deleting them immediately 2018-05-29 12:57:38 -04:00
Gilbert Chen
f80a5b1025 Fixed format argument errors 2018-05-24 11:37:52 -04:00
Gilbert Chen
aadd2aa390 Add an -enum-only option to the backup command to enumerate the repository only 2018-05-24 11:34:46 -04:00
Gilbert Chen
72239a31c4 Add -repository option to init and add to specify an alternate repository path 2018-05-22 15:55:52 -04:00
gilbertchen
c9b60cc0e0 Merge pull request #394 from gilbertchen/webdav
Implement the WebDAV backend
2018-05-16 23:31:46 -04:00
gilbertchen
f4cdd1f01b Merge pull request #392 from fhriley/nobackup_file
Add nobackup-file preference.
2018-05-16 23:31:09 -04:00
Gilbert Chen
b1c1b47983 Add an env var DUPLICACY_DECRYPT_WITH_HMACSHA256 to force using HMAC-SHA256 for encryption key in order to be able to manage backups created by Vertical Backup 2018-05-02 22:57:47 -04:00
amarcu5
8c3ef6cae9 Improved cron support
Now calls mount and unmount using absolute path
2018-04-30 00:40:34 +01:00
amarcu5
acd7addc9a Fixed Windows compilation 2018-04-30 00:36:11 +01:00
amarcu5
c23ea30da4 Updated VSS flag usage
VSS flag usage now indicates support for macOS using APFS in addition to Windows
2018-04-29 23:24:22 +01:00
amarcu5
a4c46624ea Fix basic UNIX permissions 2018-04-29 20:24:50 +01:00
amarcu5
5747d6763f Improved error handling
Minor improvement to error handling and small formatting changes
2018-04-29 13:33:17 +01:00
amarcu5
ef1d316f33 Cleaned up formatting
Fixed tabbing
2018-04-29 04:21:28 +01:00
amarcu5
714a45c34a Improved macOS VSS checks
VSS support now determined by:
1) Checking that repository filesystem is APFS rather than by inspecting macOS version
2) Checking that repository resides on local device (as external APFS formatted drives are not supported) rather than crudely by path name
2018-04-29 04:11:10 +01:00
Gilbert Chen
23a2d91608 Skipped chunks should not be counted in order to get accurate download percentage 2018-04-28 20:19:24 -04:00
amarcu5
23b98a3034 Added macOS VSS checks
Restricts VSS under macOS to version 10.13 High Sierra or higher and local volume only
2018-04-28 23:09:59 +01:00
amarcu5
8a3c5847a8 Cleaned up formatting 2018-04-28 21:23:00 +01:00
amarcu5
4c3d5dbc2f Add APFS snapshot support
Adds VSS support for macOS using APFS snapshots
2018-04-28 17:58:47 +01:00
Gilbert Chen
85bc55e374 The -threads option for the copy command specifies the number of uploading threads 2018-04-11 21:11:24 -04:00
gilbertchen
cd0c7b07a9 Update README.md 2018-04-06 22:27:31 -04:00
gilbertchen
0ea26a92dd Update README.md 2018-04-06 09:46:42 -04:00
gilbertchen
ca889fca9f Update README.md 2018-04-06 09:28:05 -04:00
gilbertchen
fbaea6e8b1 Update README.md 2018-04-06 00:06:48 -04:00
gilbertchen
2290c4ace0 Add files via upload 2018-04-05 23:36:57 -04:00
Gilbert Chen
02cd41f4d0 A few improvements to make WebDAV work better with pcloud and box.com 2018-04-05 15:29:41 -04:00
Gilbert Chen
0db8b9831b Implement the WebDAV backend 2018-04-02 20:03:50 -04:00
Frank
4dd5c43307 Add nobackup-file preference.
Directories containing a file with this name will not be backed up. I find it easier to drop a .nobackup file in directories I don't want backed up instead of maintaining a file of exclusions. This is also useful for scripts that create data in the repository but don't want it to be backed up.
2018-04-01 12:50:00 -07:00
Gilbert Chen
6aedc37118 Update the description for the -comment option 2018-03-28 22:19:18 -04:00
gilbertchen
9a56ede07c Merge pull request #391 from jeffaco/jeff-comment
Add -comment capability to allow for duplicacy jobs to be identified with 'ps'
2018-03-28 22:10:58 -04:00
Jeff Coffler
c6e9460b7b Add 'How to build' reference to README.md. 2018-03-28 11:22:46 -07:00
Jeff Coffler
e74ab809ae Add new qualifier, -comment, to more easily identify specific jobs.
This qualifier allows specific text to be associated with a backup
job to easily associate what repository (or other context) a
duplicacy job is running on behalf of.
2018-03-28 10:16:08 -07:00
Gilbert Chen
5d2242d39d Preserve the list of chunk hashes for the latest snapshot when cleaning local snapshot cache 2018-03-27 23:34:40 -04:00
Gilbert Chen
b99f4bffec Follow symlinks that point to paths starting with \\ 2018-03-22 22:37:09 -04:00
Gilbert Chen
be2856ebbd Add a -vss-timeout option to set VSS creation timeout 2018-03-21 22:34:14 -04:00
Gilbert Chen
1ea615fb45 Fix the names of prune tests so they can be run all in once 2018-03-20 15:03:03 -04:00
Gilbert Chen
7d933a2576 Create two identical snapshots in prune test to catch a retrieve-after-deletion bug 2018-03-20 14:37:47 -04:00
gilbertchen
13fffc2a11 Merge pull request #329 from pdf/prune_memory
Reduce memory consumption for prune operation
2018-03-20 14:32:12 -04:00
gilbertchen
9658463ebe Merge pull request #331 from markfeit/master
Added semi-dedicated Wasabi storage module.  #322
2018-03-19 12:03:31 -04:00
gilbertchen
cd77a029ea Merge branch 'master' into master 2018-03-19 11:53:11 -04:00
Gilbert Chen
4948806d3d Bump version to 2.1.0 2018-03-09 14:43:06 -05:00
Gilbert Chen
42c317c477 Run dep ensure for release 2.1.0 2018-03-09 14:19:27 -05:00
Gilbert Chen
013eac0cf2 Use github.com/gilbertchen/azure-sdk-for-go to retry on temporary errors 2018-03-09 11:54:06 -05:00
gilbertchen
bc9ccd860f Merge pull request #353 from gilbertchen/openstack_swift
Implement OpenStack Swift backend
2018-03-08 16:08:30 -05:00
gilbertchen
25935ca324 Merge pull request #364 from sergeevabc/patch-1
Fix some typos
2018-03-08 16:08:13 -05:00
Aleksandr Sergeev
bcace5aee2 Fix some typos
deriviation, specifed, acess
2018-02-22 14:57:59 +00:00
Mark Feit
8fdb399e1b Correct handling of @ in region to be consistent with everything else. 2018-02-11 07:51:34 -05:00
Gilbert Chen
e07226bd62 Retention policy erroneously applied to snapshots without the specified tags 2018-02-10 21:33:01 -05:00
Mark Feit
9d632c0434 Handle application/testing region string inconsistency. 2018-02-10 10:16:04 -05:00
Mark Feit
cc6e96527e Add/rearrange returns to keep the compiler from complaining. 2018-02-10 10:15:40 -05:00
Gilbert Chen
ddf61aee9d Implement OpenStack Swift backend 2018-02-04 13:43:00 -05:00
Gilbert Chen
52fd553bb9 Fixed 2 bugs in restoring extended attributes 2018-01-29 14:38:48 -05:00
Gilbert Chen
7230ddbef5 Clear the attributes from last snapshot after loading to save memory 2018-01-28 16:54:06 -05:00
Gilbert Chen
ffe04d691b Convert spaces in the path only for now 2018-01-23 12:15:50 -05:00
Gilbert Chen
e0d7355494 URLEncode the file path to allow non-ascii characters in the path 2018-01-22 22:58:30 -05:00
Gilbert Chen
d330f61d25 Limit derivation key to 64 bytes since snapshot file path used as key may be longer 2018-01-20 23:52:35 -05:00
Gilbert Chen
e5beb55336 Replace spaces in file paths with %20 (for repository ids with spaces) 2018-01-20 22:59:41 -05:00
Peter Fern
57082cd1d2 Reduce memory consumption for prune operation
For non-exhaustive prune, consider only target chunks instead of mapping
all chunks in repository.
2018-01-21 10:12:09 +11:00
Peter Fern
bd5a689b7d Add keepChunkHashes flag to GetSnapshotChunks, allowing reduced memory 2018-01-21 10:11:59 +11:00
Mark Feit
b52d6b3f7f Incorporated PR feedback; call S3 for IsFastListing() #322 2018-01-18 08:34:33 -05:00
Mark Feit
8aaca37a2b Added note about Wasabi dependency. 2018-01-18 08:30:00 -05:00
gilbertchen
9898f77d9c Merge pull request #327 from jamiesonbecker/patch-1
Adding a few zeroes so the numbers line up.
2018-01-17 22:26:14 -05:00
Mark Feit
30f753e499 Cosmetic and key name fixes. #322 2018-01-16 22:19:41 -05:00
Mark Feit
d0771be2dd Added semi-dedicated Wasabi storage module. #322 2018-01-16 22:06:11 -05:00
Jamieson Becker
25fbc9ad03 Adding a few zeroes so the numbers line up. 2018-01-13 13:11:23 -06:00
Gilbert Chen
91f02768f9 Retry on download errors for Hubic which may return 404 for existing chunks 2018-01-13 00:23:33 -05:00
Gilbert Chen
8e8a116028 Fixed a bug that caused -hash to have no effect 2018-01-11 21:43:42 -05:00
Gilbert Chen
771323510d Don't download a fossil directly; resurrect it and download the chunk instead 2018-01-08 23:58:49 -05:00
Gilbert Chen
61fb0f7b40 Fixed a typo in a log message 2018-01-08 14:42:59 -05:00
Gilbert Chen
f1060491ae Use the official azure-sdk-for-go rather than our fork 2018-01-08 14:14:20 -05:00
Gilbert Chen
837fd5e4fd Add -storage-name to the info command for reading the correct password 2018-01-06 23:33:13 -05:00
gilbertchen
0670f709f3 Merge pull request #298 from jay1337/master
Hubic retry mechanism improvement
2018-01-04 21:16:31 -05:00
Gilbert Chen
f944e01a02 Fix typos 2017-12-15 08:24:15 -05:00
Gilbert Chen
f6ef9094bc Add debugging messages in incomplete snapshot handling 2017-12-15 08:23:56 -05:00
Gilbert Chen
36d7c583fa Refresh token unconditionally on authorization errors 2017-12-15 08:06:23 -05:00
Gilbert Chen
9fdff7b150 Add the global -profile option to enable profiling 2017-12-14 15:34:05 -05:00
Gilbert Chen
dfbc5ece00 Fixed the nesting file name 2017-12-14 13:55:02 -05:00
Gilbert Chen
50d2e2603a Fix the GCD directory creating bug; only save directories in the id cache 2017-12-11 11:17:19 -05:00
Gilbert Chen
61e4329522 Revert "Fixed a bug in creating directories in Google Drive storage backend"
This reverts commit 801433340a.

That fix puts everything in the cache which leads to a memory explosion problem.
The correct fix is to only put directories in the cache.
2017-12-11 08:26:32 -05:00
Gilbert Chen
801433340a Fixed a bug in creating directories in Google Drive storage backend 2017-12-10 23:02:12 -05:00
Jérôme
91a95d0cd3 Hubic retry mechanism improvement:
- longer ResponseHeaderTimeout
- more retries
- no "retryAfter time" lower than 500ms
- retry after StatusCode 408
2017-12-11 00:08:51 +01:00
Gilbert Chen
612f6e27cb Fixed a chunk listing bug in Hubic backend 2017-12-09 23:09:23 -05:00
Gilbert Chen
430d7b6241 Merge branch 'master' of https://github.com/gilbertchen/duplicacy 2017-12-09 17:38:19 -05:00
Gilbert Chen
c5e2032715 Remove existing config and save a local copy when changing password 2017-12-09 17:34:43 -05:00
gilbertchen
048827742c Merge pull request #285 from samcorbin/readme
Update Backblaze's pricing
2017-12-06 23:22:47 -05:00
gilbertchen
0576efe36c Merge pull request #283 from michaelcinquin/master
create the destination folder on gcd storage if it doesn't exist
2017-12-06 23:22:15 -05:00
Gilbert Chen
8bd463288f Add a -storage-name option to specify the name of the storage to be initialized 2017-12-05 13:42:17 -05:00
Gilbert Chen
2f4e7422ca List known repository ids in the info command 2017-12-03 23:36:19 -05:00
Gilbert Chen
9dbf517e8a Remove aes128-cbc from the supported ciphers by HiDrive 2017-12-02 21:14:40 -05:00
samcorbin
e93ee2d776 Update Backblaze's pricing 2017-12-02 13:55:43 +10:30
Michael Cinquin
3371ea445e create the destination folder on gcd storage if it doesn't exist 2017-12-01 12:28:52 +01:00
Gilbert Chen
6f69aff712 Disable caching when retrieving files in SnapshotManager 2017-11-27 23:36:09 -05:00
Gilbert Chen
7a7ea3ad18 Update dependency requirement for github.com/gilbertchen/cli 2017-11-26 12:17:07 -05:00
Gilbert Chen
4aa2edb164 Fixed a test build error caused by the new bit-identical argument 2017-11-21 22:22:17 -05:00
Gilbert Chen
29bbd49a1c Retry on any error in the hubic backend 2017-11-21 22:21:18 -05:00
44 changed files with 2968 additions and 618 deletions

.github/ISSUE_TEMPLATE.md (new file, 5 lines)

@@ -0,0 +1,5 @@
Please submit an issue for bug reports or feature requests. If you have any questions please post them on https://forum.duplicacy.com.
When you're reporting a bug, please specify the OS, version, command line arguments, or any info that you think is helpful for the diagnosis. If Duplicacy reports an error, please post the program output here.
Note that this repository hosts the CLI version of Duplicacy only. If you're reporting anything related to the GUI version, please visit https://forum.duplicacy.com.

Gopkg.lock (generated, 38 lines changed)

@@ -7,11 +7,17 @@
revision = "2d3a6656c17a60b0815b7e06ab0be04eacb6e613"
version = "v0.16.0"
[[projects]]
name = "github.com/Azure/azure-sdk-for-go"
packages = ["version"]
revision = "b7fadebe0e7f5c5720986080a01495bd8d27be37"
version = "v14.2.0"
[[projects]]
name = "github.com/Azure/go-autorest"
packages = ["autorest","autorest/adal","autorest/azure","autorest/date"]
revision = "c67b24a8e30d876542a85022ebbdecf0e5a935e8"
version = "v9.4.1"
revision = "0ae36a9e544696de46fdadb7b0d5fb38af48c063"
version = "v10.2.0"
[[projects]]
branch = "master"
@@ -38,16 +44,16 @@
version = "v3.1.0"
[[projects]]
branch = "master"
name = "github.com/gilbertchen/azure-sdk-for-go"
packages = ["storage"]
revision = "2d49bb8f2cee530cc16f1f1a9f0aae763dee257d"
version = "v10.2.1-beta"
revision = "bbf89bd4d716c184f158d1e1428c2dbef4a18307"
[[projects]]
branch = "master"
name = "github.com/gilbertchen/cli"
packages = ["."]
revision = "565493f259bf868adb54d45d5f4c68d405117adf"
version = "v1.2.0"
revision = "1de0a1836ce9c3ae1bf737a0869c4f04f28a7f98"
[[projects]]
branch = "master"
@@ -120,12 +126,24 @@
packages = ["."]
revision = "2788f0dbd16903de03cb8186e5c7d97b69ad387b"
[[projects]]
name = "github.com/marstr/guid"
packages = ["."]
revision = "8bd9a64bf37eb297b492a4101fb28e80ac0b290f"
version = "v1.1.0"
[[projects]]
branch = "master"
name = "github.com/minio/blake2b-simd"
packages = ["."]
revision = "3f5f724cb5b182a5c278d6d3d55b40e7f8c2efb4"
[[projects]]
branch = "master"
name = "github.com/ncw/swift"
packages = ["."]
revision = "ae9f0ea1605b9aa6434ed5c731ca35d83ba67c55"
[[projects]]
name = "github.com/pkg/errors"
packages = ["."]
@@ -139,10 +157,10 @@
version = "1.0.0"
[[projects]]
name = "github.com/satori/uuid"
name = "github.com/satori/go.uuid"
packages = ["."]
revision = "879c5887cd475cd7864858769793b2ceb0d44feb"
version = "v1.1.0"
revision = "f58768cc1a7a7e77a3bd49e98cdd21419399b6a3"
version = "v1.2.0"
[[projects]]
branch = "master"
@@ -207,6 +225,6 @@
[solve-meta]
analyzer-name = "dep"
analyzer-version = 1
inputs-digest = "95a162eedee5e915fbd1917c3ba5021e646aa2f13a542c7cbeb02bcf30a3acb9"
inputs-digest = "eff5ae2d9507f0d62cd2e5bdedebb5c59d64f70f476b087c01c35d4a5e1be72d"
solver-name = "gps-cdcl"
solver-version = 1

View File

@@ -39,11 +39,11 @@
[[constraint]]
name = "github.com/gilbertchen/azure-sdk-for-go"
version = "10.2.1-beta"
branch = "master"
[[constraint]]
branch = "master"
name = "github.com/gilbertchen/cli"
version = "1.2.0"
[[constraint]]
branch = "master"

README.md (119 lines changed)

@@ -1,129 +1,92 @@
# Duplicacy: A lock-free deduplication cloud backup tool
Duplicacy is a new generation cross-platform cloud backup tool based on the idea of [Lock-Free Deduplication](https://github.com/gilbertchen/duplicacy/wiki/Lock-Free-Deduplication). It is the only cloud backup tool that allows multiple computers to back up to the same storage simultaneously without using any locks (thus readily amenable to various cloud storage services).
Duplicacy is a new generation cross-platform cloud backup tool based on the idea of [Lock-Free Deduplication](https://github.com/gilbertchen/duplicacy/wiki/Lock-Free-Deduplication).
The repository hosts source code, design documents, and binary releases of the command line version. There is also a Duplicacy GUI frontend built for Windows and Mac OS X available from https://duplicacy.com.
This repository hosts source code, design documents, and binary releases of the command line version of Duplicacy. There is also a Duplicacy GUI frontend built for Windows and Mac OS X available from https://duplicacy.com.
There is a special edition of Duplicacy developed for VMware vSphere (ESXi) named [Vertical Backup](https://www.verticalbackup.com) that can back up virtual machine files on ESXi to local drives, network or cloud storages.
## Features
Duplicacy currently supports major cloud storage providers (Amazon S3, Google Cloud Storage, Microsoft Azure, Dropbox, Backblaze B2, Google Drive, Microsoft OneDrive, and Hubic) and offers all essential features of a modern backup tool:
There are 3 core advantages of Duplicacy over any other open-source or commercial backup tools:
* Incremental backup: only back up what has been changed
* Full snapshot: although each backup is incremental, it must behave like a full snapshot for easy restore and deletion
* Deduplication: identical files must be stored as one copy (file-level deduplication), and identical parts from different files must be stored as one copy (block-level deduplication)
* Encryption: encrypt not only file contents but also file paths, sizes, times, etc.
* Deletion: every backup can be deleted independently without affecting others
* Concurrent access: multiple clients can back up to the same storage at the same time
* Snapshot migration: all or selected snapshots can be migrated from one storage to another
* Duplicacy is the *only* cloud backup tool that allows multiple computers to back up to the same cloud storage, taking advantage of cross-computer deduplication whenever possible, without direct communication among them. This feature literally turns any cloud storage server supporting only a basic set of file operations into a sophisticated deduplication-aware server.
The key idea of **[Lock-Free Deduplication](https://github.com/gilbertchen/duplicacy/wiki/Lock-Free-Deduplication)** can be summarized as follows:
* Unlike other chunk-based backup tools where chunks are grouped into pack files and a chunk database is used to track which chunks are stored inside each pack file, Duplicacy takes a database-less approach where every chunk is saved independently using its hash as the file name to facilitate quick lookups. The lack of a centralized chunk database not only makes the implementation less error-prone, but also produces a highly maintainable piece of software with plenty of room for development of new features and usability enhancements.
* Use variable-size chunking algorithm to split files into chunks
* Store each chunk in the storage using a file name derived from its hash, and rely on the file system API to manage chunks without using a centralized indexing database
* Apply a *two-step fossil collection* algorithm to remove chunks that become unreferenced after a backup is deleted
* Duplicacy is fast. While the performance wasn't the top-priority design goal, Duplicacy has been shown to outperform other backup tools by a considerable margin, as indicated by the following results obtained from a [benchmarking experiment](https://github.com/gilbertchen/benchmarking) backing up the [Linux code base](https://github.com/torvalds/linux) using Duplicacy and 3 other open-source backup tools.
[![Comparison of Duplicacy, restic, Attic, duplicity](https://github.com/gilbertchen/duplicacy/blob/master/images/duplicacy_benchmark_speed.png "Comparison of Duplicacy, restic, Attic, duplicity")](https://github.com/gilbertchen/benchmarking)
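A minimal sketch of the chunk-naming idea described above: each chunk's storage path is derived from its content hash, so an upload can be skipped whenever a file with that name already exists. The hash function (SHA-256 here) and the two-character directory nesting are illustrative assumptions, not necessarily Duplicacy's exact scheme.

```go
package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"path"
)

// chunkPath derives a storage path for a chunk from its content hash.
// Nesting under the first two hex characters keeps any single directory
// from growing too large; no chunk database is consulted.
func chunkPath(content []byte) string {
	hash := sha256.Sum256(content) // stand-in for Duplicacy's actual hash
	name := hex.EncodeToString(hash[:])
	return path.Join("chunks", name[:2], name[2:])
}

func main() {
	// Identical content always maps to the same path, so a second client
	// backing up the same data simply finds the chunk already present.
	fmt.Println(chunkPath([]byte("example chunk data")))
}
```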
## Getting Started
* [A brief introduction](https://github.com/gilbertchen/duplicacy/wiki/Quick-Start)
* [Command references](https://github.com/gilbertchen/duplicacy/wiki)
* [Building from source](https://github.com/gilbertchen/duplicacy/wiki/Installation)
## Storages
With Duplicacy, you can back up files to local or networked drives, SFTP servers, or many cloud storage providers. The following table compares the costs of all cloud storages supported by Duplicacy.
Duplicacy currently provides the following storage backends:
| Type | Storage (monthly) | Upload | Download | API Charge |
|:------------:|:-------------:|:------------------:|:--------------:|:-----------:|
| Amazon S3 | $0.023/GB | free | $0.09/GB | [yes](https://aws.amazon.com/s3/pricing/) |
| Wasabi | $3.99 first 1TB <br> $0.0039/GB additional | free | $.04/GB | no |
| DigitalOcean Spaces| $5 first 250GB <br> $0.02/GB additional | free | first 1TB free <br> $0.01/GB additional| no |
| Backblaze B2 | $0.005/GB | free | $0.02/GB | [yes](https://www.backblaze.com/b2/b2-transactions-price.html) |
| Google Cloud Storage| $0.026/GB | free |$ 0.12/GB | [yes](https://cloud.google.com/storage/pricing) |
| Google Drive | 15GB free <br> $1.99/100GB <br> $9.99/TB | free | free | no |
| Microsoft Azure | $0.0184/GB | free | free | [yes](https://azure.microsoft.com/en-us/pricing/details/storage/blobs/) |
| Microsoft OneDrive | 5GB free <br> $1.99/50GB <br> $5.83/TB | free | free | no |
| Dropbox | 2GB free <br> $8.25/TB | free | free | no |
| Hubic | 25GB free <br> €1/100GB <br> €5/10TB | free | free | no |
* Local disk
* SFTP
* Dropbox
* Amazon S3
* Wasabi
* DigitalOcean Spaces
* Google Cloud Storage
* Microsoft Azure
* Backblaze B2
* Google Drive
* Microsoft OneDrive
* Hubic
* OpenStack Swift
* WebDAV (under beta testing)
* pcloud (via WebDAV)
* Box.com (via WebDAV)
Please consult the [wiki page](https://github.com/gilbertchen/duplicacy/wiki/Storage-Backends) on how to set up Duplicacy to work with each cloud storage.
It should be noted that their performances vary a lot. A [performance comparison](https://github.com/gilbertchen/cloud-storage-comparison) of these storages measured the running times (in seconds) of backing up and restoring the [Linux code base](https://github.com/torvalds/linux) as follows:
For reference, the following chart shows the running times (in seconds) of backing up the [Linux code base](https://github.com/torvalds/linux) to each of those supported storages:
| Storage | initial backup | 2nd | 3rd | initial restore | 2nd | 3rd |
|:--------------------:|:------:|:----:|:-----:|:----:|:-----:|:----:|
| SFTP | 31.5 | 6.6 | 20.6 | 22.5 | 7.8 | 18.4 |
| Amazon S3 | 41.1 | 5.9 | 21.9 | 27.7 | 7.6 | 23.5 |
| Wasabi | 38.7 | 5.7 | 31.7 | 25.7 | 6.5 | 23.2 |
| DigitalOcean Spaces | 51.6 | 7.1 | 31.7 | 29.3 | 6.4 | 27.6 |
| Backblaze B2 | 106.7 | 24.0 | 88.2 | 67.9 | 14.4 | 39.1 |
| Google Cloud Storage | 76.9 | 11.9 | 33.1 | 39.5 | 9.9 | 26.2 |
| Google Drive | 139.3 | 14.7 | 45.2 | 129.4 | 17.8 | 54.4 |
| Microsoft Azure | 35.0 | 5.4 | 20.4 | 30.7 | 7.1 | 21.5 |
| Microsoft OneDrive | 250.0 | 31.6 | 80.2 | 333.4 | 26.2 | 82.0 |
| Dropbox | 267.2 | 35.8 | 113.7 | 164.0 | 31.6 | 80.3 |
For more details please visit https://github.com/gilbertchen/cloud-storage-comparison.
[![Comparison of Cloud Storages](https://github.com/gilbertchen/duplicacy/blob/master/images/duplicacy_benchmark_cloud.png "Comparison of Cloud Storages")](https://github.com/gilbertchen/cloud-storage-comparison)
## Feature Comparison with Other Backup Tools
For complete benchmark results please visit https://github.com/gilbertchen/cloud-storage-comparison.
## Comparison with Other Backup Tools
[duplicity](http://duplicity.nongnu.org) works by applying the rsync algorithm (or more specific, the [librsync](https://github.com/librsync/librsync) library)
to find the differences from previous backups and only then uploading the differences. It is the only existing backup tool with extensive cloud support -- the [long list](http://duplicity.nongnu.org/duplicity.1.html#sect7) of storage backends covers almost every cloud provider one can think of. However, duplicity's biggest flaw lies in its incremental model -- a chain of dependent backups starts with a full backup followed by a number of incremental ones, and ends when another full backup is uploaded. Deleting one backup will render useless all the subsequent backups on the same chain. Periodic full backups are required, in order to make previous backups disposable.
[bup](https://github.com/bup/bup) also uses librsync to split files into chunks but save chunks in the git packfile format. It doesn't support any cloud storage, or deletion of old backups.
[Obnam](http://obnam.org) got the incremental backup model right in the sense that every incremental backup is actually a full snapshot. Although Obnam also splits files into chunks, it does not adopt either the rsync algorithm or the variable-size chunking algorithm. As a result, deletions or insertions of a few bytes will foil the
[deduplication](http://obnam.org/faq/dedup).
Deletion of old backups is possible, but no cloud storages are supported.
Multiple clients can back up to the same storage, but only sequential access is granted by the [locking on-disk data structures](http://obnam.org/locking/).
It is unclear if the lack of cloud backends is due to difficulties in porting the locking data structures to cloud storage APIs.
[Duplicati](https://duplicati.com) is one of the first backup tools that adopt the chunk-based approach to split files into chunks which are then uploaded to the storage. The chunk-based approach got the incremental backup model right in the sense that every incremental backup is actually a full snapshot. As Duplicati splits files into fixed-size chunks, deletions or insertions of a few bytes will foil the deduplication. Cloud support is extensive, but multiple clients can't back up to the same storage location.
[Attic](https://attic-backup.org) has been acclaimed by some as the [Holy Grail of backups](https://www.stavros.io/posts/holy-grail-backups). It follows the same incremental backup model as Obnam, but embraces the variable-size chunk algorithm for better performance and better deduplication. Deletions of old backup is also supported. However, no cloud backends are implemented, as in Obnam. Although concurrent backups from multiple clients to the same storage is in theory possible by the use of locking, it is
[Attic](https://attic-backup.org) has been acclaimed by some as the [Holy Grail of backups](https://www.stavros.io/posts/holy-grail-backups). It follows the same incremental backup model like Duplicati, but embraces the variable-size chunk algorithm for better performance and higher deduplication efficiency (not susceptible to byte insertion and deletion any more). Deletions of old backup is also supported. However, no cloud backends are implemented. Although concurrent backups from multiple clients to the same storage is in theory possible by the use of locking, it is
[not recommended](http://librelist.com/browser//attic/2014/11/11/backing-up-multiple-servers-into-a-single-repository/#e96345aa5a3469a87786675d65da492b) by the developer due to chunk indices being kept in a local cache.
Concurrent access is not only a convenience; it is a necessity for better deduplication. For instance, if multiple machines with the same OS installed can back up their entire drives to the same storage, only one copy of the system files needs to be stored, greatly reducing the storage space regardless of the number of machines. Attic still adopts the traditional approach of using a centralized indexing database to manage chunks, and relies heavily on caching to improve performance. The presence of exclusive locking makes it hard to be adapted for cloud storage APIs and reduces the level of deduplication.
Concurrent access is not only a convenience; it is a necessity for better deduplication. For instance, if multiple machines with the same OS installed can back up their entire drives to the same storage, only one copy of the system files needs to be stored, greatly reducing the storage space regardless of the number of machines. Attic still adopts the traditional approach of using a centralized indexing database to manage chunks, and relies heavily on caching to improve performance. The presence of exclusive locking makes it hard to be extended to cloud storages.
[restic](https://restic.github.io) is a more recent addition. It is worth mentioning here because, like Duplicacy, it is written in Go. It uses a format similar to the git packfile format. Multiple clients backing up to the same storage are still guarded by
[locks](https://github.com/restic/restic/blob/master/doc/Design.md#locks). A prune operation will therefore completely block all other clients connected to the storage from doing their regular backups. Moreover, since most cloud storage services do not provide a locking service, the best effort is to use some basic file operations to simulate a lock, but distributed locking is known to be a hard problem and it is unclear how reliable restic's lock implementation is. A faulty implementation may cause a prune operation to accidentally delete data still in use, resulting in unrecoverable data loss. This is the exact problem that we avoided by taking the lock-free approach.
[restic](https://restic.github.io) is a more recent addition. It uses a format similar to the git packfile format. Multiple clients backing up to the same storage are still guarded by
[locks](https://github.com/restic/restic/blob/master/doc/Design.md#locks), and because a chunk database is used, deduplication isn't real-time (different clients sharing the same files will upload different copies of the same chunks). A prune operation will completely block all other clients connected to the storage from doing their regular backups. Moreover, since most cloud storage services do not provide a locking service, the best effort is to use some basic file operations to simulate a lock, but distributed locking is known to be a hard problem and it is unclear how reliable restic's lock implementation is. A faulty implementation may cause a prune operation to accidentally delete data still in use, resulting in unrecoverable data loss. This is the exact problem that we avoided by taking the lock-free approach.
The following table compares the feature lists of all these backup tools:
| Feature/Tool | duplicity | bup | Obnam | Attic | restic | **Duplicacy** |
| Feature/Tool | duplicity | bup | Duplicati | Attic | restic | **Duplicacy** |
|:------------------:|:---------:|:---:|:-----------------:|:---------------:|:-----------------:|:-------------:|
| Incremental Backup | Yes | Yes | Yes | Yes | Yes | **Yes** |
| Full Snapshot | No | Yes | Yes | Yes | Yes | **Yes** |
| Compression | Yes | Yes | Yes | Yes | No | **Yes** |
| Deduplication | Weak | Yes | Weak | Yes | Yes | **Yes** |
| Encryption | Yes | Yes | Yes | Yes | Yes | **Yes** |
| Deletion | No | No | Yes | Yes | No | **Yes** |
| Concurrent Access | No | No | Exclusive locking | Not recommended | Exclusive locking | **Lock-free** |
| Cloud Support | Extensive | No | No | No | S3, B2, OpenStack | **S3, GCS, Azure, Dropbox, Backblaze B2, Google Drive, OneDrive, and Hubic**|
| Concurrent Access | No | No | No | Not recommended | Exclusive locking | **Lock-free** |
| Cloud Support | Extensive | No | Extensive | No | Limited | **Extensive** |
| Snapshot Migration | No | No | No | No | No | **Yes** |
## Performance Comparison with Other Backup Tools
Duplicacy is not only more feature-rich but also faster than other backup tools. The following table lists the running times in seconds of backing up the [Linux code base](https://github.com/torvalds/linux) using Duplicacy and 3 other tools. Clearly Duplicacy is the fastest by a significant margin.
| | Duplicacy | restic | Attic | duplicity |
|:------------------:|:----------------:|:----------:|:----------:|:-----------:|
| Initial backup | 13.7 | 20.7 | 26.9 | 44.2 |
| 2nd backup | 4.8 | 8.0 | 15.4 | 19.5 |
| 3rd backup | 6.9 | 11.9 | 19.6 | 29.8 |
| 4th backup | 3.3 | 7.0 | 13.7 | 18.6 |
| 5th backup | 9.9 | 11.4 | 19.9 | 28.0 |
| 6th backup | 3.8 | 8.0 | 16.8 | 22.0 |
| 7th backup | 5.1 | 7.8 | 14.3 | 21.6 |
| 8th backup | 9.5 | 13.5 | 18.3 | 35.0 |
| 9th backup | 4.3 | 9.0 | 15.7 | 24.9 |
| 10th backup | 7.9 | 20.2 | 32.2 | 35.0 |
| 11th backup | 4.6 | 9.1 | 16.8 | 28.1 |
| 12th backup | 7.4 | 12.0 | 21.7 | 37.4 |
For more details and other speed comparison results, please visit https://github.com/gilbertchen/benchmarking. There you can also find test scripts that you can use to run your own experiments.
## License
* Free for personal use or commercial trial

View File

@@ -16,6 +16,9 @@ import (
"runtime"
"strconv"
"strings"
"net/http"
_ "net/http/pprof"
"github.com/gilbertchen/cli"
@@ -29,6 +32,7 @@ const (
)
var ScriptEnabled bool
var GitCommit = "unofficial"
func getRepositoryPreference(context *cli.Context, storageName string) (repository string,
preference *duplicacy.Preference) {
@@ -68,6 +72,10 @@ func getRepositoryPreference(context *cli.Context, storageName string) (reposito
}
if storageName == "" {
if duplicacy.Preferences[0].RepositoryPath != "" {
repository = duplicacy.Preferences[0].RepositoryPath
duplicacy.LOG_INFO("REPOSITORY_SET", "Repository set to %s", repository)
}
return repository, &duplicacy.Preferences[0]
}
@@ -77,6 +85,12 @@ func getRepositoryPreference(context *cli.Context, storageName string) (reposito
duplicacy.LOG_ERROR("STORAGE_NONE", "No storage named '%s' is found", storageName)
return "", nil
}
if preference.RepositoryPath != "" {
repository = preference.RepositoryPath
duplicacy.LOG_INFO("REPOSITORY_SET", "Repository set to %s", repository)
}
return repository, preference
}
@@ -138,6 +152,15 @@ func setGlobalOptions(context *cli.Context) {
ScriptEnabled = false
}
address := context.GlobalString("profile")
if address != "" {
go func() {
http.ListenAndServe(address, nil)
}()
}
duplicacy.RunInBackground = context.GlobalBool("background")
}
@@ -216,7 +239,10 @@ func configRepository(context *cli.Context, init bool) {
var storageURL string
if init {
storageName = "default"
storageName = context.String("storage-name")
if len(storageName) == 0 {
storageName = "default"
}
snapshotID = context.Args()[0]
storageURL = context.Args()[1]
} else {
@@ -278,9 +304,14 @@ func configRepository(context *cli.Context, init bool) {
}
}
repositoryPath := ""
if context.String("repository") != "" {
repositoryPath = context.String("repository")
}
preference := duplicacy.Preference{
Name: storageName,
SnapshotID: snapshotID,
RepositoryPath: repositoryPath,
StorageURL: storageURL,
Encrypted: context.Bool("encrypt"),
}
@@ -412,8 +443,12 @@ func configRepository(context *cli.Context, init bool) {
duplicacy.SavePreferences()
if repositoryPath == "" {
repositoryPath = repository
}
duplicacy.LOG_INFO("REPOSITORY_INIT", "%s will be backed up to %s with id %s",
repository, preference.StorageURL, preference.SnapshotID)
repositoryPath, preference.StorageURL, preference.SnapshotID)
}
type TriBool struct {
@@ -498,6 +533,8 @@ func setPreference(context *cli.Context) {
if triBool.IsSet() {
newPreference.DoNotSavePassword = triBool.IsTrue()
}
newPreference.NobackupFile = context.String("nobackup-file")
key := context.String("key")
value := context.String("value")
@@ -589,11 +626,45 @@ func changePassword(context *cli.Context) {
iterations = duplicacy.CONFIG_DEFAULT_ITERATIONS
}
description, err := json.MarshalIndent(config, "", " ")
if err != nil {
duplicacy.LOG_ERROR("CONFIG_MARSHAL", "Failed to marshal the config: %v", err)
return
}
configPath := path.Join(duplicacy.GetDuplicacyPreferencePath(), "config")
err = ioutil.WriteFile(configPath, description, 0600)
if err != nil {
duplicacy.LOG_ERROR("CONFIG_SAVE", "Failed to save the old config to %s: %v", configPath, err)
return
}
duplicacy.LOG_INFO("CONFIG_SAVE", "The old config has been temporarily saved to %s", configPath)
removeLocalCopy := false
defer func() {
if removeLocalCopy {
err = os.Remove(configPath)
if err != nil {
duplicacy.LOG_WARN("CONFIG_CLEAN", "Failed to delete %s: %v", configPath, err)
} else {
duplicacy.LOG_INFO("CONFIG_CLEAN", "The local copy of the old config has been removed")
}
}
} ()
err = storage.DeleteFile(0, "config")
if err != nil {
duplicacy.LOG_ERROR("CONFIG_DELETE", "Failed to delete the old config from the storage: %v", err)
return
}
duplicacy.UploadConfig(storage, config, newPassword, iterations)
duplicacy.SavePassword(*preference, "password", newPassword)
duplicacy.LOG_INFO("STORAGE_SET", "The password for storage %s has been changed", preference.StorageURL)
removeLocalCopy = true
}
func backupRepository(context *cli.Context) {
@@ -640,16 +711,18 @@ func backupRepository(context *cli.Context) {
showStatistics := context.Bool("stats")
enableVSS := context.Bool("vss")
vssTimeout := context.Int("vss-timeout")
dryRun := context.Bool("dry-run")
uploadRateLimit := context.Int("limit-rate")
enumOnly := context.Bool("enum-only")
storage.SetRateLimits(0, uploadRateLimit)
backupManager := duplicacy.CreateBackupManager(preference.SnapshotID, storage, repository, password)
backupManager := duplicacy.CreateBackupManager(preference.SnapshotID, storage, repository, password, preference.NobackupFile)
duplicacy.SavePassword(*preference, "password", password)
backupManager.SetupSnapshotCache(preference.Name)
backupManager.SetDryRun(dryRun)
backupManager.Backup(repository, quickMode, threads, context.String("t"), showStatistics, enableVSS)
backupManager.Backup(repository, quickMode, threads, context.String("t"), showStatistics, enableVSS, vssTimeout, enumOnly)
runScript(context, preference.Name, "post")
}
@@ -733,7 +806,7 @@ func restoreRepository(context *cli.Context) {
duplicacy.LOG_DEBUG("REGEX_DEBUG", "There are %d compiled regular expressions stored", len(duplicacy.RegexMap))
storage.SetRateLimits(context.Int("limit-rate"), 0)
backupManager := duplicacy.CreateBackupManager(preference.SnapshotID, storage, repository, password)
backupManager := duplicacy.CreateBackupManager(preference.SnapshotID, storage, repository, password, preference.NobackupFile)
duplicacy.SavePassword(*preference, "password", password)
backupManager.SetupSnapshotCache(preference.Name)
@@ -773,7 +846,7 @@ func listSnapshots(context *cli.Context) {
tag := context.String("t")
revisions := getRevisions(context)
backupManager := duplicacy.CreateBackupManager(preference.SnapshotID, storage, repository, password)
backupManager := duplicacy.CreateBackupManager(preference.SnapshotID, storage, repository, password, preference.NobackupFile)
duplicacy.SavePassword(*preference, "password", password)
id := preference.SnapshotID
@@ -821,7 +894,7 @@ func checkSnapshots(context *cli.Context) {
tag := context.String("t")
revisions := getRevisions(context)
backupManager := duplicacy.CreateBackupManager(preference.SnapshotID, storage, repository, password)
backupManager := duplicacy.CreateBackupManager(preference.SnapshotID, storage, repository, password, preference.NobackupFile)
duplicacy.SavePassword(*preference, "password", password)
id := preference.SnapshotID
@@ -876,7 +949,7 @@ func printFile(context *cli.Context) {
snapshotID = context.String("id")
}
backupManager := duplicacy.CreateBackupManager(preference.SnapshotID, storage, repository, password)
backupManager := duplicacy.CreateBackupManager(preference.SnapshotID, storage, repository, password, preference.NobackupFile)
duplicacy.SavePassword(*preference, "password", password)
backupManager.SetupSnapshotCache(preference.Name)
@@ -932,11 +1005,11 @@ func diff(context *cli.Context) {
}
compareByHash := context.Bool("hash")
backupManager := duplicacy.CreateBackupManager(preference.SnapshotID, storage, repository, password)
backupManager := duplicacy.CreateBackupManager(preference.SnapshotID, storage, repository, password, preference.NobackupFile)
duplicacy.SavePassword(*preference, "password", password)
backupManager.SetupSnapshotCache(preference.Name)
backupManager.SnapshotManager.Diff(repository, snapshotID, revisions, path, compareByHash)
backupManager.SnapshotManager.Diff(repository, snapshotID, revisions, path, compareByHash, preference.NobackupFile)
runScript(context, preference.Name, "post")
}
@@ -975,7 +1048,7 @@ func showHistory(context *cli.Context) {
revisions := getRevisions(context)
showLocalHash := context.Bool("hash")
backupManager := duplicacy.CreateBackupManager(preference.SnapshotID, storage, repository, password)
backupManager := duplicacy.CreateBackupManager(preference.SnapshotID, storage, repository, password, preference.NobackupFile)
duplicacy.SavePassword(*preference, "password", password)
backupManager.SetupSnapshotCache(preference.Name)
@@ -994,12 +1067,17 @@ func pruneSnapshots(context *cli.Context) {
os.Exit(ArgumentExitCode)
}
threads := context.Int("threads")
if threads < 1 {
threads = 1
}
repository, preference := getRepositoryPreference(context, "")
runScript(context, preference.Name, "pre")
duplicacy.LOG_INFO("STORAGE_SET", "Storage set to %s", preference.StorageURL)
storage := duplicacy.CreateStorage(*preference, false, 1)
storage := duplicacy.CreateStorage(*preference, false, threads)
if storage == nil {
return
}
@@ -1033,12 +1111,12 @@ func pruneSnapshots(context *cli.Context) {
os.Exit(ArgumentExitCode)
}
backupManager := duplicacy.CreateBackupManager(preference.SnapshotID, storage, repository, password)
backupManager := duplicacy.CreateBackupManager(preference.SnapshotID, storage, repository, password, preference.NobackupFile)
duplicacy.SavePassword(*preference, "password", password)
backupManager.SetupSnapshotCache(preference.Name)
backupManager.SnapshotManager.PruneSnapshots(selfID, snapshotID, revisions, tags, retentions,
exhaustive, exclusive, ignoredIDs, dryRun, deleteOnly, collectOnly)
exhaustive, exclusive, ignoredIDs, dryRun, deleteOnly, collectOnly, threads)
runScript(context, preference.Name, "post")
}
@@ -1073,7 +1151,7 @@ func copySnapshots(context *cli.Context) {
sourcePassword = duplicacy.GetPassword(*source, "password", "Enter source storage password:", false, false)
}
sourceManager := duplicacy.CreateBackupManager(source.SnapshotID, sourceStorage, repository, sourcePassword)
sourceManager := duplicacy.CreateBackupManager(source.SnapshotID, sourceStorage, repository, sourcePassword, source.NobackupFile)
sourceManager.SetupSnapshotCache(source.Name)
duplicacy.SavePassword(*source, "password", sourcePassword)
@@ -1106,7 +1184,7 @@ func copySnapshots(context *cli.Context) {
destinationStorage.SetRateLimits(0, context.Int("upload-limit-rate"))
destinationManager := duplicacy.CreateBackupManager(destination.SnapshotID, destinationStorage, repository,
destinationPassword)
destinationPassword, destination.NobackupFile)
duplicacy.SavePassword(*destination, "password", destinationPassword)
destinationManager.SetupSnapshotCache(destination.Name)
@@ -1147,6 +1225,11 @@ func infoStorage(context *cli.Context) {
DoNotSavePassword: true,
}
storageName := context.String("storage-name")
if storageName != "" {
preference.Name = storageName
}
if resetPasswords {
// We don't want password entered for the info command to overwrite the saved password for the default storage,
// so we simply assign an empty name.
@@ -1170,6 +1253,63 @@ func infoStorage(context *cli.Context) {
} else {
config.Print()
}
dirs, _, err := storage.ListFiles(0, "snapshots/")
if err != nil {
duplicacy.LOG_WARN("STORAGE_LIST", "Failed to list repository ids: %v", err)
return
}
for _, dir := range dirs {
if len(dir) > 0 && dir[len(dir)-1] == '/' {
duplicacy.LOG_INFO("STORAGE_SNAPSHOT", "%s", dir[0:len(dir) - 1])
}
}
}
func benchmark(context *cli.Context) {
setGlobalOptions(context)
defer duplicacy.CatchLogException()
fileSize := context.Int("file-size")
if fileSize == 0 {
fileSize = 256
}
chunkCount := context.Int("chunk-count")
if chunkCount == 0 {
chunkCount = 64
}
chunkSize := context.Int("chunk-size")
if chunkSize == 0 {
chunkSize = 4
}
downloadThreads := context.Int("download-threads")
if downloadThreads < 1 {
downloadThreads = 1
}
uploadThreads := context.Int("upload-threads")
if uploadThreads < 1 {
uploadThreads = 1
}
threads := downloadThreads
if (threads < uploadThreads) {
threads = uploadThreads
}
repository, preference := getRepositoryPreference(context, context.String("storage"))
duplicacy.LOG_INFO("STORAGE_SET", "Storage set to %s", preference.StorageURL)
storage := duplicacy.CreateStorage(*preference, false, threads)
if storage == nil {
return
}
duplicacy.Benchmark(repository, storage, int64(fileSize) * 1000000, chunkSize * 1024 * 1024, chunkCount, uploadThreads, downloadThreads)
}
func main() {
@@ -1204,7 +1344,7 @@ func main() {
},
cli.IntFlag{
Name: "iterations",
Usage: "the number of iterations used in storage key deriviation (default is 16384)",
Usage: "the number of iterations used in storage key derivation (default is 16384)",
Argument: "<i>",
},
cli.StringFlag{
@@ -1212,6 +1352,16 @@ func main() {
Usage: "alternate location for the .duplicacy directory (absolute or relative to current directory)",
Argument: "<path>",
},
cli.StringFlag{
Name: "storage-name",
Usage: "assign a name to the storage",
Argument: "<name>",
},
cli.StringFlag{
Name: "repository",
Usage: "initialize a new repository at the specified path rather than the current working directory",
Argument: "<path>",
},
},
Usage: "Initialize the storage if necessary and the current directory as the repository",
ArgsUsage: "<snapshot id> <storage url>",
@@ -1247,17 +1397,27 @@ func main() {
},
cli.BoolFlag{
Name: "dry-run",
Usage: "Dry run for testing, don't backup anything. Use with -stats and -d",
Usage: "dry run for testing, don't backup anything. Use with -stats and -d",
},
cli.BoolFlag{
Name: "vss",
Usage: "enable the Volume Shadow Copy service (Windows only)",
Usage: "enable the Volume Shadow Copy service (Windows and macOS using APFS only)",
},
cli.IntFlag{
Name: "vss-timeout",
Value: 0,
Usage: "the timeout in seconds to wait for the Volume Shadow Copy operation to complete",
Argument: "<timeout>",
},
cli.StringFlag{
Name: "storage",
Usage: "backup to the specified storage instead of the default one",
Argument: "<storage name>",
},
cli.BoolFlag{
Name: "enum-only",
Usage: "enumerate the repository recursively and then exit",
},
},
Usage: "Save a snapshot of the repository to the storage",
ArgsUsage: " ",
@@ -1510,7 +1670,7 @@ func main() {
},
cli.StringSliceFlag{
Name: "t",
Usage: "delete snapshots with the specifed tags",
Usage: "delete snapshots with the specified tags",
Argument: "<tag>",
},
cli.StringSliceFlag{
@@ -1524,7 +1684,7 @@ func main() {
},
cli.BoolFlag{
Name: "exclusive",
Usage: "assume exclusive acess to the storage (disable two-step fossil collection)",
Usage: "assume exclusive access to the storage (disable two-step fossil collection)",
},
cli.BoolFlag{
Name: "dry-run, d",
@@ -1548,6 +1708,12 @@ func main() {
Usage: "prune snapshots from the specified storage",
Argument: "<storage name>",
},
cli.IntFlag{
Name: "threads",
Value: 1,
Usage: "number of threads used to prune unreferenced chunks",
Argument: "<n>",
},
},
Usage: "Prune snapshots by revision, tag, or retention policy",
ArgsUsage: " ",
@@ -1564,7 +1730,7 @@ func main() {
},
cli.IntFlag{
Name: "iterations",
Usage: "the number of iterations used in storage key deriviation (default is 16384)",
Usage: "the number of iterations used in storage key derivation (default is 16384)",
Argument: "<i>",
},
},
@@ -1598,7 +1764,7 @@ func main() {
},
cli.IntFlag{
Name: "iterations",
Usage: "the number of iterations used in storage key deriviation (default is 16384)",
Usage: "the number of iterations used in storage key derivation (default is 16384)",
Argument: "<i>",
},
cli.StringFlag{
@@ -1610,6 +1776,11 @@ func main() {
Name: "bit-identical",
Usage: "(when using -copy) make the new storage bit-identical to also allow rsync etc.",
},
cli.StringFlag{
Name: "repository",
Usage: "specify the path of the repository (instead of the current working directory)",
Argument: "<path>",
},
},
Usage: "Add an additional storage to be used for the existing repository",
ArgsUsage: "<storage name> <snapshot id> <storage url>",
@@ -1643,6 +1814,12 @@ func main() {
Value: &TriBool{},
Arg: "true",
},
cli.StringFlag{
Name: "nobackup-file",
Usage: "Directories containing a file with this name will not be backed up",
Argument: "<file name>",
Value: "",
},
cli.StringFlag{
Name: "key",
Usage: "add a key/password whose value is supplied by the -value option",
@@ -1699,7 +1876,7 @@ func main() {
cli.IntFlag{
Name: "threads",
Value: 1,
Usage: "number of downloading threads",
Usage: "number of uploading threads",
Argument: "<n>",
},
},
@@ -1720,6 +1897,11 @@ func main() {
Usage: "retrieve saved passwords from the specified repository",
Argument: "<repository directory>",
},
cli.StringFlag{
Name: "storage-name",
Usage: "the storage name to be assigned to the storage url",
Argument: "<name>",
},
cli.BoolFlag{
Name: "reset-passwords",
Usage: "take passwords from input rather than keychain/keyring",
@@ -1729,6 +1911,45 @@ func main() {
ArgsUsage: "<storage url>",
Action: infoStorage,
},
{
Name: "benchmark",
Flags: []cli.Flag{
cli.IntFlag{
Name: "file-size",
Usage: "the size of the local file to write to and read from (in MB, default to 256)",
Argument: "<size>",
},
cli.IntFlag{
Name: "chunk-count",
Usage: "the number of chunks to upload and download (default to 64)",
Argument: "<count>",
},
cli.IntFlag{
Name: "chunk-size",
Usage: "the size of chunks to upload and download (in MB, default to 4)",
Argument: "<size>",
},
cli.IntFlag{
Name: "upload-threads",
Usage: "the number of upload threads (default to 1)",
Argument: "<n>",
},
cli.IntFlag{
Name: "download-threads",
Usage: "the number of download threads (default to 1)",
Argument: "<n>",
},
cli.StringFlag{
Name: "storage",
Usage: "run the download/upload test agaist the specified storage",
Argument: "<storage name>",
},
},
Usage: "Run a set of benchmarks to test download and upload speeds",
ArgsUsage: " ",
Action: benchmark,
},
}
app.Flags = []cli.Flag{
@@ -1756,13 +1977,23 @@ func main() {
Name: "background",
Usage: "read passwords, tokens, or keys only from keychain/keyring or env",
},
cli.StringFlag{
Name: "profile",
Value: "",
Usage: "enable the profiling tool and listen on the specified address:port",
Argument: "<address:port>",
},
cli.StringFlag{
Name: "comment",
Usage: "add a comment to identify the process",
},
}
app.HideVersion = true
app.Name = "duplicacy"
app.HelpName = "duplicacy"
app.Usage = "A new generation cloud backup tool based on lock-free deduplication"
app.Version = "2.0.10"
app.Version = "2.1.2" + " (" + GitCommit + ")"
// If the program is interrupted, call the RunAtError function.
c := make(chan os.Signal, 1)

(Binary image file added, 60 KiB, not shown)

(Binary image file added, 56 KiB, not shown)

View File

@@ -55,7 +55,7 @@ func CreateACDStorage(tokenFile string, storagePath string, threads int) (storag
return nil, err
}
} else if !isDir {
return nil, fmt.Errorf("%s/%s is not a directory", storagePath+"/"+dir)
return nil, fmt.Errorf("%s is not a directory", storagePath+"/"+dir)
}
storage.idCache[dir] = dirID
}

View File

@@ -40,6 +40,7 @@ var B2AuthorizationURL = "https://api.backblazeb2.com/b2api/v1/b2_authorize_acco
type B2Client struct {
HTTPClient *http.Client
AccountID string
ApplicationKeyID string
ApplicationKey string
AuthorizationToken string
APIURL string
@@ -53,11 +54,11 @@ type B2Client struct {
TestMode bool
}
func NewB2Client(accountID string, applicationKey string) *B2Client {
func NewB2Client(applicationKeyID string, applicationKey string) *B2Client {
client := &B2Client{
HTTPClient: http.DefaultClient,
AccountID: accountID,
ApplicationKey: applicationKey,
HTTPClient: http.DefaultClient,
ApplicationKeyID: applicationKeyID,
ApplicationKey: applicationKey,
}
return client
}
@@ -119,7 +120,7 @@ func (client *B2Client) call(url string, method string, requestHeaders map[strin
}
if url == B2AuthorizationURL {
request.Header.Set("Authorization", "Basic "+base64.StdEncoding.EncodeToString([]byte(client.AccountID+":"+client.ApplicationKey)))
request.Header.Set("Authorization", "Basic "+base64.StdEncoding.EncodeToString([]byte(client.ApplicationKeyID+":"+client.ApplicationKey)))
} else {
request.Header.Set("Authorization", client.AuthorizationToken)
}
@@ -153,7 +154,7 @@ func (client *B2Client) call(url string, method string, requestHeaders map[strin
return response.Body, response.Header, response.ContentLength, nil
}
LOG_DEBUG("BACKBLAZE_CALL", "URL request '%s' returned status code %d", url, response.StatusCode)
LOG_DEBUG("BACKBLAZE_CALL", "URL request '%s %s' returned status code %d", method, url, response.StatusCode)
io.Copy(ioutil.Discard, response.Body)
response.Body.Close()
@@ -170,7 +171,6 @@ func (client *B2Client) call(url string, method string, requestHeaders map[strin
continue
} else if response.StatusCode == 404 {
if http.MethodHead == method {
LOG_DEBUG("BACKBLAZE_CALL", "URL request '%s' returned status code %d", url, response.StatusCode)
return nil, nil, 0, nil
}
} else if response.StatusCode == 416 {
@@ -226,6 +226,10 @@ func (client *B2Client) AuthorizeAccount() (err error) {
return err
}
// The account id may be different from the application key id so we're getting the account id from the returned
// json object here, which is needed by the b2_list_buckets call.
client.AccountID = output.AccountID
client.AuthorizationToken = output.AuthorizationToken
client.APIURL = output.APIURL
client.DownloadURL = output.DownloadURL
@@ -234,7 +238,7 @@ func (client *B2Client) AuthorizeAccount() (err error) {
}
type ListBucketOutput struct {
AccoundID string
AccountID string
BucketID string
BucketName string
BucketType string
@@ -580,7 +584,7 @@ func (client *B2Client) UploadFile(filePath string, content []byte, rateLimit in
LOG_DEBUG("BACKBLAZE_UPLOAD", "URL request '%s' returned status code %d", client.UploadURL, response.StatusCode)
if response.StatusCode == 401 {
LOG_INFO("BACKBLAZE_UPLOAD", "Re-authorizatoin required")
LOG_INFO("BACKBLAZE_UPLOAD", "Re-authorization required")
client.UploadURL = ""
client.UploadToken = ""
continue
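
With the changes in this file the client is constructed from a B2 application key ID rather than an account ID, and the account ID only becomes known after b2_authorize_account returns. A minimal usage sketch under that assumption (the credentials below are placeholders and error handling is trimmed):

client := NewB2Client("000123456789abc0000000001", "K000exampleApplicationKey")
if err := client.AuthorizeAccount(); err != nil {
return err
}
// client.AccountID is now filled in from the b2_authorize_account response
// and is what gets passed to the b2_list_buckets call.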

View File

@@ -210,6 +210,7 @@ func (storage *B2Storage) GetFileInfo(threadIndex int, filePath string) (exist b
// DownloadFile reads the file at 'filePath' into the chunk.
func (storage *B2Storage) DownloadFile(threadIndex int, filePath string, chunk *Chunk) (err error) {
filePath = strings.Replace(filePath, " ", "%20", -1)
readCloser, _, err := storage.clients[threadIndex].DownloadFile(filePath)
if err != nil {
return err
@@ -223,6 +224,7 @@ func (storage *B2Storage) DownloadFile(threadIndex int, filePath string, chunk *
// UploadFile writes 'content' to the file at 'filePath'.
func (storage *B2Storage) UploadFile(threadIndex int, filePath string, content []byte) (err error) {
filePath = strings.Replace(filePath, " ", "%20", -1)
return storage.clients[threadIndex].UploadFile(filePath, content, storage.UploadRateLimit/len(storage.clients))
}

View File

@@ -33,6 +33,8 @@ type BackupManager struct {
snapshotCache *FileStorage // for copies of chunks needed by snapshots
config *Config // contains a number of options
nobackupFile string // don't backup directory when this file name is found
}
func (manager *BackupManager) SetDryRun(dryRun bool) {
@@ -42,7 +44,7 @@ func (manager *BackupManager) SetDryRun(dryRun bool) {
// CreateBackupManager creates a backup manager using the specified 'storage'. 'snapshotID' is a unique id to
// identify snapshots created for this repository. 'top' is the top directory of the repository. 'password' is the
// master key which can be nil if encryption is not enabled.
func CreateBackupManager(snapshotID string, storage Storage, top string, password string) *BackupManager {
func CreateBackupManager(snapshotID string, storage Storage, top string, password string, nobackupFile string) *BackupManager {
config, _, err := DownloadConfig(storage, password)
if err != nil {
@@ -63,6 +65,8 @@ func CreateBackupManager(snapshotID string, storage Storage, top string, passwor
SnapshotManager: snapshotManager,
config: config,
nobackupFile: nobackupFile,
}
if IsDebugging() {
@@ -159,7 +163,7 @@ func setEntryContent(entries []*Entry, chunkLengths []int, offset int) {
// unmodified files with last backup). Otherwise (or if this is the first backup), the entire repository will
// be scanned to create the snapshot. 'tag' is the tag assigned to the new snapshot.
func (manager *BackupManager) Backup(top string, quickMode bool, threads int, tag string,
showStatistics bool, shadowCopy bool) bool {
showStatistics bool, shadowCopy bool, shadowCopyTimeout int, enumOnly bool) bool {
var err error
top, err = filepath.Abs(top)
@@ -180,16 +184,20 @@ func (manager *BackupManager) Backup(top string, quickMode bool, threads int, ta
LOG_INFO("BACKUP_START", "Last backup at revision %d found", remoteSnapshot.Revision)
}
shadowTop := CreateShadowCopy(top, shadowCopy)
shadowTop := CreateShadowCopy(top, shadowCopy, shadowCopyTimeout)
defer DeleteShadowCopy()
LOG_INFO("BACKUP_INDEXING", "Indexing %s", top)
localSnapshot, skippedDirectories, skippedFiles, err := CreateSnapshotFromDirectory(manager.snapshotID, shadowTop)
localSnapshot, skippedDirectories, skippedFiles, err := CreateSnapshotFromDirectory(manager.snapshotID, shadowTop, manager.nobackupFile)
if err != nil {
LOG_ERROR("SNAPSHOT_LIST", "Failed to list the directory %s: %v", top, err)
return false
}
if enumOnly {
return true
}
// This cache contains all chunks referenced by the last snapshot. Any other chunks will lead to a call to
// UploadChunk.
chunkCache := make(map[string]bool)
@@ -199,7 +207,7 @@ func (manager *BackupManager) Backup(top string, quickMode bool, threads int, ta
// A revision number of 0 means this is the initial backup
if remoteSnapshot.Revision > 0 {
// Add all chunks in the last snapshot to the cache
for _, chunkID := range manager.SnapshotManager.GetSnapshotChunks(remoteSnapshot) {
for _, chunkID := range manager.SnapshotManager.GetSnapshotChunks(remoteSnapshot, true) {
chunkCache[chunkID] = true
}
} else {
@@ -242,6 +250,9 @@ func (manager *BackupManager) Backup(top string, quickMode bool, threads int, ta
}
}
LOG_DEBUG("CHUNK_INCOMPLETE", "The incomplete snapshot contains %d files and %d chunks", len(incompleteSnapshot.Files), len(incompleteSnapshot.ChunkHashes))
LOG_DEBUG("CHUNK_INCOMPLETE", "Last chunk in the incomplete snapshot that exist in the storage: %d", lastCompleteChunk)
// Only keep those files whose chunks exist in the cache
var files []*Entry
for _, file := range incompleteSnapshot.Files {
@@ -281,7 +292,7 @@ func (manager *BackupManager) Backup(top string, quickMode bool, threads int, ta
// we simply treat all files as if they were new, and break them into chunks.
// Otherwise, we need to find those that are new or recently modified
if remoteSnapshot.Revision == 0 && incompleteSnapshot == nil {
if (remoteSnapshot.Revision == 0 || !quickMode) && incompleteSnapshot == nil {
modifiedEntries = localSnapshot.Files
for _, entry := range modifiedEntries {
totalModifiedFileSize += entry.Size
@@ -461,7 +472,7 @@ func (manager *BackupManager) Backup(top string, quickMode bool, threads int, ta
uploadedModifiedFileSize := atomic.AddInt64(&uploadedModifiedFileSize, int64(chunkSize))
if IsTracing() || showStatistics {
if (IsTracing() || showStatistics) && totalModifiedFileSize > 0 {
now := time.Now().Unix()
if now <= startUploadingTime {
now = startUploadingTime + 1
@@ -747,9 +758,9 @@ func (manager *BackupManager) Restore(top string, revision int, inPlace bool, qu
}
remoteSnapshot := manager.SnapshotManager.DownloadSnapshot(manager.snapshotID, revision)
manager.SnapshotManager.DownloadSnapshotContents(remoteSnapshot, patterns)
manager.SnapshotManager.DownloadSnapshotContents(remoteSnapshot, patterns, true)
localSnapshot, _, _, err := CreateSnapshotFromDirectory(manager.snapshotID, top)
localSnapshot, _, _, err := CreateSnapshotFromDirectory(manager.snapshotID, top, manager.nobackupFile)
if err != nil {
LOG_ERROR("SNAPSHOT_LIST", "Failed to list the repository: %v", err)
return false
@@ -764,10 +775,7 @@ func (manager *BackupManager) Restore(top string, revision int, inPlace bool, qu
for _, file := range remoteSnapshot.Files {
if MatchPath(file.Path, patterns) {
LOG_TRACE("RESTORE_INCLUDE", "Include %s", file.Path)
includedFiles = append(includedFiles, file)
} else {
LOG_TRACE("RESTORE_EXCLUDE", "Exclude %s", file.Path)
}
}
@@ -817,6 +825,7 @@ func (manager *BackupManager) Restore(top string, revision int, inPlace bool, qu
if stat.Mode()&os.ModeSymlink != 0 {
isRegular, link, err := Readlink(fullPath)
if err == nil && link == entry.Link && !isRegular {
entry.RestoreMetadata(fullPath, nil, setOwner)
continue
}
}
@@ -829,12 +838,13 @@ func (manager *BackupManager) Restore(top string, revision int, inPlace bool, qu
LOG_ERROR("RESTORE_SYMLINK", "Can't create symlink %s: %v", entry.Path, err)
return false
}
entry.RestoreMetadata(fullPath, nil, setOwner)
LOG_TRACE("DOWNLOAD_DONE", "Symlink %s updated", entry.Path)
} else if entry.IsDir() {
stat, err := os.Stat(fullPath)
if err == nil && !stat.IsDir() {
LOG_ERROR("RESTORE_NOTDIR", "The path %s is not a directory: %v", fullPath, err)
LOG_ERROR("RESTORE_NOTDIR", "The path %s is not a directory", fullPath)
return false
}
@@ -915,9 +925,8 @@ func (manager *BackupManager) Restore(top string, revision int, inPlace bool, qu
totalFileSize, downloadedFileSize, startDownloadingTime) {
downloadedFileSize += file.Size
downloadedFiles = append(downloadedFiles, file)
file.RestoreMetadata(fullPath, nil, setOwner)
}
file.RestoreMetadata(fullPath, nil, setOwner)
}
if deleteMode && len(patterns) == 0 {
@@ -1153,6 +1162,9 @@ func (manager *BackupManager) RestoreFile(chunkDownloader *ChunkDownloader, chun
lengthMap := make(map[string]int)
var offset int64
// If the file is newly created (needed by sparse file optimization)
isNewFile := false
existingFile, err = os.Open(fullPath)
if err != nil {
if os.IsNotExist(err) {
@@ -1187,6 +1199,7 @@ func (manager *BackupManager) RestoreFile(chunkDownloader *ChunkDownloader, chun
LOG_ERROR("DOWNLOAD_OPEN", "Can't reopen the initial file just created: %v", err)
return false
}
isNewFile = true
}
} else {
LOG_TRACE("DOWNLOAD_OPEN", "Can't open the existing file: %v", err)
@@ -1199,6 +1212,9 @@ func (manager *BackupManager) RestoreFile(chunkDownloader *ChunkDownloader, chun
}
}
// The key in this map is the number of zeroes. The value is the corresponding hash.
knownHashes := make(map[int]string)
fileHash := ""
if existingFile != nil {
@@ -1208,6 +1224,7 @@ func (manager *BackupManager) RestoreFile(chunkDownloader *ChunkDownloader, chun
fileHasher := manager.config.NewFileHasher()
buffer := make([]byte, 64*1024)
err = nil
isSkipped := false
// We read one more byte so that the file hash will be different if the file to be restored is a
// truncated portion of the existing file
for i := entry.StartChunk; i <= entry.EndChunk+1; i++ {
@@ -1223,6 +1240,28 @@ func (manager *BackupManager) RestoreFile(chunkDownloader *ChunkDownloader, chun
chunkSize = 1 // the size of extra chunk beyond EndChunk
}
count := 0
if isNewFile {
if hash, found := knownHashes[chunkSize]; found {
// We have read the same number of zeros before, so we just retrieve the hash from the map
existingChunks = append(existingChunks, hash)
existingLengths = append(existingLengths, chunkSize)
offsetMap[hash] = offset
lengthMap[hash] = chunkSize
offset += int64(chunkSize)
isSkipped = true
continue
}
}
if isSkipped {
_, err := existingFile.Seek(offset, 0)
if err != nil {
LOG_ERROR("DOWNLOAD_SEEK", "Failed to seek to offset %d: %v", offset, err)
}
isSkipped = false
}
for count < chunkSize {
n := chunkSize - count
if n > cap(buffer) {
@@ -1249,12 +1288,16 @@ func (manager *BackupManager) RestoreFile(chunkDownloader *ChunkDownloader, chun
offsetMap[hash] = offset
lengthMap[hash] = chunkSize
offset += int64(chunkSize)
if isNewFile {
knownHashes[chunkSize] = hash
}
}
if err == io.EOF {
break
}
}
fileHash = hex.EncodeToString(fileHasher.Sum(nil))
} else {
// If it is not in-place, we want to reuse any chunks in the existing file regardless of their offsets, so
@@ -1281,6 +1324,7 @@ func (manager *BackupManager) RestoreFile(chunkDownloader *ChunkDownloader, chun
}
}
for i := entry.StartChunk; i <= entry.EndChunk; i++ {
if _, found := offsetMap[chunkDownloader.taskList[i].chunkHash]; !found {
chunkDownloader.taskList[i].needed = true
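
The knownHashes map introduced above is the sparse-file optimization: when the restore target was just created, every not-yet-written region reads back as zeros, so the hash of a zero run only needs to be computed once per distinct length. A standalone sketch of that idea, assuming crypto/sha256 and encoding/hex purely for illustration (the real code uses the repository's configured hasher):

// Cache the hash of an all-zero run of a given length.
knownHashes := make(map[int]string)
hashZeroRun := func(n int) string {
if h, ok := knownHashes[n]; ok {
return h // already computed for this length, skip rehashing
}
sum := sha256.Sum256(make([]byte, n))
h := hex.EncodeToString(sum[:])
knownHashes[n] = h
return h
}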

View File

@@ -227,11 +227,11 @@ func TestBackupManager(t *testing.T) {
time.Sleep(time.Duration(delay) * time.Second)
if testFixedChunkSize {
if !ConfigStorage(storage, 16384, 100, 64*1024, 64*1024, 64*1024, password, nil) {
if !ConfigStorage(storage, 16384, 100, 64*1024, 64*1024, 64*1024, password, nil, false) {
t.Errorf("Failed to initialize the storage")
}
} else {
if !ConfigStorage(storage, 16384, 100, 64*1024, 256*1024, 16*1024, password, nil) {
if !ConfigStorage(storage, 16384, 100, 64*1024, 256*1024, 16*1024, password, nil, false) {
t.Errorf("Failed to initialize the storage")
}
}
@@ -239,11 +239,11 @@ func TestBackupManager(t *testing.T) {
time.Sleep(time.Duration(delay) * time.Second)
SetDuplicacyPreferencePath(testDir + "/repository1/.duplicacy")
backupManager := CreateBackupManager("host1", storage, testDir, password)
backupManager := CreateBackupManager("host1", storage, testDir, password, "")
backupManager.SetupSnapshotCache("default")
SetDuplicacyPreferencePath(testDir + "/repository1/.duplicacy")
backupManager.Backup(testDir+"/repository1" /*quickMode=*/, true, threads, "first", false, false)
backupManager.Backup(testDir+"/repository1" /*quickMode=*/, true, threads, "first", false, false, 0, false)
time.Sleep(time.Duration(delay) * time.Second)
SetDuplicacyPreferencePath(testDir + "/repository2/.duplicacy")
backupManager.Restore(testDir+"/repository2", threads, /*inPlace=*/false, /*quickMode=*/false, threads, /*overwrite=*/true,
@@ -267,7 +267,7 @@ func TestBackupManager(t *testing.T) {
modifyFile(testDir+"/repository1/dir1/file3", 0.3)
SetDuplicacyPreferencePath(testDir + "/repository1/.duplicacy")
backupManager.Backup(testDir+"/repository1" /*quickMode=*/, true, threads, "second", false, false)
backupManager.Backup(testDir+"/repository1" /*quickMode=*/, true, threads, "second", false, false, 0, false)
time.Sleep(time.Duration(delay) * time.Second)
SetDuplicacyPreferencePath(testDir + "/repository2/.duplicacy")
backupManager.Restore(testDir+"/repository2", 2, /*inPlace=*/true, /*quickMode=*/true, threads, /*overwrite=*/true,
@@ -287,7 +287,7 @@ func TestBackupManager(t *testing.T) {
os.Mkdir(testDir+"/repository1/dir2/dir3", 0700)
os.Mkdir(testDir+"/repository1/dir4", 0700)
SetDuplicacyPreferencePath(testDir + "/repository1/.duplicacy")
backupManager.Backup(testDir+"/repository1" /*quickMode=*/, false, threads, "third", false, false)
backupManager.Backup(testDir+"/repository1" /*quickMode=*/, false, threads, "third", false, false, 0, false)
time.Sleep(time.Duration(delay) * time.Second)
// Create some directories and files under repository2 that will be deleted during restore
@@ -343,16 +343,16 @@ func TestBackupManager(t *testing.T) {
backupManager.SnapshotManager.CheckSnapshots( /*snapshotID*/ "host1" /*revisions*/, []int{1, 2, 3} /*tag*/, "",
/*showStatistics*/ false /*showTabular*/, false /*checkFiles*/, false /*searchFossils*/, false /*resurrect*/, false)
backupManager.SnapshotManager.PruneSnapshots("host1", "host1" /*revisions*/, []int{1} /*tags*/, nil /*retentions*/, nil,
/*exhaustive*/ false /*exclusive=*/, false /*ignoredIDs*/, nil /*dryRun*/, false /*deleteOnly*/, false /*collectOnly*/, false)
/*exhaustive*/ false /*exclusive=*/, false /*ignoredIDs*/, nil /*dryRun*/, false /*deleteOnly*/, false /*collectOnly*/, false, 1)
numberOfSnapshots = backupManager.SnapshotManager.ListSnapshots( /*snapshotID*/ "host1" /*revisionsToList*/, nil /*tag*/, "" /*showFiles*/, false /*showChunks*/, false)
if numberOfSnapshots != 2 {
t.Errorf("Expected 2 snapshots but got %d", numberOfSnapshots)
}
backupManager.SnapshotManager.CheckSnapshots( /*snapshotID*/ "host1" /*revisions*/, []int{2, 3} /*tag*/, "",
/*showStatistics*/ false /*showTabular*/, false /*checkFiles*/, false /*searchFossils*/, false /*resurrect*/, false)
backupManager.Backup(testDir+"/repository1" /*quickMode=*/, false, threads, "fourth", false, false)
backupManager.Backup(testDir+"/repository1" /*quickMode=*/, false, threads, "fourth", false, false, 0, false)
backupManager.SnapshotManager.PruneSnapshots("host1", "host1" /*revisions*/, nil /*tags*/, nil /*retentions*/, nil,
/*exhaustive*/ false /*exclusive=*/, true /*ignoredIDs*/, nil /*dryRun*/, false /*deleteOnly*/, false /*collectOnly*/, false)
/*exhaustive*/ false /*exclusive=*/, true /*ignoredIDs*/, nil /*dryRun*/, false /*deleteOnly*/, false /*collectOnly*/, false, 1)
numberOfSnapshots = backupManager.SnapshotManager.ListSnapshots( /*snapshotID*/ "host1" /*revisionsToList*/, nil /*tag*/, "" /*showFiles*/, false /*showChunks*/, false)
if numberOfSnapshots != 3 {
t.Errorf("Expected 3 snapshots but got %d", numberOfSnapshots)

src/duplicacy_benchmark.go Normal file
View File

@@ -0,0 +1,235 @@
// Copyright (c) Acrosync LLC. All rights reserved.
// Free for personal use and commercial trial
// Commercial use requires per-user licenses available from https://duplicacy.com
package duplicacy
import (
"bytes"
"crypto/sha256"
"encoding/hex"
"fmt"
"io"
"io/ioutil"
"math/rand"
"os"
"path/filepath"
"time"
)
func benchmarkSplit(reader *bytes.Reader, fileSize int64, chunkSize int, compression bool, encryption bool, annotation string) {
config := CreateConfig()
config.CompressionLevel = DEFAULT_COMPRESSION_LEVEL
config.AverageChunkSize = chunkSize
config.MaximumChunkSize = chunkSize * 4
config.MinimumChunkSize = chunkSize / 4
config.ChunkSeed = []byte("duplicacy")
config.HashKey = DEFAULT_KEY
config.IDKey = DEFAULT_KEY
maker := CreateChunkMaker(config, false)
startTime := float64(time.Now().UnixNano()) / 1e9
numberOfChunks := 0
reader.Seek(0, os.SEEK_SET)
maker.ForEachChunk(reader,
func(chunk *Chunk, final bool) {
if compression {
key := ""
if encryption {
key = "0123456789abcdef0123456789abcdef"
}
err := chunk.Encrypt([]byte(key), "")
if err != nil {
LOG_ERROR("BENCHMARK_ENCRYPT", "Failed to encrypt the chunk: %v", err)
}
}
config.PutChunk(chunk)
numberOfChunks++
},
func(size int64, hash string) (io.Reader, bool) {
return nil, false
})
runningTime := float64(time.Now().UnixNano())/1e9 - startTime
speed := int64(float64(fileSize) / runningTime)
LOG_INFO("BENCHMARK_SPLIT", "Split %s bytes into %d chunks %s in %.2fs: %s/s", PrettySize(fileSize), numberOfChunks, annotation,
runningTime, PrettySize(speed))
return
}
func benchmarkRun(threads int, chunkCount int, job func(threadIndex int, chunkIndex int)) {
indexChannel := make(chan int, chunkCount)
stopChannel := make(chan int, threads)
finishChannel := make(chan int, threads)
// Start the worker goroutines
for i := 0; i < threads; i++ {
go func(threadIndex int) {
defer CatchLogException()
for {
select {
case chunkIndex := <-indexChannel:
job(threadIndex, chunkIndex)
finishChannel <- 0
case <-stopChannel:
return
}
}
}(i)
}
for i := 0; i < chunkCount; i++ {
indexChannel <- i
}
for i := 0; i < chunkCount; i++ {
<-finishChannel
}
for i := 0; i < threads; i++ {
stopChannel <- 0
}
}
func Benchmark(localDirectory string, storage Storage, fileSize int64, chunkSize int, chunkCount int, uploadThreads int, downloadThreads int) bool {
filename := filepath.Join(localDirectory, "benchmark.dat")
defer func() {
os.Remove(filename)
}()
LOG_INFO("BENCHMARK_GENERATE", "Generating %s byte random data in memory", PrettySize(fileSize))
data := make([]byte, fileSize)
_, err := rand.Read(data)
if err != nil {
LOG_ERROR("BENCHMARK_RAND", "Failed to generate random data: %v", err)
return false
}
startTime := float64(time.Now().UnixNano()) / 1e9
LOG_INFO("BENCHMARK_WRITE", "Writing random data to local disk")
err = ioutil.WriteFile(filename, data, 0600)
if err != nil {
LOG_ERROR("BENCHMARK_WRITE", "Failed to write the random data: %v", err)
return false
}
runningTime := float64(time.Now().UnixNano())/1e9 - startTime
speed := int64(float64(fileSize) / runningTime)
LOG_INFO("BENCHMARK_WRITE", "Wrote %s bytes in %.2fs: %s/s", PrettySize(fileSize), runningTime, PrettySize(speed))
startTime = float64(time.Now().UnixNano()) / 1e9
LOG_INFO("BENCHMARK_READ", "Reading the random data from local disk")
file, err := os.Open(filename)
if err != nil {
LOG_ERROR("BENCHMARK_OPEN", "Failed to open the random data file: %v", err)
return false
}
segment := make([]byte, 1024*1024)
for err == nil {
_, err = file.Read(segment)
}
if err != io.EOF {
LOG_ERROR("BENCHMARK_OPEN", "Failed to read the random data file: %v", err)
return false
}
file.Close()
runningTime = float64(time.Now().UnixNano())/1e9 - startTime
speed = int64(float64(fileSize) / runningTime)
LOG_INFO("BENCHMARK_READ", "Read %s bytes in %.2fs: %s/s", PrettySize(fileSize), runningTime, PrettySize(speed))
buffer := bytes.NewReader(data)
benchmarkSplit(buffer, fileSize, chunkSize, false, false, "without compression/encryption")
benchmarkSplit(buffer, fileSize, chunkSize, true, false, "with compression but without encryption")
benchmarkSplit(buffer, fileSize, chunkSize, true, true, "with compression and encryption")
storage.CreateDirectory(0, "benchmark")
existingFiles, _, err := storage.ListFiles(0, "benchmark/")
if err != nil {
LOG_ERROR("BENCHMARK_LIST", "Failed to list the benchmark directory: %v", err)
return false
}
var existingChunks []string
for _, f := range existingFiles {
if len(f) > 0 && f[len(f)-1] != '/' {
existingChunks = append(existingChunks, "benchmark/"+f)
}
}
if len(existingChunks) > 0 {
LOG_INFO("BENCHMARK_DELETE", "Deleting %d temporary files from previous benchmark runs", len(existingChunks))
benchmarkRun(uploadThreads, len(existingChunks), func(threadIndex int, chunkIndex int) {
storage.DeleteFile(threadIndex, existingChunks[chunkIndex])
})
}
chunks := make([][]byte, chunkCount)
chunkHashes := make([]string, chunkCount)
LOG_INFO("BENCHMARK_GENERATE", "Generating %d chunks", chunkCount)
for i := 0; i < chunkCount; i++ {
chunks[i] = make([]byte, chunkSize)
_, err = rand.Read(chunks[i])
if err != nil {
LOG_ERROR("BENCHMARK_RAND", "Failed to generate random data: %v", err)
return false
}
hashInBytes := sha256.Sum256(chunks[i])
chunkHashes[i] = hex.EncodeToString(hashInBytes[:])
}
startTime = float64(time.Now().UnixNano()) / 1e9
benchmarkRun(uploadThreads, chunkCount, func(threadIndex int, chunkIndex int) {
err := storage.UploadFile(threadIndex, fmt.Sprintf("benchmark/chunk%d", chunkIndex), chunks[chunkIndex])
if err != nil {
LOG_ERROR("BENCHMARK_UPLOAD", "Failed to upload the chunk: %v", err)
return
}
})
runningTime = float64(time.Now().UnixNano())/1e9 - startTime
speed = int64(float64(chunkSize*chunkCount) / runningTime)
LOG_INFO("BENCHMARK_UPLOAD", "Uploaded %s bytes in %.2fs: %s/s", PrettySize(int64(chunkSize*chunkCount)), runningTime, PrettySize(speed))
config := CreateConfig()
startTime = float64(time.Now().UnixNano()) / 1e9
hashError := false
benchmarkRun(downloadThreads, chunkCount, func(threadIndex int, chunkIndex int) {
chunk := config.GetChunk()
chunk.Reset(false)
err := storage.DownloadFile(threadIndex, fmt.Sprintf("benchmark/chunk%d", chunkIndex), chunk)
if err != nil {
LOG_ERROR("BENCHMARK_DOWNLOAD", "Failed to download the chunk: %v", err)
return
}
hashInBytes := sha256.Sum256(chunk.GetBytes())
hash := hex.EncodeToString(hashInBytes[:])
if hash != chunkHashes[chunkIndex] {
LOG_WARN("BENCHMARK_HASH", "Chunk %d has mismatched hashes: %s != %s", chunkIndex, chunkHashes[chunkIndex], hash)
hashError = true
}
config.PutChunk(chunk)
})
runningTime = float64(time.Now().UnixNano())/1e9 - startTime
speed = int64(float64(chunkSize*chunkCount) / runningTime)
LOG_INFO("BENCHMARK_DOWNLOAD", "Downloaded %s bytes in %.2fs: %s/s", PrettySize(int64(chunkSize*chunkCount)), runningTime, PrettySize(speed))
if !hashError {
benchmarkRun(uploadThreads, chunkCount, func(threadIndex int, chunkIndex int) {
storage.DeleteFile(threadIndex, fmt.Sprintf("benchmark/chunk%d", chunkIndex))
})
LOG_INFO("BENCHMARK_DELETE", "Deleted %d temporary files from the storage", chunkCount)
}
return true
}
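
A hypothetical invocation of the new function; how the storage object is created depends on the backend and is outside this file, and the path, sizes, and thread counts below are arbitrary:

// Generate a 256 MB test file, split it, then upload/download 64 chunks of 4 MB
// with 4 threads each. Returns false if any step fails or a hash mismatch occurs.
ok := Benchmark(repositoryPath, storage, 256*1024*1024, 4*1024*1024, 64, 4, 4)
if !ok {
LOG_ERROR("BENCHMARK_RUN", "Benchmark did not complete successfully")
}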

View File

@@ -5,11 +5,14 @@
package duplicacy
import (
"os"
"bytes"
"compress/zlib"
"crypto/aes"
"crypto/cipher"
"crypto/rand"
"crypto/hmac"
"crypto/sha256"
"encoding/hex"
"fmt"
"hash"
@@ -247,10 +250,7 @@ func (chunk *Chunk) Encrypt(encryptionKey []byte, derivationKey string) (err err
// PKCS7 is used. Compressed chunk sizes leak information about the original chunks so we want the padding sizes
// to be the maximum allowed by PKCS7
dataLength := encryptedBuffer.Len() - offset
paddingLength := dataLength % 256
if paddingLength == 0 {
paddingLength = 256
}
paddingLength := 256 - dataLength % 256
encryptedBuffer.Write(bytes.Repeat([]byte{byte(paddingLength)}, paddingLength))
encryptedBuffer.Write(bytes.Repeat([]byte{0}, gcm.Overhead()))
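// Illustrative check of the corrected padding arithmetic (not part of this change):
// every padded length should come out as a multiple of 256, with 1 to 256 bytes of
// padding. The old formula (dataLength % 256, with 0 mapped to 256) padded a
// 255-byte payload to only 510 bytes, which is the bug fixed above.
for _, dataLength := range []int{1, 255, 256, 257} {
paddingLength := 256 - dataLength%256 // 255, 1, 256, 255
if (dataLength+paddingLength)%256 != 0 {
panic("padding does not reach a multiple of 256")
}
}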
@@ -267,6 +267,17 @@ func (chunk *Chunk) Encrypt(encryptionKey []byte, derivationKey string) (err err
}
// This is to ensure compatibility with Vertical Backup, which still uses HMAC-SHA256 (instead of HMAC-BLAKE2) to
// derive the key used to encrypt/decrypt files and chunks.
var DecryptWithHMACSHA256 = false
func init() {
if value, found := os.LookupEnv("DUPLICACY_DECRYPT_WITH_HMACSHA256"); found && value != "0" {
DecryptWithHMACSHA256 = true
}
}
// Decrypt decrypts the encrypted data stored in the chunk buffer. If derivationKey is not nil, the actual
// encryption key will be HMAC-SHA256(encryptionKey, derivationKey).
func (chunk *Chunk) Decrypt(encryptionKey []byte, derivationKey string) (err error) {
@@ -286,7 +297,13 @@ func (chunk *Chunk) Decrypt(encryptionKey []byte, derivationKey string) (err err
key := encryptionKey
if len(derivationKey) > 0 {
hasher := chunk.config.NewKeyedHasher([]byte(derivationKey))
var hasher hash.Hash
if DecryptWithHMACSHA256 {
hasher = hmac.New(sha256.New, []byte(derivationKey))
} else {
hasher = chunk.config.NewKeyedHasher([]byte(derivationKey))
}
hasher.Write(encryptionKey)
key = hasher.Sum(nil)
}
@@ -325,6 +342,7 @@ func (chunk *Chunk) Decrypt(encryptionKey []byte, derivationKey string) (err err
return err
}
paddingLength := int(decryptedBytes[len(decryptedBytes)-1])
if paddingLength == 0 {
paddingLength = 256

View File

@@ -22,6 +22,8 @@ func TestChunk(t *testing.T) {
config.CompressionLevel = DEFAULT_COMPRESSION_LEVEL
maxSize := 1000000
remainderLength := -1
for i := 0; i < 500; i++ {
size := rand.Int() % maxSize
@@ -44,6 +46,12 @@ func TestChunk(t *testing.T) {
encryptedData := make([]byte, chunk.GetLength())
copy(encryptedData, chunk.GetBytes())
if remainderLength == -1 {
remainderLength = len(encryptedData) % 256
} else if len(encryptedData) % 256 != remainderLength {
t.Errorf("Incorrect padding size")
}
chunk.Reset(false)
chunk.Write(encryptedData)
err = chunk.Decrypt(key, "")

View File

@@ -174,7 +174,7 @@ func (downloader *ChunkDownloader) Prefetch(file *Entry) {
// Reclaim releases the downloaded chunk to the chunk pool
func (downloader *ChunkDownloader) Reclaim(chunkIndex int) {
if downloader.lastChunkIndex == chunkIndex {
if downloader.lastChunkIndex >= chunkIndex {
return
}
@@ -187,13 +187,20 @@ func (downloader *ChunkDownloader) Reclaim(chunkIndex int) {
}
}
for i := downloader.lastChunkIndex; i < chunkIndex; i++ {
// These chunks are never downloaded if 'isDownloading' is false; note that 'isDownloading' isn't reset to
// false after a chunk has been downloaded
if !downloader.taskList[i].isDownloading {
atomic.AddInt64(&downloader.totalChunkSize, -int64(downloader.taskList[i].chunkLength))
}
}
downloader.lastChunkIndex = chunkIndex
}
// WaitForChunk waits until the specified chunk is ready
func (downloader *ChunkDownloader) WaitForChunk(chunkIndex int) (chunk *Chunk) {
// Reclain any chunk not needed
// Reclaim any chunk not needed
downloader.Reclaim(chunkIndex)
// If we haven't started downloading the specified chunk, download it now
@@ -298,38 +305,68 @@ func (downloader *ChunkDownloader) Download(threadIndex int, task ChunkDownloadT
// will be set up before the encryption
chunk.Reset(false)
// Find the chunk by ID first.
chunkPath, exist, _, err := downloader.storage.FindChunk(threadIndex, chunkID, false)
if err != nil {
LOG_ERROR("DOWNLOAD_CHUNK", "Failed to find the chunk %s: %v", chunkID, err)
return false
}
const MaxDownloadAttempts = 3
for downloadAttempt := 0; ; downloadAttempt++ {
if !exist {
// No chunk is found. Have to find it in the fossil pool again.
chunkPath, exist, _, err = downloader.storage.FindChunk(threadIndex, chunkID, true)
// Find the chunk by ID first.
chunkPath, exist, _, err := downloader.storage.FindChunk(threadIndex, chunkID, false)
if err != nil {
LOG_ERROR("DOWNLOAD_CHUNK", "Failed to find the chunk %s: %v", chunkID, err)
return false
}
if !exist {
// A chunk is not found. This is a serious error and hopefully it will never happen.
// No chunk is found. Have to find it in the fossil pool again.
fossilPath, exist, _, err := downloader.storage.FindChunk(threadIndex, chunkID, true)
if err != nil {
LOG_FATAL("DOWNLOAD_CHUNK", "Chunk %s can't be found: %v", chunkID, err)
} else {
LOG_FATAL("DOWNLOAD_CHUNK", "Chunk %s can't be found", chunkID)
LOG_ERROR("DOWNLOAD_CHUNK", "Failed to find the chunk %s: %v", chunkID, err)
return false
}
return false
}
LOG_DEBUG("CHUNK_FOSSIL", "Chunk %s has been marked as a fossil", chunkID)
}
const MaxDownloadAttempts = 3
for downloadAttempt := 0; ; downloadAttempt++ {
if !exist {
retry := false
// Retry for Hubic or WebDAV as it may return 404 even when the chunk exists
if _, ok := downloader.storage.(*HubicStorage); ok {
retry = true
}
if _, ok := downloader.storage.(*WebDAVStorage); ok {
retry = true
}
if retry && downloadAttempt < MaxDownloadAttempts {
LOG_WARN("DOWNLOAD_RETRY", "Failed to find the chunk %s; retrying", chunkID)
continue
}
// A chunk is not found. This is a serious error and hopefully it will never happen.
if err != nil {
LOG_FATAL("DOWNLOAD_CHUNK", "Chunk %s can't be found: %v", chunkID, err)
} else {
LOG_FATAL("DOWNLOAD_CHUNK", "Chunk %s can't be found", chunkID)
}
return false
}
// We can't download the fossil directly. We have to turn it back into a regular chunk and try
// downloading again.
err = downloader.storage.MoveFile(threadIndex, fossilPath, chunkPath)
if err != nil {
LOG_FATAL("DOWNLOAD_CHUNK", "Failed to resurrect chunk %s: %v", chunkID, err)
return false
}
LOG_WARN("DOWNLOAD_RESURRECT", "Fossil %s has been resurrected", chunkID)
continue
}
err = downloader.storage.DownloadFile(threadIndex, chunkPath, chunk)
if err != nil {
if err == io.ErrUnexpectedEOF && downloadAttempt < MaxDownloadAttempts {
_, isHubic := downloader.storage.(*HubicStorage)
// Retry on EOF or if it is a Hubic backend as it may return 404 even when the chunk exists
if (err == io.ErrUnexpectedEOF || isHubic) && downloadAttempt < MaxDownloadAttempts {
LOG_WARN("DOWNLOAD_RETRY", "Failed to download the chunk %s: %v; retrying", chunkID, err)
chunk.Reset(false)
continue
@@ -368,7 +405,7 @@ func (downloader *ChunkDownloader) Download(threadIndex int, task ChunkDownloadT
if len(cachedPath) > 0 {
// Save a copy to the local snapshot cache
err = downloader.snapshotCache.UploadFile(threadIndex, cachedPath, chunk.GetBytes())
err := downloader.snapshotCache.UploadFile(threadIndex, cachedPath, chunk.GetBytes())
if err != nil {
LOG_WARN("DOWNLOAD_CACHE", "Failed to add the chunk %s to the snapshot cache: %v", chunkID, err)
}

View File

@@ -0,0 +1,209 @@
// Copyright (c) Acrosync LLC. All rights reserved.
// Free for personal use and commercial trial
// Commercial use requires per-user licenses available from https://duplicacy.com
package duplicacy
import (
"sync"
"sync/atomic"
"time"
)
// These are operations that ChunkOperator will perform.
const (
ChunkOperationFind = 0
ChunkOperationDelete = 1
ChunkOperationFossilize = 2
ChunkOperationResurrect = 3
)
// ChunkOperatorTask is used to pass parameters for different kinds of chunk operations.
type ChunkOperatorTask struct {
operation int // The type of operation
chunkID string // The chunk id
filePath string // The path of the chunk file; it may be empty
}
// ChunkOperator is capable of performing multi-threaded operations on chunks.
type ChunkOperator struct {
numberOfActiveTasks int64 // The number of chunks that are being operated on
storage Storage // This storage
threads int // Number of threads
taskQueue chan ChunkOperatorTask // Operating goroutines are waiting on this channel for input
stopChannel chan bool // Used to stop all the goroutines
fossils []string // For fossilize operation, the paths of the fossils are stored in this slice
fossilsLock *sync.Mutex // The lock for 'fossils'
}
// CreateChunkOperator creates a new ChunkOperator.
func CreateChunkOperator(storage Storage, threads int) *ChunkOperator {
operator := &ChunkOperator{
storage: storage,
threads: threads,
taskQueue: make(chan ChunkOperatorTask, threads*4),
stopChannel: make(chan bool),
fossils: make([]string, 0),
fossilsLock: &sync.Mutex{},
}
// Start the operator goroutines
for i := 0; i < operator.threads; i++ {
go func(threadIndex int) {
defer CatchLogException()
for {
select {
case task := <-operator.taskQueue:
operator.Run(threadIndex, task)
case <-operator.stopChannel:
return
}
}
}(i)
}
return operator
}
func (operator *ChunkOperator) Stop() {
if atomic.LoadInt64(&operator.numberOfActiveTasks) < 0 {
return
}
for atomic.LoadInt64(&operator.numberOfActiveTasks) > 0 {
time.Sleep(100 * time.Millisecond)
}
for i := 0; i < operator.threads; i++ {
operator.stopChannel <- false
}
// Assign -1 to numberOfActiveTasks so Stop() can be called multiple times
atomic.AddInt64(&operator.numberOfActiveTasks, int64(-1))
}
func (operator *ChunkOperator) AddTask(operation int, chunkID string, filePath string) {
task := ChunkOperatorTask{
operation: operation,
chunkID: chunkID,
filePath: filePath,
}
operator.taskQueue <- task
atomic.AddInt64(&operator.numberOfActiveTasks, int64(1))
}
func (operator *ChunkOperator) Find(chunkID string) {
operator.AddTask(ChunkOperationFind, chunkID, "")
}
func (operator *ChunkOperator) Delete(chunkID string, filePath string) {
operator.AddTask(ChunkOperationDelete, chunkID, filePath)
}
func (operator *ChunkOperator) Fossilize(chunkID string, filePath string) {
operator.AddTask(ChunkOperationFossilize, chunkID, filePath)
}
func (operator *ChunkOperator) Resurrect(chunkID string, filePath string) {
operator.AddTask(ChunkOperationResurrect, chunkID, filePath)
}
func (operator *ChunkOperator) Run(threadIndex int, task ChunkOperatorTask) {
defer func() {
atomic.AddInt64(&operator.numberOfActiveTasks, int64(-1))
}()
// task.filePath may be empty. If so, find the chunk first.
if task.operation == ChunkOperationDelete || task.operation == ChunkOperationFossilize {
if task.filePath == "" {
filePath, exist, _, err := operator.storage.FindChunk(threadIndex, task.chunkID, false)
if err != nil {
LOG_ERROR("CHUNK_FIND", "Failed to locate the path for the chunk %s: %v", task.chunkID, err)
return
} else if !exist {
if task.operation == ChunkOperationDelete {
LOG_WARN("CHUNK_FIND", "Chunk %s does not exist in the storage", task.chunkID)
return
}
fossilPath, exist, _, _ := operator.storage.FindChunk(threadIndex, task.chunkID, true)
if exist {
LOG_WARN("CHUNK_FOSSILIZE", "Chunk %s is already a fossil", task.chunkID)
operator.fossilsLock.Lock()
operator.fossils = append(operator.fossils, fossilPath)
operator.fossilsLock.Unlock()
} else {
LOG_ERROR("CHUNK_FIND", "Chunk %s does not exist in the storage", task.chunkID)
}
return
}
task.filePath = filePath
}
}
if task.operation == ChunkOperationFind {
_, exist, _, err := operator.storage.FindChunk(threadIndex, task.chunkID, false)
if err != nil {
LOG_ERROR("CHUNK_FIND", "Failed to locate the path for the chunk %s: %v", task.chunkID, err)
} else if !exist {
LOG_ERROR("CHUNK_FIND", "Chunk %s does not exist in the storage", task.chunkID)
} else {
LOG_DEBUG("CHUNK_FIND", "Chunk %s exists in the storage", task.chunkID)
}
} else if task.operation == ChunkOperationDelete {
err := operator.storage.DeleteFile(threadIndex, task.filePath)
if err != nil {
LOG_WARN("CHUNK_DELETE", "Failed to remove the file %s: %v", task.filePath, err)
} else {
if task.chunkID != "" {
LOG_INFO("CHUNK_DELETE", "The chunk %s has been permanently removed", task.chunkID)
} else {
LOG_INFO("CHUNK_DELETE", "Deleted file %s from the storage", task.filePath)
}
}
} else if task.operation == ChunkOperationFossilize {
fossilPath := task.filePath + ".fsl"
err := operator.storage.MoveFile(threadIndex, task.filePath, fossilPath)
if err != nil {
if _, exist, _, _ := operator.storage.FindChunk(threadIndex, task.chunkID, true); exist {
err := operator.storage.DeleteFile(threadIndex, task.filePath)
if err == nil {
LOG_TRACE("CHUNK_DELETE", "Deleted chunk file %s as the fossil already exists", task.chunkID)
}
operator.fossilsLock.Lock()
operator.fossils = append(operator.fossils, fossilPath)
operator.fossilsLock.Unlock()
} else {
LOG_ERROR("CHUNK_DELETE", "Failed to fossilize the chunk %s: %v", task.chunkID, err)
}
} else {
LOG_TRACE("CHUNK_FOSSILIZE", "The chunk %s has been marked as a fossil", task.chunkID)
operator.fossilsLock.Lock()
operator.fossils = append(operator.fossils, fossilPath)
operator.fossilsLock.Unlock()
}
} else if task.operation == ChunkOperationResurrect {
chunkPath, exist, _, err := operator.storage.FindChunk(threadIndex, task.chunkID, false)
if err != nil {
LOG_ERROR("CHUNK_FIND", "Failed to locate the path for the chunk %s: %v", task.chunkID, err)
}
if exist {
operator.storage.DeleteFile(threadIndex, task.filePath)
LOG_INFO("FOSSIL_RESURRECT", "The chunk %s already exists", task.chunkID)
} else {
err := operator.storage.MoveFile(threadIndex, task.filePath, chunkPath)
if err != nil {
LOG_ERROR("FOSSIL_RESURRECT", "Failed to resurrect the chunk %s from the fossil %s: %v",
task.chunkID, task.filePath, err)
} else {
LOG_INFO("FOSSIL_RESURRECT", "The chunk %s has been resurrected", task.filePath)
}
}
}
}
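
A minimal usage sketch of the operator, assuming a configured storage (the chunk IDs and paths below are made up; in the real prune path they come from the fossil-collection logic):

operator := CreateChunkOperator(storage, 4)
operator.Fossilize("chunk-id-1", "")                // path will be resolved by the operator
operator.Delete("chunk-id-2", "chunks/ab/cdef0123") // path already known, deleted directly
operator.Stop()                                     // waits for queued tasks, then stops the goroutines
// operator.fossils now holds the fossil paths recorded by the Fossilize tasks.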

View File

@@ -111,7 +111,7 @@ func TestUploaderAndDownloader(t *testing.T) {
for i, chunk := range chunks {
downloaded := chunkDownloader.WaitForChunk(i)
if downloaded.GetID() != chunk.GetID() {
t.Error("Uploaded: %s, downloaded: %s", chunk.GetID(), downloaded.GetID())
t.Errorf("Uploaded: %s, downloaded: %s", chunk.GetID(), downloaded.GetID())
}
}

View File

@@ -22,6 +22,9 @@ import (
var DUPLICACY_DIRECTORY = ".duplicacy"
var DUPLICACY_FILE = ".duplicacy"
// Mask for file permission bits
var fileModeMask = os.ModePerm | os.ModeSetuid | os.ModeSetgid | os.ModeSticky
// Regex for matching 'StartChunk:StartOffset:EndChunk:EndOffset'
var contentRegex = regexp.MustCompile(`^([0-9]+):([0-9]+):([0-9]+):([0-9]+)`)
@@ -269,7 +272,7 @@ func (entry *Entry) IsLink() bool {
}
func (entry *Entry) GetPermissions() os.FileMode {
return os.FileMode(entry.Mode) & os.ModePerm
return os.FileMode(entry.Mode)&fileModeMask
}
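// A quick illustration of why the wider mask matters (not part of this change):
// masking with os.ModePerm alone drops setuid/setgid/sticky, while fileModeMask keeps them.
mode := os.FileMode(0755) | os.ModeSetuid
_ = mode & os.ModePerm  // rwxr-xr-x only; the setuid bit is gone
_ = mode & fileModeMask // rwxr-xr-x plus the setuid bit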
func (entry *Entry) IsSameAs(other *Entry) bool {
@@ -289,7 +292,7 @@ func (entry *Entry) String(maxSizeDigits int) string {
func (entry *Entry) RestoreMetadata(fullPath string, fileInfo *os.FileInfo, setOwner bool) bool {
if fileInfo == nil {
stat, err := os.Stat(fullPath)
stat, err := os.Lstat(fullPath)
fileInfo = &stat
if err != nil {
LOG_ERROR("RESTORE_STAT", "Failed to retrieve the file info: %v", err)
@@ -297,7 +300,15 @@ func (entry *Entry) RestoreMetadata(fullPath string, fileInfo *os.FileInfo, setO
}
}
if (*fileInfo).Mode()&os.ModePerm != entry.GetPermissions() {
// Note that chown can remove setuid/setgid bits, so it should be called before chmod
if setOwner {
if !SetOwner(fullPath, entry, fileInfo) {
return false
}
}
// Only set the permission if the file is not a symlink
if !entry.IsLink() && (*fileInfo).Mode() & fileModeMask != entry.GetPermissions() {
err := os.Chmod(fullPath, entry.GetPermissions())
if err != nil {
LOG_ERROR("RESTORE_CHMOD", "Failed to set the file permissions: %v", err)
@@ -305,7 +316,8 @@ func (entry *Entry) RestoreMetadata(fullPath string, fileInfo *os.FileInfo, setO
}
}
if (*fileInfo).ModTime().Unix() != entry.Time {
// Only set the time if the file is not a symlink
if !entry.IsLink() && (*fileInfo).ModTime().Unix() != entry.Time {
modifiedTime := time.Unix(entry.Time, 0)
err := os.Chtimes(fullPath, modifiedTime, modifiedTime)
if err != nil {
@@ -318,11 +330,7 @@ func (entry *Entry) RestoreMetadata(fullPath string, fileInfo *os.FileInfo, setO
entry.SetAttributesToFile(fullPath)
}
if setOwner {
return SetOwner(fullPath, entry, fileInfo)
} else {
return true
}
return true
}
// Return -1 if 'left' should appear before 'right', 1 if opposite, and 0 if they are the same.
@@ -435,7 +443,7 @@ func (files FileInfoCompare) Less(i, j int) bool {
// ListEntries returns a list of entries representing file and subdirectories under the directory 'path'. Entry paths
// are normalized as relative to 'top'. 'patterns' are used to exclude or include certain files.
func ListEntries(top string, path string, fileList *[]*Entry, patterns []string, discardAttributes bool) (directoryList []*Entry,
func ListEntries(top string, path string, fileList *[]*Entry, patterns []string, nobackupFile string, discardAttributes bool) (directoryList []*Entry,
skippedFiles []string, err error) {
LOG_DEBUG("LIST_ENTRIES", "Listing %s", path)
@@ -448,6 +456,15 @@ func ListEntries(top string, path string, fileList *[]*Entry, patterns []string,
if err != nil {
return directoryList, nil, err
}
// This binary search works because ioutil.ReadDir returns files sorted by Name() by default
if nobackupFile != "" {
ii := sort.Search(len(files), func(ii int) bool { return strings.Compare(files[ii].Name(), nobackupFile) >= 0})
if ii < len(files) && files[ii].Name() == nobackupFile {
LOG_DEBUG("LIST_NOBACKUP", "%s is excluded due to nobackup file", path)
return directoryList, skippedFiles, nil
}
}
normalizedPath := path
if len(normalizedPath) > 0 && normalizedPath[len(normalizedPath)-1] != '/' {
@@ -469,7 +486,6 @@ func ListEntries(top string, path string, fileList *[]*Entry, patterns []string,
}
entry := CreateEntryFromFileInfo(f, normalizedPath)
if len(patterns) > 0 && !MatchPath(entry.Path, patterns) {
LOG_DEBUG("LIST_EXCLUDE", "%s is excluded", entry.Path)
continue
}
if entry.IsLink() {
@@ -483,7 +499,7 @@ func ListEntries(top string, path string, fileList *[]*Entry, patterns []string,
if isRegular {
entry.Mode ^= uint32(os.ModeSymlink)
} else if path == "" && filepath.IsAbs(entry.Link) && !strings.HasPrefix(entry.Link, normalizedTop) {
} else if path == "" && (filepath.IsAbs(entry.Link) || filepath.HasPrefix(entry.Link, `\\`)) && !strings.HasPrefix(entry.Link, normalizedTop) {
stat, err := os.Stat(filepath.Join(top, entry.Path))
if err != nil {
LOG_WARN("LIST_LINK", "Failed to read the symlink: %v", err)

View File

@@ -173,7 +173,7 @@ func TestEntryList(t *testing.T) {
directory := directories[len(directories)-1]
directories = directories[:len(directories)-1]
entries = append(entries, directory)
subdirectories, _, err := ListEntries(testDir, directory.Path, &entries, nil, false)
subdirectories, _, err := ListEntries(testDir, directory.Path, &entries, nil, "", false)
if err != nil {
t.Errorf("ListEntries(%s, %s) returned an error: %s", testDir, directory.Path, err)
}

View File

@@ -24,11 +24,16 @@ import (
"google.golang.org/api/googleapi"
)
var (
GCDFileMimeType = "application/octet-stream"
GCDDirectoryMimeType = "application/vnd.google-apps.folder"
)
type GCDStorage struct {
StorageBase
service *drive.Service
idCache map[string]string
idCache map[string]string // only directories are saved in this cache
idCacheLock sync.Mutex
backoffs []int // desired backoff time in seconds for each thread
attempts []int // number of failed attempts since last success for each thread
@@ -165,7 +170,7 @@ func (storage *GCDStorage) listFiles(threadIndex int, parentID string, listFiles
startToken := ""
query := "'" + parentID + "' in parents "
query := "'" + parentID + "' in parents and trashed = false "
if listFiles && !listDirectories {
query += "and mimeType != 'application/vnd.google-apps.folder'"
} else if !listFiles && !listDirectories {
@@ -209,7 +214,7 @@ func (storage *GCDStorage) listByName(threadIndex int, parentID string, name str
var err error
for {
query := "name = '" + name + "' and '" + parentID + "' in parents"
query := "name = '" + name + "' and '" + parentID + "' in parents and trashed = false "
fileList, err = storage.service.Files.List().Q(query).Fields("files(name, mimeType, id, size)").Do()
if retry, e := storage.shouldRetry(threadIndex, err); e == nil && !retry {
@@ -227,7 +232,7 @@ func (storage *GCDStorage) listByName(threadIndex int, parentID string, name str
file := fileList.Files[0]
return file.Id, file.MimeType == "application/vnd.google-apps.folder", file.Size, nil
return file.Id, file.MimeType == GCDDirectoryMimeType, file.Size, nil
}
// getIDFromPath returns the id of the given path. If 'createDirectories' is true, create the given path and all its
@@ -283,10 +288,10 @@ func (storage *GCDStorage) getIDFromPath(threadIndex int, filePath string, creat
}
fileID = currentID
continue
} else {
} else if isDir {
storage.savePathID(current, fileID)
}
if i != len(names)-1 && !isDir {
if i != len(names) - 1 && !isDir {
return "", fmt.Errorf("Path '%s' is not a directory", current)
}
}
@@ -332,11 +337,13 @@ func CreateGCDStorage(tokenFile string, storagePath string, threads int) (storag
storage.attempts[i] = 0
}
storagePathID, err := storage.getIDFromPath(0, storagePath, false)
storagePathID, err := storage.getIDFromPath(0, storagePath, true)
if err != nil {
return nil, err
}
// Reset the id cache and start with 'storagePathID' as the root
storage.idCache = make(map[string]string)
storage.idCache[""] = storagePathID
for _, dir := range []string{"chunks", "snapshots", "fossils"} {
@@ -379,11 +386,11 @@ func (storage *GCDStorage) ListFiles(threadIndex int, dir string) ([]string, []i
subDirs := []string{}
for _, file := range files {
storage.savePathID("snapshots/"+file.Name, file.Id)
subDirs = append(subDirs, file.Name+"/")
storage.savePathID("snapshots/" + file.Name, file.Id)
subDirs = append(subDirs, file.Name + "/")
}
return subDirs, nil, nil
} else if strings.HasPrefix(dir, "snapshots/") {
} else if strings.HasPrefix(dir, "snapshots/") || strings.HasPrefix(dir, "benchmark") {
pathID, err := storage.getIDFromPath(threadIndex, dir, false)
if err != nil {
return nil, nil, err
@@ -400,7 +407,6 @@ func (storage *GCDStorage) ListFiles(threadIndex int, dir string) ([]string, []i
files := []string{}
for _, entry := range entries {
storage.savePathID(dir+"/"+entry.Name, entry.Id)
files = append(files, entry.Name)
}
return files, nil, nil
@@ -420,7 +426,7 @@ func (storage *GCDStorage) ListFiles(threadIndex int, dir string) ([]string, []i
return nil, nil, err
}
for _, entry := range entries {
if entry.MimeType != "application/vnd.google-apps.folder" {
if entry.MimeType != GCDDirectoryMimeType {
name := entry.Name
if strings.HasPrefix(parent, "fossils") {
name = parent + "/" + name + ".fsl"
@@ -432,9 +438,9 @@ func (storage *GCDStorage) ListFiles(threadIndex int, dir string) ([]string, []i
files = append(files, name)
sizes = append(sizes, entry.Size)
} else {
parents = append(parents, parent+"/"+entry.Name)
parents = append(parents, parent+ "/" + entry.Name)
storage.savePathID(parent + "/" + entry.Name, entry.Id)
}
storage.savePathID(parent+"/"+entry.Name, entry.Id)
}
}
return files, sizes, nil
@@ -474,9 +480,12 @@ func (storage *GCDStorage) MoveFile(threadIndex int, from string, to string) (er
from = storage.convertFilePath(from)
to = storage.convertFilePath(to)
fileID, ok := storage.findPathID(from)
if !ok {
return fmt.Errorf("Attempting to rename file %s with unknown id", from)
fileID, err := storage.getIDFromPath(threadIndex, from, false)
if err != nil {
return fmt.Errorf("Failed to retrieve the id of '%s': %v", from, err)
}
if fileID == "" {
return fmt.Errorf("The file '%s' to be moved does not exist", from)
}
fromParent := path.Dir(from)
@@ -505,8 +514,6 @@ func (storage *GCDStorage) MoveFile(threadIndex int, from string, to string) (er
}
}
storage.savePathID(to, storage.getPathID(from))
storage.deletePathID(from)
return nil
}
@@ -539,21 +546,22 @@ func (storage *GCDStorage) CreateDirectory(threadIndex int, dir string) (err err
}
name := path.Base(dir)
file := &drive.File{
Name: name,
MimeType: "application/vnd.google-apps.folder",
Parents: []string{parentID},
}
var file *drive.File
for {
file = &drive.File{
Name: name,
MimeType: GCDDirectoryMimeType,
Parents: []string{parentID},
}
file, err = storage.service.Files.Create(file).Fields("id").Do()
if retry, err := storage.shouldRetry(threadIndex, err); err == nil && !retry {
break
} else {
// Check if the directory has already been created by another thread
exist, _, _, newErr := storage.GetFileInfo(threadIndex, dir)
if newErr == nil && exist {
if _, ok := storage.findPathID(dir); ok {
return nil
}
@@ -577,36 +585,29 @@ func (storage *GCDStorage) GetFileInfo(threadIndex int, filePath string) (exist
filePath = storage.convertFilePath(filePath)
fileID, ok := storage.findPathID(filePath)
if !ok {
dir := path.Dir(filePath)
if dir == "." {
dir = ""
}
dirID, err := storage.getIDFromPath(threadIndex, dir, false)
if err != nil {
return false, false, 0, err
}
if dirID == "" {
return false, false, 0, nil
}
fileID, isDir, size, err = storage.listByName(threadIndex, dirID, path.Base(filePath))
if fileID != "" {
storage.savePathID(filePath, fileID)
}
return fileID != "", isDir, size, err
if ok {
// Only directories are saved in the cache, so this must be a directory
return true, true, 0, nil
}
for {
file, err := storage.service.Files.Get(fileID).Fields("id, mimeType").Do()
if retry, err := storage.shouldRetry(threadIndex, err); err == nil && !retry {
return true, file.MimeType == "application/vnd.google-apps.folder", file.Size, nil
} else if retry {
continue
} else {
return false, false, 0, err
}
dir := path.Dir(filePath)
if dir == "." {
dir = ""
}
dirID, err := storage.getIDFromPath(threadIndex, dir, false)
if err != nil {
return false, false, 0, err
}
if dirID == "" {
return false, false, 0, nil
}
fileID, isDir, size, err = storage.listByName(threadIndex, dirID, path.Base(filePath))
if fileID != "" && isDir {
storage.savePathID(filePath, fileID)
}
return fileID != "", isDir, size, err
}
// DownloadFile reads the file at 'filePath' into the chunk.
@@ -656,7 +657,7 @@ func (storage *GCDStorage) UploadFile(threadIndex int, filePath string, content
file := &drive.File{
Name: path.Base(filePath),
MimeType: "application/octet-stream",
MimeType: GCDFileMimeType,
Parents: []string{parentID},
}

View File

@@ -72,7 +72,7 @@ func NewHubicClient(tokenFile string) (*HubicClient, error) {
KeepAlive: 30 * time.Second,
}).Dial,
TLSHandshakeTimeout: 60 * time.Second,
ResponseHeaderTimeout: 30 * time.Second,
ResponseHeaderTimeout: 300 * time.Second,
ExpectContinueTimeout: 10 * time.Second,
},
},
@@ -82,7 +82,7 @@ func NewHubicClient(tokenFile string) (*HubicClient, error) {
CredentialLock: &sync.Mutex{},
}
err = client.RefreshToken()
err = client.RefreshToken(false)
if err != nil {
return nil, err
}
@@ -100,7 +100,7 @@ func (client *HubicClient) call(url string, method string, input interface{}, ex
var response *http.Response
backoff := 1
for i := 0; i < 8; i++ {
for i := 0; i < 11; i++ {
LOG_DEBUG("HUBIC_CALL", "%s %s", method, url)
@@ -151,6 +151,13 @@ func (client *HubicClient) call(url string, method string, input interface{}, ex
response, err = client.HTTPClient.Do(request)
if err != nil {
if url != HubicCredentialURL {
retryAfter := time.Duration((0.5 + rand.Float32()) * 1000.0 * float32(backoff))
LOG_INFO("HUBIC_CALL", "%s %s returned an error: %v; retry after %d milliseconds", method, url, err, retryAfter)
time.Sleep(retryAfter * time.Millisecond)
backoff *= 2
continue
}
return nil, 0, "", err
}
@@ -179,7 +186,7 @@ func (client *HubicClient) call(url string, method string, input interface{}, ex
return nil, 0, "", HubicError{Status: response.StatusCode, Message: "Authorization error when retrieving credentials"}
}
err = client.RefreshToken()
err = client.RefreshToken(true)
if err != nil {
return nil, 0, "", err
}
@@ -190,7 +197,13 @@ func (client *HubicClient) call(url string, method string, input interface{}, ex
}
continue
} else if response.StatusCode >= 500 && response.StatusCode < 600 {
retryAfter := time.Duration(rand.Float32() * 1000.0 * float32(backoff))
retryAfter := time.Duration((0.5 + rand.Float32()) * 1000.0 * float32(backoff))
LOG_INFO("HUBIC_RETRY", "Response status: %d; retry after %d milliseconds", response.StatusCode, retryAfter)
time.Sleep(retryAfter * time.Millisecond)
backoff *= 2
continue
} else if response.StatusCode == 408 {
retryAfter := time.Duration((0.5 + rand.Float32()) * 1000.0 * float32(backoff))
LOG_INFO("HUBIC_RETRY", "Response status: %d; retry after %d milliseconds", response.StatusCode, retryAfter)
time.Sleep(retryAfter * time.Millisecond)
backoff *= 2
@@ -203,11 +216,11 @@ func (client *HubicClient) call(url string, method string, input interface{}, ex
return nil, 0, "", fmt.Errorf("Maximum number of retries reached")
}
func (client *HubicClient) RefreshToken() (err error) {
func (client *HubicClient) RefreshToken(force bool) (err error) {
client.TokenLock.Lock()
defer client.TokenLock.Unlock()
if client.Token.Valid() {
if !force && client.Token.Valid() {
return nil
}

View File

@@ -106,17 +106,19 @@ func (storage *HubicStorage) ListFiles(threadIndex int, dir string) ([]string, [
} else {
files := []string{}
sizes := []int64{}
entries, err := storage.client.ListEntries(storage.storageDir + "/chunks")
entries, err := storage.client.ListEntries(storage.storageDir + "/" + dir)
if err != nil {
return nil, nil, err
}
for _, entry := range entries {
if entry.Type == "application/directory" {
continue
files = append(files, entry.Name + "/")
sizes = append(sizes, 0)
} else {
files = append(files, entry.Name)
sizes = append(sizes, entry.Size)
}
files = append(files, entry.Name)
sizes = append(sizes, entry.Size)
}
return files, sizes, nil
}

View File

@@ -136,6 +136,16 @@ func keyringSet(key string, value string) bool {
if value == "" {
keyring[key] = nil
} else {
// Check if the value to be set is the same as the existing one
existingEncryptedValue := keyring[key]
if len(existingEncryptedValue) > 0 {
existingValue, err := keyringDecrypt(existingEncryptedValue)
if err == nil && string(existingValue) == value {
return true
}
}
encryptedValue, err := keyringEncrypt([]byte(value))
if err != nil {
LOG_DEBUG("KEYRING_ENCRYPT", "Failed to encrypt the value: %v", err)

View File

@@ -129,7 +129,7 @@ func logf(level int, logID string, format string, v ...interface{}) {
// fmt.Printf("%s %s %s %s\n", now.Format("2006-01-02 15:04:05.000"), getLevelName(level), logID, message)
if testingT != nil {
if level < WARN {
if level <= WARN {
if level >= loggingLevel {
testingT.Logf("%s %s %s %s\n",
now.Format("2006-01-02 15:04:05.000"), getLevelName(level), logID, message)

View File

@@ -65,6 +65,8 @@ func NewOneDriveClient(tokenFile string) (*OneDriveClient, error) {
TokenLock: &sync.Mutex{},
}
client.RefreshToken(false)
return client, nil
}
@@ -154,7 +156,7 @@ func (client *OneDriveClient) call(url string, method string, input interface{},
return nil, 0, OneDriveError{Status: response.StatusCode, Message: "Authorization error when refreshing token"}
}
err = client.RefreshToken()
err = client.RefreshToken(true)
if err != nil {
return nil, 0, err
}
@@ -178,11 +180,11 @@ func (client *OneDriveClient) call(url string, method string, input interface{},
return nil, 0, fmt.Errorf("Maximum number of retries reached")
}
func (client *OneDriveClient) RefreshToken() (err error) {
func (client *OneDriveClient) RefreshToken(force bool) (err error) {
client.TokenLock.Lock()
defer client.TokenLock.Unlock()
if client.Token.Valid() {
if !force && client.Token.Valid() {
return nil
}

View File

@@ -97,7 +97,7 @@ func (storage *OneDriveStorage) ListFiles(threadIndex int, dir string) ([]string
}
}
return subDirs, nil, nil
} else if strings.HasPrefix(dir, "snapshots/") {
} else if strings.HasPrefix(dir, "snapshots/") || strings.HasPrefix(dir, "benchmark") {
entries, err := storage.client.ListEntries(storage.storageDir + "/" + dir)
if err != nil {
return nil, nil, err

View File

@@ -17,11 +17,13 @@ import (
type Preference struct {
Name string `json:"name"`
SnapshotID string `json:"id"`
RepositoryPath string `json:"repository"`
StorageURL string `json:"storage"`
Encrypted bool `json:"encrypted"`
BackupProhibited bool `json:"no_backup"`
RestoreProhibited bool `json:"no_restore"`
DoNotSavePassword bool `json:"no_save_password"`
NobackupFile string `json:"nobackup_file"`
Keys map[string]string `json:"keys"`
}

View File

@@ -2,6 +2,11 @@
// Free for personal use and commercial trial
// Commercial use requires per-user licenses available from https://duplicacy.com
// NOTE: The code in the Wasabi storage module relies on all functions
// in this one except MoveFile(), IsMoveFileImplemented() and
// IsStrongConsistent(). Changes to the API here will need to be
// reflected there.
package duplicacy
import (

View File

@@ -53,7 +53,7 @@ func CreateSFTPStorage(server string, port int, username string, storageDir stri
}
if server == "sftp.hidrive.strato.com" {
sftpConfig.Ciphers = []string{"aes128-cbc", "aes128-ctr", "aes256-ctr"}
sftpConfig.Ciphers = []string{"aes128-ctr", "aes256-ctr"}
}
serverAddress := fmt.Sprintf("%s:%d", server, port)

src/duplicacy_shadowcopy.go Normal file → Executable file
View File

@@ -3,10 +3,11 @@
// Commercial use requires per-user licenses available from https://duplicacy.com
// +build !windows
// +build !darwin
package duplicacy
func CreateShadowCopy(top string, shadowCopy bool) (shadowTop string) {
func CreateShadowCopy(top string, shadowCopy bool, timeoutInSeconds int) (shadowTop string) {
return top
}

View File

@@ -0,0 +1,170 @@
//
// Shadow copy module for Mac OSX using APFS snapshot
//
//
// This module copyright 2018 Adam Marcus (https://github.com/amarcu5)
// and may be distributed under the same terms as Duplicacy.
package duplicacy
import (
"context"
"errors"
"io/ioutil"
"os"
"os/exec"
"strings"
"syscall"
"time"
)
var snapshotPath string
var snapshotDate string
// Converts char array to string
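// (The array is NUL-terminated, e.g. syscall.Statfs_t.Fstypename as used below;
// conversion stops at the first zero byte.)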
func CharsToString(ca []int8) string {
len := len(ca)
ba := make([]byte, len)
for i, v := range ca {
ba[i] = byte(v)
if ba[i] == 0 {
len = i
break
}
}
return string(ba[:len])
}
// Get ID of device containing path
func GetPathDeviceId(path string) (deviceId int32, err error) {
stat := syscall.Stat_t{}
err = syscall.Stat(path, &stat)
if err != nil {
return 0, err
}
return stat.Dev, nil
}
// Executes shell command with timeout and returns stdout
func CommandWithTimeout(timeoutInSeconds int, name string, arg ...string) (output string, err error) {
ctx, cancel := context.WithTimeout(context.Background(), time.Duration(timeoutInSeconds) * time.Second)
defer cancel()
cmd := exec.CommandContext(ctx, name, arg...)
out, err := cmd.Output()
if ctx.Err() == context.DeadlineExceeded {
err = errors.New("Command '" + name + "' timed out")
}
output = string(out)
return output, err
}
func DeleteShadowCopy() {
if snapshotPath == "" {
return
}
err := exec.Command("/sbin/umount", "-f", snapshotPath).Run()
if err != nil {
LOG_ERROR("VSS_DELETE", "Error while unmounting snapshot")
return
}
err = exec.Command("tmutil", "deletelocalsnapshots", snapshotDate).Run()
if err != nil {
LOG_ERROR("VSS_DELETE", "Error while deleting local snapshot")
return
}
err = os.RemoveAll(snapshotPath)
if err != nil {
LOG_ERROR("VSS_DELETE", "Error while deleting temporary mount directory")
return
}
LOG_INFO("VSS_DELETE", "Shadow copy unmounted and deleted at %s", snapshotPath)
snapshotPath = ""
}
func CreateShadowCopy(top string, shadowCopy bool, timeoutInSeconds int) (shadowTop string) {
if !shadowCopy {
return top
}
// Check repository filesystem is APFS
stat := syscall.Statfs_t{}
err := syscall.Statfs(top, &stat)
if err != nil {
LOG_ERROR("VSS_INIT", "Unable to determine filesystem of repository path")
return top
}
if CharsToString(stat.Fstypename[:]) != "apfs" {
LOG_WARN("VSS_INIT", "VSS requires APFS filesystem")
return top
}
// Check the path is local, as tmutil snapshots do not support APFS-formatted external drives
deviceIdLocal, err := GetPathDeviceId("/")
if err != nil {
LOG_ERROR("VSS_INIT", "Unable to get device ID of path: /")
return top
}
deviceIdRepository, err := GetPathDeviceId(top)
if err != nil {
LOG_ERROR("VSS_INIT", "Unable to get device ID of path: ", top)
return top
}
if deviceIdLocal != deviceIdRepository {
LOG_WARN("VSS_PATH", "VSS not supported for non-local repository path: ", top)
return top
}
if timeoutInSeconds <= 60 {
timeoutInSeconds = 60
}
// Create mount point
snapshotPath, err = ioutil.TempDir("/tmp/", "snp_")
if err != nil {
LOG_ERROR("VSS_CREATE", "Failed to create temporary mount directory")
return top
}
// Use tmutil to create snapshot
tmutilOutput, err := CommandWithTimeout(timeoutInSeconds, "tmutil", "snapshot")
if err != nil {
LOG_ERROR("VSS_CREATE", "Error while calling tmutil: ", err)
return top
}
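// tmutil typically reports something like
//   "Created local snapshot with date: 2018-11-03-114550"
// (output format assumed here), so the text after the colon is taken as the snapshot date
// and later passed to both the mount options and "tmutil deletelocalsnapshots".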
colonPos := strings.IndexByte(tmutilOutput, ':')
if colonPos < 0 {
LOG_ERROR("VSS_CREATE", "Snapshot creation failed: ", tmutilOutput)
return top
}
snapshotDate = strings.TrimSpace(tmutilOutput[colonPos+1:])
// Mount the snapshot as read-only and hide it from the GUI, i.e. Finder
_, err = CommandWithTimeout(timeoutInSeconds,
"/sbin/mount", "-t", "apfs", "-o", "nobrowse,-r,-s=com.apple.TimeMachine." + snapshotDate, "/", snapshotPath)
if err != nil {
LOG_ERROR("VSS_CREATE", "Error while mounting snapshot: ", err)
return top
}
LOG_INFO("VSS_DONE", "Shadow copy created and mounted at %s", snapshotPath)
return snapshotPath + top
}

View File

@@ -353,12 +353,15 @@ func DeleteShadowCopy() {
ole.CoUninitialize()
}
func CreateShadowCopy(top string, shadowCopy bool) (shadowTop string) {
func CreateShadowCopy(top string, shadowCopy bool, timeoutInSeconds int) (shadowTop string) {
if !shadowCopy {
return top
}
if timeoutInSeconds <= 60 {
timeoutInSeconds = 60
}
ole.CoInitialize(0)
defer ole.CoUninitialize()
@@ -416,7 +419,7 @@ func CreateShadowCopy(top string, shadowCopy bool) (shadowTop string) {
return top
}
if !async.Wait(60) {
if !async.Wait(timeoutInSeconds) {
LOG_ERROR("VSS_GATHER", "Shadow copy creation failed: GatherWriterMetadata didn't finish properly")
return top
}
@@ -456,7 +459,7 @@ func CreateShadowCopy(top string, shadowCopy bool) (shadowTop string) {
return top
}
if !async.Wait(60) {
if !async.Wait(timeoutInSeconds) {
LOG_ERROR("VSS_PREPARE", "Shadow copy creation failed: PrepareForBackup didn't finish properly")
return top
}
@@ -473,7 +476,7 @@ func CreateShadowCopy(top string, shadowCopy bool) (shadowTop string) {
return top
}
if !async.Wait(180) {
if !async.Wait(timeoutInSeconds) {
LOG_ERROR("VSS_SNAPSHOT", "Shadow copy creation failed: DoSnapshotSet didn't finish properly")
return top
}

View File

@@ -57,7 +57,7 @@ func CreateEmptySnapshot(id string) (snapshto *Snapshot) {
// CreateSnapshotFromDirectory creates a snapshot from the local directory 'top'. Only 'Files'
// will be constructed, while 'ChunkHashes' and 'ChunkLengths' can only be populated after uploading.
func CreateSnapshotFromDirectory(id string, top string) (snapshot *Snapshot, skippedDirectories []string,
func CreateSnapshotFromDirectory(id string, top string, nobackupFile string) (snapshot *Snapshot, skippedDirectories []string,
skippedFiles []string, err error) {
snapshot = &Snapshot{
@@ -125,7 +125,7 @@ func CreateSnapshotFromDirectory(id string, top string) (snapshot *Snapshot, ski
directory := directories[len(directories)-1]
directories = directories[:len(directories)-1]
snapshot.Files = append(snapshot.Files, directory)
subdirectories, skipped, err := ListEntries(top, directory.Path, &snapshot.Files, patterns, snapshot.discardAttributes)
subdirectories, skipped, err := ListEntries(top, directory.Path, &snapshot.Files, patterns, nobackupFile, snapshot.discardAttributes)
if err != nil {
LOG_WARN("LIST_FAILURE", "Failed to list subdirectory: %v", err)
skippedDirectories = append(skippedDirectories, directory.Path)
@@ -357,6 +357,11 @@ func (snapshot *Snapshot) LoadChunks(description []byte) (err error) {
return err
}
// ClearChunks removes loaded chunks from memory
func (snapshot *Snapshot) ClearChunks() {
snapshot.ChunkHashes = nil
}
// LoadLengths constructs 'ChunkLengths' from the json description.
func (snapshot *Snapshot) LoadLengths(description []byte) (err error) {
return json.Unmarshal(description, &snapshot.ChunkLengths)

File diff suppressed because it is too large

View File

@@ -14,6 +14,7 @@ import (
"strings"
"testing"
"time"
"io/ioutil"
)
func createDummySnapshot(snapshotID string, revision int, endTime int64) *Snapshot {
@@ -107,6 +108,9 @@ func createTestSnapshotManager(testDir string) *SnapshotManager {
snapshotCache.CreateDirectory(0, "snapshots")
snapshotManager.snapshotCache = snapshotCache
SetDuplicacyPreferencePath(testDir + "/.duplicacy")
return snapshotManager
}
@@ -140,7 +144,16 @@ func uploadRandomChunk(manager *SnapshotManager, chunkSize int) string {
return uploadTestChunk(manager, content)
}
func createTestSnapshot(manager *SnapshotManager, snapshotID string, revision int, startTime int64, endTime int64, chunkHashes []string) {
func uploadRandomChunks(manager *SnapshotManager, chunkSize int, numberOfChunks int) []string {
chunkList := make([]string, 0)
for i := 0; i < numberOfChunks; i++ {
chunkHash := uploadRandomChunk(manager, chunkSize)
chunkList = append(chunkList, chunkHash)
}
return chunkList
}
func createTestSnapshot(manager *SnapshotManager, snapshotID string, revision int, startTime int64, endTime int64, chunkHashes []string, tag string) {
snapshot := &Snapshot{
ID: snapshotID,
@@ -148,6 +161,7 @@ func createTestSnapshot(manager *SnapshotManager, snapshotID string, revision in
StartTime: startTime,
EndTime: endTime,
ChunkHashes: chunkHashes,
Tag: tag,
}
var chunkHashesInHex []string
@@ -199,7 +213,7 @@ func checkTestSnapshots(manager *SnapshotManager, expectedSnapshots int, expecte
snapshot := manager.DownloadSnapshot(snapshotID, revision)
numberOfSnapshots++
for _, chunk := range manager.GetSnapshotChunks(snapshot) {
for _, chunk := range manager.GetSnapshotChunks(snapshot, false) {
chunks[chunk] = true
}
}
@@ -222,7 +236,7 @@ func checkTestSnapshots(manager *SnapshotManager, expectedSnapshots int, expecte
}
}
func TestSingleRepositoryPrune(t *testing.T) {
func TestPruneSingleRepository(t *testing.T) {
setTestingT(t)
@@ -238,34 +252,35 @@ func TestSingleRepositoryPrune(t *testing.T) {
now := time.Now().Unix()
day := int64(24 * 3600)
t.Logf("Creating 1 snapshot")
createTestSnapshot(snapshotManager, "repository1", 1, now-3*day-3600, now-3*day-60, []string{chunkHash1, chunkHash2})
checkTestSnapshots(snapshotManager, 1, 2)
t.Logf("Creating 2 snapshots")
createTestSnapshot(snapshotManager, "repository1", 1, now-4*day-3600, now-3*day-60, []string{chunkHash1, chunkHash2}, "tag")
createTestSnapshot(snapshotManager, "repository1", 2, now-4*day-3600, now-3*day-60, []string{chunkHash1, chunkHash2}, "tag")
checkTestSnapshots(snapshotManager, 2, 2)
t.Logf("Creating 2 snapshots")
createTestSnapshot(snapshotManager, "repository1", 2, now-2*day-3600, now-2*day-60, []string{chunkHash2, chunkHash3})
createTestSnapshot(snapshotManager, "repository1", 3, now-1*day-3600, now-1*day-60, []string{chunkHash3, chunkHash4})
checkTestSnapshots(snapshotManager, 3, 0)
createTestSnapshot(snapshotManager, "repository1", 3, now-2*day-3600, now-2*day-60, []string{chunkHash2, chunkHash3}, "tag")
createTestSnapshot(snapshotManager, "repository1", 4, now-1*day-3600, now-1*day-60, []string{chunkHash3, chunkHash4}, "tag")
checkTestSnapshots(snapshotManager, 4, 0)
t.Logf("Removing snapshot repository1 revision 1 with --exclusive")
snapshotManager.PruneSnapshots("repository1", "repository1", []int{1}, []string{}, []string{}, false, true, []string{}, false, false, false)
t.Logf("Removing snapshot repository1 revisions 1 and 2 with --exclusive")
snapshotManager.PruneSnapshots("repository1", "repository1", []int{1, 2}, []string{}, []string{}, false, true, []string{}, false, false, false, 1)
checkTestSnapshots(snapshotManager, 2, 0)
t.Logf("Removing snapshot repository1 revision 2 without --exclusive")
snapshotManager.PruneSnapshots("repository1", "repository1", []int{2}, []string{}, []string{}, false, false, []string{}, false, false, false)
t.Logf("Removing snapshot repository1 revision 3 without --exclusive")
snapshotManager.PruneSnapshots("repository1", "repository1", []int{3}, []string{}, []string{}, false, false, []string{}, false, false, false, 1)
checkTestSnapshots(snapshotManager, 1, 2)
t.Logf("Creating 1 snapshot")
chunkHash5 := uploadRandomChunk(snapshotManager, chunkSize)
createTestSnapshot(snapshotManager, "repository1", 4, now+1*day-3600, now+1*day, []string{chunkHash4, chunkHash5})
createTestSnapshot(snapshotManager, "repository1", 5, now+1*day-3600, now+1*day, []string{chunkHash4, chunkHash5}, "tag")
checkTestSnapshots(snapshotManager, 2, 2)
t.Logf("Prune without removing any snapshots -- fossils will be deleted")
snapshotManager.PruneSnapshots("repository1", "repository1", []int{}, []string{}, []string{}, false, false, []string{}, false, false, false)
snapshotManager.PruneSnapshots("repository1", "repository1", []int{}, []string{}, []string{}, false, false, []string{}, false, false, false, 1)
checkTestSnapshots(snapshotManager, 2, 0)
}
func TestSingleHostPrune(t *testing.T) {
func TestPruneSingleHost(t *testing.T) {
setTestingT(t)
@@ -282,31 +297,31 @@ func TestSingleHostPrune(t *testing.T) {
now := time.Now().Unix()
day := int64(24 * 3600)
t.Logf("Creating 3 snapshots")
createTestSnapshot(snapshotManager, "vm1@host1", 1, now-3*day-3600, now-3*day-60, []string{chunkHash1, chunkHash2})
createTestSnapshot(snapshotManager, "vm1@host1", 2, now-2*day-3600, now-2*day-60, []string{chunkHash2, chunkHash3})
createTestSnapshot(snapshotManager, "vm2@host1", 1, now-3*day-3600, now-3*day-60, []string{chunkHash3, chunkHash4})
createTestSnapshot(snapshotManager, "vm1@host1", 1, now-3*day-3600, now-3*day-60, []string{chunkHash1, chunkHash2}, "tag")
createTestSnapshot(snapshotManager, "vm1@host1", 2, now-2*day-3600, now-2*day-60, []string{chunkHash2, chunkHash3}, "tag")
createTestSnapshot(snapshotManager, "vm2@host1", 1, now-3*day-3600, now-3*day-60, []string{chunkHash3, chunkHash4}, "tag")
checkTestSnapshots(snapshotManager, 3, 0)
t.Logf("Removing snapshot vm1@host1 revision 1 without --exclusive")
snapshotManager.PruneSnapshots("vm1@host1", "vm1@host1", []int{1}, []string{}, []string{}, false, false, []string{}, false, false, false)
snapshotManager.PruneSnapshots("vm1@host1", "vm1@host1", []int{1}, []string{}, []string{}, false, false, []string{}, false, false, false, 1)
checkTestSnapshots(snapshotManager, 2, 2)
t.Logf("Prune without removing any snapshots -- no fossils will be deleted")
snapshotManager.PruneSnapshots("vm1@host1", "vm1@host1", []int{}, []string{}, []string{}, false, false, []string{}, false, false, false)
snapshotManager.PruneSnapshots("vm1@host1", "vm1@host1", []int{}, []string{}, []string{}, false, false, []string{}, false, false, false, 1)
checkTestSnapshots(snapshotManager, 2, 2)
t.Logf("Creating 1 snapshot")
chunkHash5 := uploadRandomChunk(snapshotManager, chunkSize)
createTestSnapshot(snapshotManager, "vm2@host1", 2, now+1*day-3600, now+1*day, []string{chunkHash4, chunkHash5})
createTestSnapshot(snapshotManager, "vm2@host1", 2, now+1*day-3600, now+1*day, []string{chunkHash4, chunkHash5}, "tag")
checkTestSnapshots(snapshotManager, 3, 2)
t.Logf("Prune without removing any snapshots -- fossils will be deleted")
snapshotManager.PruneSnapshots("vm1@host1", "vm1@host1", []int{}, []string{}, []string{}, false, false, []string{}, false, false, false)
snapshotManager.PruneSnapshots("vm1@host1", "vm1@host1", []int{}, []string{}, []string{}, false, false, []string{}, false, false, false, 1)
checkTestSnapshots(snapshotManager, 3, 0)
}
func TestMultipleHostPrune(t *testing.T) {
func TestPruneMultipleHost(t *testing.T) {
setTestingT(t)
@@ -323,35 +338,35 @@ func TestMultipleHostPrune(t *testing.T) {
now := time.Now().Unix()
day := int64(24 * 3600)
t.Logf("Creating 3 snapshot")
createTestSnapshot(snapshotManager, "vm1@host1", 1, now-3*day-3600, now-3*day-60, []string{chunkHash1, chunkHash2})
createTestSnapshot(snapshotManager, "vm1@host1", 2, now-2*day-3600, now-2*day-60, []string{chunkHash2, chunkHash3})
createTestSnapshot(snapshotManager, "vm2@host2", 1, now-3*day-3600, now-3*day-60, []string{chunkHash3, chunkHash4})
createTestSnapshot(snapshotManager, "vm1@host1", 1, now-3*day-3600, now-3*day-60, []string{chunkHash1, chunkHash2}, "tag")
createTestSnapshot(snapshotManager, "vm1@host1", 2, now-2*day-3600, now-2*day-60, []string{chunkHash2, chunkHash3}, "tag")
createTestSnapshot(snapshotManager, "vm2@host2", 1, now-3*day-3600, now-3*day-60, []string{chunkHash3, chunkHash4}, "tag")
checkTestSnapshots(snapshotManager, 3, 0)
t.Logf("Removing snapshot vm1@host1 revision 1 without --exclusive")
snapshotManager.PruneSnapshots("vm1@host1", "vm1@host1", []int{1}, []string{}, []string{}, false, false, []string{}, false, false, false)
snapshotManager.PruneSnapshots("vm1@host1", "vm1@host1", []int{1}, []string{}, []string{}, false, false, []string{}, false, false, false, 1)
checkTestSnapshots(snapshotManager, 2, 2)
t.Logf("Prune without removing any snapshots -- no fossils will be deleted")
snapshotManager.PruneSnapshots("vm1@host1", "vm1@host1", []int{}, []string{}, []string{}, false, false, []string{}, false, false, false)
snapshotManager.PruneSnapshots("vm1@host1", "vm1@host1", []int{}, []string{}, []string{}, false, false, []string{}, false, false, false, 1)
checkTestSnapshots(snapshotManager, 2, 2)
t.Logf("Creating 1 snapshot")
chunkHash5 := uploadRandomChunk(snapshotManager, chunkSize)
createTestSnapshot(snapshotManager, "vm2@host2", 2, now+1*day-3600, now+1*day, []string{chunkHash4, chunkHash5})
createTestSnapshot(snapshotManager, "vm2@host2", 2, now+1*day-3600, now+1*day, []string{chunkHash4, chunkHash5}, "tag")
checkTestSnapshots(snapshotManager, 3, 2)
t.Logf("Prune without removing any snapshots -- no fossils will be deleted")
snapshotManager.PruneSnapshots("vm1@host1", "vm1@host1", []int{}, []string{}, []string{}, false, false, []string{}, false, false, false)
snapshotManager.PruneSnapshots("vm1@host1", "vm1@host1", []int{}, []string{}, []string{}, false, false, []string{}, false, false, false, 1)
checkTestSnapshots(snapshotManager, 3, 2)
t.Logf("Creating 1 snapshot")
chunkHash6 := uploadRandomChunk(snapshotManager, chunkSize)
createTestSnapshot(snapshotManager, "vm1@host1", 3, now+1*day-3600, now+1*day, []string{chunkHash5, chunkHash6})
createTestSnapshot(snapshotManager, "vm1@host1", 3, now+1*day-3600, now+1*day, []string{chunkHash5, chunkHash6}, "tag")
checkTestSnapshots(snapshotManager, 4, 2)
t.Logf("Prune without removing any snapshots -- fossils will be deleted")
snapshotManager.PruneSnapshots("vm1@host1", "vm1@host1", []int{}, []string{}, []string{}, false, false, []string{}, false, false, false)
snapshotManager.PruneSnapshots("vm1@host1", "vm1@host1", []int{}, []string{}, []string{}, false, false, []string{}, false, false, false, 1)
checkTestSnapshots(snapshotManager, 4, 0)
}
@@ -371,25 +386,25 @@ func TestPruneAndResurrect(t *testing.T) {
now := time.Now().Unix()
day := int64(24 * 3600)
t.Logf("Creating 2 snapshots")
createTestSnapshot(snapshotManager, "vm1@host1", 1, now-3*day-3600, now-3*day-60, []string{chunkHash1, chunkHash2})
createTestSnapshot(snapshotManager, "vm1@host1", 2, now-2*day-3600, now-2*day-60, []string{chunkHash2, chunkHash3})
createTestSnapshot(snapshotManager, "vm1@host1", 1, now-3*day-3600, now-3*day-60, []string{chunkHash1, chunkHash2}, "tag")
createTestSnapshot(snapshotManager, "vm1@host1", 2, now-2*day-3600, now-2*day-60, []string{chunkHash2, chunkHash3}, "tag")
checkTestSnapshots(snapshotManager, 2, 0)
t.Logf("Removing snapshot vm1@host1 revision 1 without --exclusive")
snapshotManager.PruneSnapshots("vm1@host1", "vm1@host1", []int{1}, []string{}, []string{}, false, false, []string{}, false, false, false)
snapshotManager.PruneSnapshots("vm1@host1", "vm1@host1", []int{1}, []string{}, []string{}, false, false, []string{}, false, false, false, 1)
checkTestSnapshots(snapshotManager, 1, 2)
t.Logf("Creating 1 snapshot")
chunkHash4 := uploadRandomChunk(snapshotManager, chunkSize)
createTestSnapshot(snapshotManager, "vm1@host1", 4, now+1*day-3600, now+1*day, []string{chunkHash4, chunkHash1})
createTestSnapshot(snapshotManager, "vm1@host1", 4, now+1*day-3600, now+1*day, []string{chunkHash4, chunkHash1}, "tag")
checkTestSnapshots(snapshotManager, 2, 2)
t.Logf("Prune without removing any snapshots -- one fossil will be resurrected")
snapshotManager.PruneSnapshots("vm1@host1", "vm1@host1", []int{}, []string{}, []string{}, false, false, []string{}, false, false, false)
snapshotManager.PruneSnapshots("vm1@host1", "vm1@host1", []int{}, []string{}, []string{}, false, false, []string{}, false, false, false, 1)
checkTestSnapshots(snapshotManager, 2, 0)
}
func TestInactiveHostPrune(t *testing.T) {
func TestPruneWithInactiveHost(t *testing.T) {
setTestingT(t)
@@ -406,31 +421,31 @@ func TestInactiveHostPrune(t *testing.T) {
now := time.Now().Unix()
day := int64(24 * 3600)
t.Logf("Creating 3 snapshot")
createTestSnapshot(snapshotManager, "vm1@host1", 1, now-3*day-3600, now-3*day-60, []string{chunkHash1, chunkHash2})
createTestSnapshot(snapshotManager, "vm1@host1", 2, now-2*day-3600, now-2*day-60, []string{chunkHash2, chunkHash3})
createTestSnapshot(snapshotManager, "vm1@host1", 1, now-3*day-3600, now-3*day-60, []string{chunkHash1, chunkHash2}, "tag")
createTestSnapshot(snapshotManager, "vm1@host1", 2, now-2*day-3600, now-2*day-60, []string{chunkHash2, chunkHash3}, "tag")
// Host2 is inactive
createTestSnapshot(snapshotManager, "vm2@host2", 1, now-7*day-3600, now-7*day-60, []string{chunkHash3, chunkHash4})
createTestSnapshot(snapshotManager, "vm2@host2", 1, now-7*day-3600, now-7*day-60, []string{chunkHash3, chunkHash4}, "tag")
checkTestSnapshots(snapshotManager, 3, 0)
t.Logf("Removing snapshot vm1@host1 revision 1")
snapshotManager.PruneSnapshots("vm1@host1", "vm1@host1", []int{1}, []string{}, []string{}, false, false, []string{}, false, false, false)
snapshotManager.PruneSnapshots("vm1@host1", "vm1@host1", []int{1}, []string{}, []string{}, false, false, []string{}, false, false, false, 1)
checkTestSnapshots(snapshotManager, 2, 2)
t.Logf("Prune without removing any snapshots -- no fossils will be deleted")
snapshotManager.PruneSnapshots("vm1@host1", "vm1@host1", []int{}, []string{}, []string{}, false, false, []string{}, false, false, false)
snapshotManager.PruneSnapshots("vm1@host1", "vm1@host1", []int{}, []string{}, []string{}, false, false, []string{}, false, false, false, 1)
checkTestSnapshots(snapshotManager, 2, 2)
t.Logf("Creating 1 snapshot")
chunkHash5 := uploadRandomChunk(snapshotManager, chunkSize)
createTestSnapshot(snapshotManager, "vm1@host1", 3, now+1*day-3600, now+1*day, []string{chunkHash4, chunkHash5})
createTestSnapshot(snapshotManager, "vm1@host1", 3, now+1*day-3600, now+1*day, []string{chunkHash4, chunkHash5}, "tag")
checkTestSnapshots(snapshotManager, 3, 2)
t.Logf("Prune without removing any snapshots -- fossils will be deleted")
snapshotManager.PruneSnapshots("vm1@host1", "vm1@host1", []int{}, []string{}, []string{}, false, false, []string{}, false, false, false)
snapshotManager.PruneSnapshots("vm1@host1", "vm1@host1", []int{}, []string{}, []string{}, false, false, []string{}, false, false, false, 1)
checkTestSnapshots(snapshotManager, 3, 0)
}
func TestRetentionPolicy(t *testing.T) {
func TestPruneWithRetentionPolicy(t *testing.T) {
setTestingT(t)
@@ -448,20 +463,225 @@ func TestRetentionPolicy(t *testing.T) {
day := int64(24 * 3600)
t.Logf("Creating 30 snapshots")
for i := 0; i < 30; i++ {
createTestSnapshot(snapshotManager, "vm1@host1", i+1, now-int64(30-i)*day-3600, now-int64(30-i)*day-60, []string{chunkHashes[i]})
createTestSnapshot(snapshotManager, "vm1@host1", i+1, now-int64(30-i)*day-3600, now-int64(30-i)*day-60, []string{chunkHashes[i]}, "tag")
}
checkTestSnapshots(snapshotManager, 30, 0)
t.Logf("Removing snapshot vm1@host1 0:20 with --exclusive")
snapshotManager.PruneSnapshots("vm1@host1", "vm1@host1", []int{}, []string{}, []string{"0:20"}, false, true, []string{}, false, false, false)
snapshotManager.PruneSnapshots("vm1@host1", "vm1@host1", []int{}, []string{}, []string{"0:20"}, false, true, []string{}, false, false, false, 1)
checkTestSnapshots(snapshotManager, 19, 0)
t.Logf("Removing snapshot vm1@host1 -k 0:20 with --exclusive")
snapshotManager.PruneSnapshots("vm1@host1", "vm1@host1", []int{}, []string{}, []string{"0:20"}, false, true, []string{}, false, false, false)
snapshotManager.PruneSnapshots("vm1@host1", "vm1@host1", []int{}, []string{}, []string{"0:20"}, false, true, []string{}, false, false, false, 1)
checkTestSnapshots(snapshotManager, 19, 0)
t.Logf("Removing snapshot vm1@host1 -k 3:14 -k 2:7 with --exclusive")
snapshotManager.PruneSnapshots("vm1@host1", "vm1@host1", []int{}, []string{}, []string{"3:14", "2:7"}, false, true, []string{}, false, false, false)
snapshotManager.PruneSnapshots("vm1@host1", "vm1@host1", []int{}, []string{}, []string{"3:14", "2:7"}, false, true, []string{}, false, false, false, 1)
checkTestSnapshots(snapshotManager, 12, 0)
}
func TestPruneWithRetentionPolicyAndTag(t *testing.T) {
setTestingT(t)
testDir := path.Join(os.TempDir(), "duplicacy_test", "snapshot_test")
snapshotManager := createTestSnapshotManager(testDir)
chunkSize := 1024
var chunkHashes []string
for i := 0; i < 30; i++ {
chunkHashes = append(chunkHashes, uploadRandomChunk(snapshotManager, chunkSize))
}
now := time.Now().Unix()
day := int64(24 * 3600)
t.Logf("Creating 30 snapshots")
for i := 0; i < 30; i++ {
tag := "auto"
if i % 3 == 0 {
tag = "manual"
}
createTestSnapshot(snapshotManager, "vm1@host1", i+1, now-int64(30-i)*day-3600, now-int64(30-i)*day-60, []string{chunkHashes[i]}, tag)
}
checkTestSnapshots(snapshotManager, 30, 0)
t.Logf("Removing snapshot vm1@host1 0:20 with --exclusive and --tag manual")
snapshotManager.PruneSnapshots("vm1@host1", "vm1@host1", []int{}, []string{"manual"}, []string{"0:7"}, false, true, []string{}, false, false, false, 1)
checkTestSnapshots(snapshotManager, 22, 0)
}
// Test that an unreferenced fossil shouldn't be removed, as it may be the result of another prune job in progress.
func TestPruneWithFossils(t *testing.T) {
setTestingT(t)
testDir := path.Join(os.TempDir(), "duplicacy_test", "snapshot_test")
snapshotManager := createTestSnapshotManager(testDir)
chunkSize := 1024
chunkHash1 := uploadRandomChunk(snapshotManager, chunkSize)
chunkHash2 := uploadRandomChunk(snapshotManager, chunkSize)
chunkHash3 := uploadRandomChunk(snapshotManager, chunkSize)
// Create an unreferenced fossil
snapshotManager.storage.UploadFile(0, "chunks/113b6a2350dcfd836829c47304dd330fa6b58b93dd7ac696c6b7b913e6868662.fsl", []byte("this is a test fossil"))
now := time.Now().Unix()
day := int64(24 * 3600)
t.Logf("Creating 2 snapshots")
createTestSnapshot(snapshotManager, "vm1@host1", 1, now-3*day-3600, now-3*day-60, []string{chunkHash1, chunkHash2}, "tag")
createTestSnapshot(snapshotManager, "vm1@host1", 2, now-2*day-3600, now-2*day-60, []string{chunkHash2, chunkHash3}, "tag")
checkTestSnapshots(snapshotManager, 2, 1)
t.Logf("Prune without removing any snapshots but with --exhaustive")
// The unreferenced fossil shouldn't be removed
snapshotManager.PruneSnapshots("vm1@host1", "vm1@host1", []int{}, []string{}, []string{}, true, false, []string{}, false, false, false, 1)
checkTestSnapshots(snapshotManager, 2, 1)
t.Logf("Prune without removing any snapshots but with --exclusive")
// Now the unreferenced fossil should be removed
snapshotManager.PruneSnapshots("vm1@host1", "vm1@host1", []int{}, []string{}, []string{}, false, true, []string{}, false, false, false, 1)
checkTestSnapshots(snapshotManager, 2, 0)
}
func TestPruneMultipleThread(t *testing.T) {
setTestingT(t)
testDir := path.Join(os.TempDir(), "duplicacy_test", "snapshot_test")
snapshotManager := createTestSnapshotManager(testDir)
chunkSize := 1024
numberOfChunks := 256
numberOfThreads := 4
chunkList1 := uploadRandomChunks(snapshotManager, chunkSize, numberOfChunks)
chunkList2 := uploadRandomChunks(snapshotManager, chunkSize, numberOfChunks)
now := time.Now().Unix()
day := int64(24 * 3600)
t.Logf("Creating 2 snapshots")
createTestSnapshot(snapshotManager, "repository1", 1, now-4*day-3600, now-3*day-60, chunkList1, "tag")
createTestSnapshot(snapshotManager, "repository1", 2, now-3*day-3600, now-2*day-60, chunkList2, "tag")
checkTestSnapshots(snapshotManager, 2, 0)
t.Logf("Removing snapshot revisions 1 with --exclusive")
snapshotManager.PruneSnapshots("repository1", "repository1", []int{1}, []string{}, []string{}, false, true, []string{}, false, false, false, numberOfThreads)
checkTestSnapshots(snapshotManager, 1, 0)
t.Logf("Creating 1 more snapshot")
chunkList3 := uploadRandomChunks(snapshotManager, chunkSize, numberOfChunks)
createTestSnapshot(snapshotManager, "repository1", 3, now-2*day-3600, now-1*day-60, chunkList3, "tag")
t.Logf("Removing snapshot repository1 revision 2 without --exclusive")
snapshotManager.PruneSnapshots("repository1", "repository1", []int{2}, []string{}, []string{}, false, false, []string{}, false, false, false, numberOfThreads)
t.Logf("Prune without removing any snapshots but with --exclusive")
snapshotManager.PruneSnapshots("repository1", "repository1", []int{}, []string{}, []string{}, false, true, []string{}, false, false, false, numberOfThreads)
checkTestSnapshots(snapshotManager, 1, 0)
}
// A snapshot not seen by a fossil collection should always be considered a new snapshot in the fossil deletion step
func TestPruneNewSnapshots(t *testing.T) {
setTestingT(t)
testDir := path.Join(os.TempDir(), "duplicacy_test", "snapshot_test")
snapshotManager := createTestSnapshotManager(testDir)
chunkSize := 1024
chunkHash1 := uploadRandomChunk(snapshotManager, chunkSize)
chunkHash2 := uploadRandomChunk(snapshotManager, chunkSize)
chunkHash3 := uploadRandomChunk(snapshotManager, chunkSize)
chunkHash4 := uploadRandomChunk(snapshotManager, chunkSize)
now := time.Now().Unix()
day := int64(24 * 3600)
t.Logf("Creating 3 snapshots")
createTestSnapshot(snapshotManager, "vm1@host1", 1, now-3*day-3600, now-3*day-60, []string{chunkHash1, chunkHash2}, "tag")
createTestSnapshot(snapshotManager, "vm1@host1", 2, now-2*day-3600, now-2*day-60, []string{chunkHash2, chunkHash3}, "tag")
createTestSnapshot(snapshotManager, "vm2@host1", 1, now-2*day-3600, now-2*day-60, []string{chunkHash3, chunkHash4}, "tag")
checkTestSnapshots(snapshotManager, 3, 0)
t.Logf("Prune snapshot 1")
// chunkHash1 should be marked as fossil
snapshotManager.PruneSnapshots("vm1@host1", "vm1@host1", []int{1}, []string{}, []string{}, false, false, []string{}, false, false, false, 1)
checkTestSnapshots(snapshotManager, 2, 2)
chunkHash5 := uploadRandomChunk(snapshotManager, chunkSize)
// Create another snapshot of vm1 that brings back chunkHash1
createTestSnapshot(snapshotManager, "vm1@host1", 3, now-0*day-3600, now-0*day-60, []string{chunkHash1, chunkHash3}, "tag")
// Create another snapshot of vm2 so the fossil collection will be processed by next prune
createTestSnapshot(snapshotManager, "vm2@host1", 2, now + 3600, now + 3600 * 2, []string{chunkHash4, chunkHash5}, "tag")
// Now chunkHash1 will be resurrected
snapshotManager.PruneSnapshots("vm1@host1", "vm1@host1", []int{}, []string{}, []string{}, false, false, []string{}, false, false, false, 1)
checkTestSnapshots(snapshotManager, 4, 0)
snapshotManager.CheckSnapshots("vm1@host1", []int{2, 3}, "", false, false, false, false, false);
}
// A fossil collection left by an aborted prune should be ignored if any supposedly deleted snapshot exists
func TestPruneGhostSnapshots(t *testing.T) {
setTestingT(t)
EnableStackTrace()
testDir := path.Join(os.TempDir(), "duplicacy_test", "snapshot_test")
snapshotManager := createTestSnapshotManager(testDir)
chunkSize := 1024
chunkHash1 := uploadRandomChunk(snapshotManager, chunkSize)
chunkHash2 := uploadRandomChunk(snapshotManager, chunkSize)
chunkHash3 := uploadRandomChunk(snapshotManager, chunkSize)
now := time.Now().Unix()
day := int64(24 * 3600)
t.Logf("Creating 2 snapshots")
createTestSnapshot(snapshotManager, "vm1@host1", 1, now-3*day-3600, now-3*day-60, []string{chunkHash1, chunkHash2}, "tag")
createTestSnapshot(snapshotManager, "vm1@host1", 2, now-2*day-3600, now-2*day-60, []string{chunkHash2, chunkHash3}, "tag")
checkTestSnapshots(snapshotManager, 2, 0)
snapshot1, err := ioutil.ReadFile(path.Join(testDir, "snapshots", "vm1@host1", "1"))
if err != nil {
t.Errorf("Failed to read snapshot file: %v", err)
}
t.Logf("Prune snapshot 1")
// chunkHash1 should be marked as fossil
snapshotManager.PruneSnapshots("vm1@host1", "vm1@host1", []int{1}, []string{}, []string{}, false, false, []string{}, false, false, false, 1)
checkTestSnapshots(snapshotManager, 1, 2)
// Recover the snapshot file for revision 1; this is to simulate a scenario where prune may encounter a network error after
// leaving the fossil collection but before deleting any snapshots.
err = ioutil.WriteFile(path.Join(testDir, "snapshots", "vm1@host1", "1"), snapshot1, 0644)
if err != nil {
t.Errorf("Failed to write snapshot file: %v", err)
}
// Create another snapshot of vm1 so the fossil collection becomes eligible for processing.
chunkHash4 := uploadRandomChunk(snapshotManager, chunkSize)
createTestSnapshot(snapshotManager, "vm1@host1", 3, now - day - 3600, now - day - 60, []string{chunkHash3, chunkHash4}, "tag")
// Run the prune again, but the fossil collection should be ignored since revision 1 still exists
snapshotManager.PruneSnapshots("vm1@host1", "vm1@host1", []int{}, []string{}, []string{}, false, false, []string{}, false, false, false, 1)
checkTestSnapshots(snapshotManager, 3, 2)
snapshotManager.CheckSnapshots("vm1@host1", []int{1, 2, 3}, "", false, false, false, true /*searchFossils*/, false);
// Prune snapshot 1 again
snapshotManager.PruneSnapshots("vm1@host1", "vm1@host1", []int{1}, []string{}, []string{}, false, false, []string{}, false, false, false, 1)
checkTestSnapshots(snapshotManager, 2, 2)
// Create another snapshot
chunkHash5 := uploadRandomChunk(snapshotManager, chunkSize)
createTestSnapshot(snapshotManager, "vm1@host1", 4, now + 3600, now + 3600 * 2, []string{chunkHash5, chunkHash5}, "tag")
checkTestSnapshots(snapshotManager, 3, 2)
// Run the prune again and this time the fossil collection will be processed and the fossils removed
snapshotManager.PruneSnapshots("vm1@host1", "vm1@host1", []int{}, []string{}, []string{}, false, false, []string{}, false, false, false, 1)
checkTestSnapshots(snapshotManager, 3, 0)
snapshotManager.CheckSnapshots("vm1@host1", []int{2, 3, 4}, "", false, false, false, false, false);
}

View File

@@ -109,7 +109,7 @@ func (storage *StorageBase) SetNestingLevels(config *Config) {
exist, _, _, err := storage.DerivedStorage.GetFileInfo(0, "nesting")
if err == nil && exist {
nestingFile := CreateChunk(CreateConfig(), true)
if storage.DerivedStorage.DownloadFile(0, "config", nestingFile) == nil {
if storage.DerivedStorage.DownloadFile(0, "nesting", nestingFile) == nil {
var nesting struct {
ReadLevels []int `json:"read-levels"`
WriteLevel int `json:"write-level"`
@@ -261,7 +261,7 @@ func CreateStorage(preference Preference, resetPassword bool, threads int) (stor
return fileStorage
}
urlRegex := regexp.MustCompile(`^(\w+)://([\w\-]+@)?([^/]+)(/(.+))?`)
urlRegex := regexp.MustCompile(`^([\w-]+)://([\w\-@\.]+@)?([^/]+)(/(.+))?`)
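// As an illustrative example, "webdav://alice@dav.example.com/backups" should yield
// matched[1] == "webdav", matched[2] == "alice@", matched[3] == "dav.example.com" and
// matched[5] == "backups"; the widened character classes also accept schemes such as
// "webdav-http" and usernames containing '.' or '@'.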
matched := urlRegex.FindStringSubmatch(storageURL)
@@ -461,6 +461,42 @@ func CreateStorage(preference Preference, resetPassword bool, threads int) (stor
SavePassword(preference, "s3_secret", secretKey)
return storage
} else if matched[1] == "wasabi" {
region := matched[2]
endpoint := matched[3]
bucket := matched[5]
if region != "" {
region = region[:len(region)-1]
}
key := GetPassword(preference, "wasabi_key",
"Enter Wasabi key:", true, resetPassword)
secret := GetPassword(preference, "wasabi_secret",
"Enter Wasabi secret:", true, resetPassword)
storageDir := ""
if strings.Contains(bucket, "/") {
firstSlash := strings.Index(bucket, "/")
storageDir = bucket[firstSlash+1:]
bucket = bucket[:firstSlash]
}
storage, err := CreateWasabiStorage(region, endpoint,
bucket, storageDir, key, secret, threads)
if err != nil {
LOG_ERROR("STORAGE_CREATE", "Failed to load the Wasabi storage at %s: %v", storageURL, err)
return nil
}
SavePassword(preference, "wasabi_key", key)
SavePassword(preference, "wasabi_secret", secret)
return storage
} else if matched[1] == "dropbox" {
storageDir := matched[3] + matched[5]
token := GetPassword(preference, "dropbox_token", "Enter Dropbox access token:", true, resetPassword)
@@ -560,6 +596,39 @@ func CreateStorage(preference Preference, resetPassword bool, threads int) (stor
}
SavePassword(preference, "hubic_token", tokenFile)
return hubicStorage
} else if matched[1] == "swift" {
prompt := fmt.Sprintf("Enter the OpenStack Swift key:")
key := GetPassword(preference, "swift_key", prompt, true, resetPassword)
swiftStorage, err := CreateSwiftStorage(storageURL[8:], key, threads)
if err != nil {
LOG_ERROR("STORAGE_CREATE", "Failed to load the OpenStack Swift storage at %s: %v", storageURL, err)
return nil
}
SavePassword(preference, "swift_key", key)
return swiftStorage
} else if matched[1] == "webdav" || matched[1] == "webdav-http" {
server := matched[3]
username := matched[2]
username = username[:len(username) - 1]
storageDir := matched[5]
port := 0
useHTTP := matched[1] == "webdav-http"
if strings.Contains(server, ":") {
index := strings.Index(server, ":")
port, _ = strconv.Atoi(server[index+1:])
server = server[:index]
}
prompt := fmt.Sprintf("Enter the WebDAV password:")
password := GetPassword(preference, "webdav_password", prompt, true, resetPassword)
webDAVStorage, err := CreateWebDAVStorage(server, port, username, password, storageDir, useHTTP, threads)
if err != nil {
LOG_ERROR("STORAGE_CREATE", "Failed to load the WebDAV storage at %s: %v", storageURL, err)
return nil
}
SavePassword(preference, "webdav_password", password)
return webDAVStorage
} else {
LOG_ERROR("STORAGE_CREATE", "The storage type '%s' is not supported", matched[1])
return nil

View File

@@ -78,10 +78,14 @@ func loadStorage(localStoragePath string, threads int) (Storage, error) {
storage, err := CreateSFTPStorageWithPassword(config["server"], port, config["username"], config["directory"], 2, config["password"], threads)
storage.SetDefaultNestingLevels([]int{2, 3}, 2)
return storage, err
} else if testStorageName == "s3" || testStorageName == "wasabi" {
} else if testStorageName == "s3" {
storage, err := CreateS3Storage(config["region"], config["endpoint"], config["bucket"], config["directory"], config["access_key"], config["secret_key"], threads, true, false)
storage.SetDefaultNestingLevels([]int{2, 3}, 2)
return storage, err
storage.SetDefaultNestingLevels([]int{2, 3}, 2)
} else if testStorageName == "wasabi" {
storage, err := CreateWasabiStorage(config["region"], config["endpoint"], config["bucket"], config["directory"], config["access_key"], config["secret_key"], threads)
return storage, err
storage.SetDefaultNestingLevels([]int{2, 3}, 2)
} else if testStorageName == "s3c" {
storage, err := CreateS3CStorage(config["region"], config["endpoint"], config["bucket"], config["directory"], config["access_key"], config["secret_key"], threads)
storage.SetDefaultNestingLevels([]int{2, 3}, 2)
@@ -138,9 +142,22 @@ func loadStorage(localStoragePath string, threads int) (Storage, error) {
storage, err := CreateHubicStorage(config["token_file"], config["storage_path"], threads)
storage.SetDefaultNestingLevels([]int{2, 3}, 2)
return storage, err
} else if testStorageName == "memset" {
storage, err := CreateSwiftStorage(config["storage_url"], config["key"], threads)
storage.SetDefaultNestingLevels([]int{2, 3}, 2)
return storage, err
} else if testStorageName == "pcloud" || testStorageName == "box" {
storage, err := CreateWebDAVStorage(config["host"], 0, config["username"], config["password"], config["storage_path"], false, threads)
if err != nil {
return nil, err
}
storage.SetDefaultNestingLevels([]int{2, 3}, 2)
return storage, err
} else {
return nil, fmt.Errorf("Invalid storage named: %s", testStorageName)
}
return nil, fmt.Errorf("Invalid storage named: %s", testStorageName)
}
func cleanStorage(storage Storage) {
@@ -370,6 +387,7 @@ func TestStorage(t *testing.T) {
snapshotIDs := []string{}
for _, snapshotDir := range snapshotDirs {
LOG_INFO("debug", "snapshot dir: %s", snapshotDir)
if len(snapshotDir) > 0 && snapshotDir[len(snapshotDir)-1] == '/' {
snapshotIDs = append(snapshotIDs, snapshotDir[:len(snapshotDir)-1])
}
@@ -496,7 +514,7 @@ func TestStorage(t *testing.T) {
} else {
err = storage.DeleteFile(0, filePath)
if err != nil {
t.Errorf("Failed to delete file %s: %v", filePath)
t.Errorf("Failed to delete file %s: %v", filePath, err)
} else {
time.Sleep(time.Duration(delay) * time.Second)
filePath, exist, _, err = storage.FindChunk(0, chunks[1], true)
@@ -583,12 +601,11 @@ func TestCleanStorage(t *testing.T) {
storage.DeleteFile(0, "config")
LOG_INFO("DELETE_FILE", "Deleted config")
files, _, err := storage.ListFiles(0, "chunks/")
for _, file := range files {
if len(file) > 0 && file[len(file)-1] != '/' {
LOG_DEBUG("FILE_EXIST", "File %s exists after deletion", file)
}
if len(file) > 0 && file[len(file)-1] != '/' {
LOG_DEBUG("FILE_EXIST", "File %s exists after deletion", file)
}
}
}

View File

@@ -0,0 +1,251 @@
// Copyright (c) Acrosync LLC. All rights reserved.
// Free for personal use and commercial trial
// Commercial use requires per-user licenses available from https://duplicacy.com
package duplicacy
import (
"strconv"
"strings"
"time"
"github.com/ncw/swift"
)
type SwiftStorage struct {
StorageBase
connection *swift.Connection
container string
storageDir string
threads int
}
// CreateSwiftStorage creates an OpenStack Swift storage object. storageURL is in the form of
// `user@authURL/container/path?arg1=value1&arg2=value2`
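// As a worked example with made-up values, "demo@auth.example.com/v3/backup/duplicacy?tenant=demo"
// is split by the code below into user "demo", auth URL "https://auth.example.com/v3",
// container "backup", storage path "duplicacy/", and the "tenant" query argument.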
func CreateSwiftStorage(storageURL string, key string, threads int) (storage *SwiftStorage, err error) {
// This is the map to store all arguments
arguments := make(map[string]string)
// Check if there are arguments provided as a query string
if strings.Contains(storageURL, "?") {
urlAndArguments := strings.SplitN(storageURL, "?", 2)
storageURL = urlAndArguments[0]
for _, pair := range strings.Split(urlAndArguments[1], "&") {
if strings.Contains(pair, "=") {
keyAndValue := strings.Split(pair, "=")
arguments[keyAndValue[0]] = keyAndValue[1]
}
}
}
// Take out the user name if there is one
if strings.Contains(storageURL, "@") {
userAndURL := strings.Split(storageURL, "@")
arguments["user"] = userAndURL[0]
storageURL = userAndURL[1]
}
// The version is used to split authURL and container/path
versions := []string{"/v1/", "/v1.0/", "/v2/", "/v2.0/", "/v3/", "/v3.0/", "/v4/", "/v4.0/"}
storageDir := ""
for _, version := range versions {
if strings.Contains(storageURL, version) {
urlAndStorageDir := strings.SplitN(storageURL, version, 2)
storageURL = urlAndStorageDir[0] + version[0:len(version)-1]
storageDir = urlAndStorageDir[1]
}
}
// If no container/path is specified, find them from the arguments
if storageDir == "" {
storageDir = arguments["storage_dir"]
}
// Now separate the container name from the storage path
container := ""
if strings.Contains(storageDir, "/") {
containerAndStorageDir := strings.SplitN(storageDir, "/", 2)
container = containerAndStorageDir[0]
storageDir = containerAndStorageDir[1]
if len(storageDir) > 0 && storageDir[len(storageDir)-1] != '/' {
storageDir += "/"
}
} else {
container = storageDir
storageDir = ""
}
// Number of retries on error
retries := 4
if value, ok := arguments["retries"]; ok {
retries, _ = strconv.Atoi(value)
}
// Connect channel timeout
connectionTimeout := 10
if value, ok := arguments["connection_timeout"]; ok {
connectionTimeout, _ = strconv.Atoi(value)
}
// Data channel timeout
timeout := 60
if value, ok := arguments["timeout"]; ok {
timeout, _ = strconv.Atoi(value)
}
// Auth version; default to auto-detect
authVersion := 0
if value, ok := arguments["auth_version"]; ok {
authVersion, _ = strconv.Atoi(value)
}
// Allow http to be used by setting "protocol=http" in arguments
if _, ok := arguments["protocol"]; !ok {
arguments["protocol"] = "https"
}
// Please refer to https://godoc.org/github.com/ncw/swift#Connection
connection := swift.Connection{
Domain: arguments["domain"],
DomainId: arguments["domain_id"],
UserName: arguments["user"],
UserId: arguments["user_id"],
ApiKey: key,
AuthUrl: arguments["protocol"] + "://" + storageURL,
Retries: retries,
UserAgent: arguments["user_agent"],
ConnectTimeout: time.Duration(connectionTimeout) * time.Second,
Timeout: time.Duration(timeout) * time.Second,
Region: arguments["region"],
AuthVersion: authVersion,
Internal: false,
Tenant: arguments["tenant"],
TenantId: arguments["tenant_id"],
EndpointType: swift.EndpointType(arguments["endpiont_type"]),
TenantDomain: arguments["tenant_domain"],
TenantDomainId: arguments["tenant_domain_id"],
TrustId: arguments["trust_id"],
}
_, _, err = connection.Container(container)
if err != nil {
return nil, err
}
storage = &SwiftStorage{
connection: &connection,
container: container,
storageDir: storageDir,
threads: threads,
}
storage.DerivedStorage = storage
storage.SetDefaultNestingLevels([]int{1}, 1)
return storage, nil
}
// ListFiles returns the list of files and subdirectories under 'dir' (non-recursively)
func (storage *SwiftStorage) ListFiles(threadIndex int, dir string) (files []string, sizes []int64, err error) {
if len(dir) > 0 && dir[len(dir)-1] != '/' {
dir += "/"
}
isSnapshotDir := dir == "snapshots/"
dir = storage.storageDir + dir
options := swift.ObjectsOpts{
Prefix: dir,
Limit: 1000,
}
if isSnapshotDir {
options.Delimiter = '/'
}
objects, err := storage.connection.ObjectsAll(storage.container, &options)
if err != nil {
return nil, nil, err
}
for _, obj := range objects {
if isSnapshotDir {
if obj.SubDir != "" {
files = append(files, obj.SubDir[len(dir):])
sizes = append(sizes, 0)
}
} else {
files = append(files, obj.Name[len(dir):])
sizes = append(sizes, obj.Bytes)
}
}
return files, sizes, nil
}
// DeleteFile deletes the file or directory at 'filePath'.
func (storage *SwiftStorage) DeleteFile(threadIndex int, filePath string) (err error) {
return storage.connection.ObjectDelete(storage.container, storage.storageDir+filePath)
}
// MoveFile renames the file.
func (storage *SwiftStorage) MoveFile(threadIndex int, from string, to string) (err error) {
return storage.connection.ObjectMove(storage.container, storage.storageDir+from,
storage.container, storage.storageDir+to)
}
// CreateDirectory creates a new directory.
func (storage *SwiftStorage) CreateDirectory(threadIndex int, dir string) (err error) {
// Does nothing as directories do not exist in OpenStack Swift
return nil
}
// GetFileInfo returns the information about the file or directory at 'filePath'.
func (storage *SwiftStorage) GetFileInfo(threadIndex int, filePath string) (exist bool, isDir bool, size int64, err error) {
object, _, err := storage.connection.Object(storage.container, storage.storageDir+filePath)
if err != nil {
if err == swift.ObjectNotFound {
return false, false, 0, nil
} else {
return false, false, 0, err
}
}
return true, false, object.Bytes, nil
}
// DownloadFile reads the file at 'filePath' into the chunk.
func (storage *SwiftStorage) DownloadFile(threadIndex int, filePath string, chunk *Chunk) (err error) {
file, _, err := storage.connection.ObjectOpen(storage.container, storage.storageDir+filePath, false, nil)
if err != nil {
return err
}
_, err = RateLimitedCopy(chunk, file, storage.DownloadRateLimit/storage.threads)
return err
}
// UploadFile writes 'content' to the file at 'filePath'.
func (storage *SwiftStorage) UploadFile(threadIndex int, filePath string, content []byte) (err error) {
reader := CreateRateLimitedReader(content, storage.UploadRateLimit/storage.threads)
_, err = storage.connection.ObjectPut(storage.container, storage.storageDir+filePath, reader, true, "", "application/duplicacy", nil)
return err
}
// If a local snapshot cache is needed for the storage to avoid downloading/uploading chunks too often when
// managing snapshots.
func (storage *SwiftStorage) IsCacheNeeded() bool { return true }
// If the 'MoveFile' method is implemented.
func (storage *SwiftStorage) IsMoveFileImplemented() bool { return true }
// If the storage can guarantee strong consistency.
func (storage *SwiftStorage) IsStrongConsistent() bool { return false }
// If the storage supports fast listing of files names.
func (storage *SwiftStorage) IsFastListing() bool { return true }
// Enable the test mode.
func (storage *SwiftStorage) EnableTestMode() {
}

View File

@@ -344,11 +344,13 @@ func MatchPath(filePath string, patterns []string) (included bool) {
for _, pattern := range patterns {
if pattern[0] == '+' {
if matchPattern(filePath, pattern[1:]) {
LOG_DEBUG("PATTERN_INCLUDE", "%s is included by pattern %s", filePath, pattern)
return true
}
} else if pattern[0] == '-' {
allIncludes = false
if matchPattern(filePath, pattern[1:]) {
LOG_DEBUG("PATTERN_EXCLUDE", "%s is excluded by pattern %s", filePath, pattern)
return false
}
} else if strings.HasPrefix(pattern, "i:") || strings.HasPrefix(pattern, "e:") {
@@ -363,7 +365,14 @@ func MatchPath(filePath string, patterns []string) (included bool) {
matched = re.MatchString(filePath)
}
if matched {
return strings.HasPrefix(pattern, "i:")
if strings.HasPrefix(pattern, "i:") {
LOG_DEBUG("PATTERN_INCLUDE", "%s is included by pattern %s", filePath, pattern)
return true
} else {
LOG_DEBUG("PATTERN_EXCLUDE", "%s is excluded by pattern %s", filePath, pattern)
return false
}
} else {
if strings.HasPrefix(pattern, "e:") {
allIncludes = false
@@ -372,7 +381,13 @@ func MatchPath(filePath string, patterns []string) (included bool) {
}
}
return !allIncludes
if allIncludes {
LOG_DEBUG("PATTERN_EXCLUDE", "%s is excluded", filePath)
return false
} else {
LOG_DEBUG("PATTERN_INCLUDE", "%s is included", filePath)
return true
}
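// In short: a path matching a '+' or 'i:' pattern is included immediately, a path
// matching a '-' or 'e:' pattern is excluded immediately, and a path matching no
// pattern at all is excluded only when every pattern is an include pattern
// (allIncludes stays true); otherwise it is included.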
}
func joinPath(components ...string) string {

View File

@@ -35,7 +35,7 @@ func SetOwner(fullPath string, entry *Entry, fileInfo *os.FileInfo) bool {
stat, ok := (*fileInfo).Sys().(*syscall.Stat_t)
if ok && stat != nil && (int(stat.Uid) != entry.UID || int(stat.Gid) != entry.GID) {
if entry.UID != -1 && entry.GID != -1 {
err := os.Chown(fullPath, entry.UID, entry.GID)
err := os.Lchown(fullPath, entry.UID, entry.GID)
if err != nil {
LOG_ERROR("RESTORE_CHOWN", "Failed to change uid or gid: %v", err)
return false
@@ -69,7 +69,7 @@ func (entry *Entry) SetAttributesToFile(fullPath string) {
newAttribute, found := entry.Attributes[name]
if found {
oldAttribute, _ := xattr.Getxattr(fullPath, name)
if bytes.Equal(oldAttribute, newAttribute) {
if !bytes.Equal(oldAttribute, newAttribute) {
xattr.Setxattr(fullPath, name, newAttribute)
}
delete(entry.Attributes, name)

View File

@@ -112,7 +112,7 @@ func TestRateLimit(t *testing.T) {
return
}
if int(n) != len(content) {
t.Errorf("Wrote %s bytes instead of %s", n, len(content))
t.Errorf("Wrote %d bytes instead of %d", n, len(content))
return
}
@@ -127,7 +127,7 @@ func TestRateLimit(t *testing.T) {
return
}
if int(n) != len(content) {
t.Errorf("Copied %s bytes instead of %s", n, len(content))
t.Errorf("Copied %d bytes instead of %d", n, len(content))
return
}

View File

@@ -0,0 +1,192 @@
//
// Storage module for Wasabi (https://www.wasabi.com)
//
// Wasabi is nominally compatible with AWS S3, but the copy-and-delete
// method used for renaming objects creates additional expense under
// Wasabi's billing system. This module is a pass-through to the
// existing S3 module for everything other than that one operation.
//
// This module copyright 2017 Mark Feit (https://github.com/markfeit)
// and may be distributed under the same terms as Duplicacy.
package duplicacy
import (
"crypto/hmac"
"crypto/sha1"
"encoding/base64"
"errors"
"fmt"
"net/http"
"time"
)
type WasabiStorage struct {
StorageBase
s3 *S3Storage
region string
endpoint string
bucket string
storageDir string
key string
secret string
client *http.Client
}
// See the Storage interface in duplicacy_storage.go for function
// descriptions.
func CreateWasabiStorage(
regionName string, endpoint string,
bucketName string, storageDir string,
accessKey string, secretKey string,
threads int,
) (storage *WasabiStorage, err error) {
s3storage, error := CreateS3Storage(regionName, endpoint, bucketName,
storageDir, accessKey, secretKey, threads,
true, // isSSLSupported
false, // isMinioCompatible
)
if err != nil {
return nil, error
}
wasabi := &WasabiStorage{
// Pass-through to existing S3 module
s3: s3storage,
// Local copies required for renaming
region: regionName,
endpoint: endpoint,
bucket: bucketName,
storageDir: storageDir,
key: accessKey,
secret: secretKey,
client: &http.Client{},
}
wasabi.DerivedStorage = wasabi
wasabi.SetDefaultNestingLevels([]int{0}, 0)
return wasabi, nil
}
func (storage *WasabiStorage) ListFiles(
threadIndex int, dir string,
) (files []string, sizes []int64, err error) {
return storage.s3.ListFiles(threadIndex, dir)
}
func (storage *WasabiStorage) DeleteFile(
threadIndex int, filePath string,
) (err error) {
return storage.s3.DeleteFile(threadIndex, filePath)
}
// This is a lightweight implementation of a call to Wasabi for a
// rename. It's designed to get the job done with as few dependencies
// on other packages as possible rather than being something
// general-purpose and reusable.
func (storage *WasabiStorage) MoveFile(
threadIndex int, from string, to string,
) (err error) {
var from_path string
// The from path includes the bucket. Take care not to include an empty storageDir
// string as Wasabi's backend will return 404 on URLs with double slashes.
if (storage.storageDir == "") {
from_path = fmt.Sprintf("/%s/%s", storage.bucket, from)
} else {
from_path = fmt.Sprintf("/%s/%s/%s", storage.bucket, storage.storageDir, from)
}
object := fmt.Sprintf("https://%s@%s%s",
storage.region, storage.endpoint, from_path)
// The object's new name is relative to the top of the bucket.
new_name := fmt.Sprintf("%s/%s", storage.storageDir, to)
timestamp := time.Now().Format(time.RFC1123Z)
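// The string to sign follows the AWS v2 signing layout (verb, Content-MD5,
// Content-Type, date, canonicalized resource); the two empty lines stand in for
// the unused Content-MD5 and Content-Type headers.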
signing_string := fmt.Sprintf("MOVE\n\n\n%s\n%s", timestamp, from_path)
signer := hmac.New(sha1.New, []byte(storage.secret))
signer.Write([]byte(signing_string))
signature := base64.StdEncoding.EncodeToString(signer.Sum(nil))
authorization := fmt.Sprintf("AWS %s:%s", storage.key, signature)
request, error := http.NewRequest("MOVE", object, nil)
if error != nil {
return error
}
request.Header.Add("Authorization", authorization)
request.Header.Add("Date", timestamp)
request.Header.Add("Destination", new_name)
request.Header.Add("Host", storage.endpoint)
request.Header.Add("Overwrite", "true")
response, error := storage.client.Do(request)
if error != nil {
return error
}
defer response.Body.Close()
if response.StatusCode != 200 {
return errors.New(response.Status)
}
return nil
}
func (storage *WasabiStorage) CreateDirectory(
threadIndex int, dir string,
) (err error) {
return storage.s3.CreateDirectory(threadIndex, dir)
}
func (storage *WasabiStorage) GetFileInfo(
threadIndex int, filePath string,
) (exist bool, isDir bool, size int64, err error) {
return storage.s3.GetFileInfo(threadIndex, filePath)
}
func (storage *WasabiStorage) DownloadFile(
threadIndex int, filePath string, chunk *Chunk,
) (err error) {
return storage.s3.DownloadFile(threadIndex, filePath, chunk)
}
func (storage *WasabiStorage) UploadFile(
threadIndex int, filePath string, content []byte,
) (err error) {
return storage.s3.UploadFile(threadIndex, filePath, content)
}
func (storage *WasabiStorage) IsCacheNeeded() bool {
return storage.s3.IsCacheNeeded()
}
func (storage *WasabiStorage) IsMoveFileImplemented() bool {
// This is implemented locally since S3 does a copy and delete
return true
}
func (storage *WasabiStorage) IsStrongConsistent() bool {
// Wasabi has it, S3 doesn't.
return true
}
func (storage *WasabiStorage) IsFastListing() bool {
return storage.s3.IsFastListing()
}
func (storage *WasabiStorage) EnableTestMode() {
}

View File

@@ -0,0 +1,450 @@
// Copyright (c) Acrosync LLC. All rights reserved.
// Free for personal use and commercial trial
// Commercial use requires per-user licenses available from https://duplicacy.com
//
//
// This storage backend is based on the work by Yuri Karamani from https://github.com/karamani/webdavclnt,
// released under the MIT license.
//
package duplicacy
import (
"bytes"
"encoding/xml"
"errors"
"fmt"
"io"
"io/ioutil"
"math/rand"
"net/http"
//"net/http/httputil"
"strconv"
"sync"
"time"
"strings"
)
type WebDAVStorage struct {
StorageBase
host string
port int
username string
password string
storageDir string
useHTTP bool
client *http.Client
threads int
directoryCache map[string]int // stores directories known to exist by this backend
directoryCacheLock sync.Mutex // lock for accessing directoryCache
}
var (
errWebDAVAuthorizationFailure = errors.New("Authentication failed")
errWebDAVMovedPermanently = errors.New("Moved permanently")
errWebDAVNotExist = errors.New("Path does not exist")
errWebDAVMaximumBackoff = errors.New("Maximum backoff reached")
errWebDAVMethodNotAllowed = errors.New("Method not allowed")
)
func CreateWebDAVStorage(host string, port int, username string, password string, storageDir string, useHTTP bool, threads int) (storage *WebDAVStorage, err error) {
if storageDir[len(storageDir)-1] != '/' {
storageDir += "/"
}
storage = &WebDAVStorage{
host: host,
port: port,
username: username,
password: password,
storageDir: "",
useHTTP: false,
client: http.DefaultClient,
threads: threads,
directoryCache: make(map[string]int),
}
// Make sure it doesn't follow redirect
storage.client.CheckRedirect = func(req *http.Request, via []*http.Request) error {
return http.ErrUseLastResponse
}
exist, isDir, _, err := storage.GetFileInfo(0, storageDir)
if err != nil {
return nil, err
}
if !exist {
return nil, fmt.Errorf("Storage path %s does not exist", storageDir)
}
if !isDir {
return nil, fmt.Errorf("Storage path %s is not a directory", storageDir)
}
storage.storageDir = storageDir
for _, dir := range []string{"snapshots", "chunks"} {
storage.CreateDirectory(0, dir)
}
storage.DerivedStorage = storage
storage.SetDefaultNestingLevels([]int{0}, 0)
return storage, nil
}
func (storage *WebDAVStorage) createConnectionString(uri string) string {
url := storage.host
if storage.useHTTP {
url = "http://" + url
} else {
url = "https://" + url
}
if storage.port > 0 {
url += fmt.Sprintf(":%d", storage.port)
}
return url + "/" + storage.storageDir + uri
}
func (storage *WebDAVStorage) retry(backoff int) int {
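// With the arithmetic below, a backoff of 1 sleeps for roughly 0.5-1s, then 1-2s,
// 2-4s and so on as the caller doubles the value; sendRequest gives up after 8 attempts.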
delay := rand.Intn(backoff*500) + backoff*500
time.Sleep(time.Duration(delay) * time.Millisecond)
backoff *= 2
return backoff
}
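// sendRequest issues a WebDAV request, retrying up to 8 times with backoff. PROPFIND, PUT and
// MOVE requests carry method-specific headers; a 2xx response returns the body and headers,
// while 301, 405, and 404 (for any method other than PUT) are mapped to the sentinel errors
// defined above, and other failures are retried.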
func (storage *WebDAVStorage) sendRequest(method string, uri string, depth int, data []byte) (io.ReadCloser, http.Header, error) {
backoff := 1
for i := 0; i < 8; i++ {
var dataReader io.Reader
headers := make(map[string]string)
if method == "PROPFIND" {
headers["Content-Type"] = "application/xml"
headers["Depth"] = fmt.Sprintf("%d", depth)
dataReader = bytes.NewReader(data)
} else if method == "PUT" {
headers["Content-Type"] = "application/octet-stream"
dataReader = CreateRateLimitedReader(data, storage.UploadRateLimit/storage.threads)
} else if method == "MOVE" {
headers["Destination"] = storage.createConnectionString(string(data))
headers["Content-Type"] = "application/octet-stream"
dataReader = bytes.NewReader([]byte(""))
} else {
headers["Content-Type"] = "application/octet-stream"
dataReader = bytes.NewReader(data)
}
request, err := http.NewRequest(method, storage.createConnectionString(uri), dataReader)
if err != nil {
return nil, nil, err
}
if len(storage.username) > 0 {
request.SetBasicAuth(storage.username, storage.password)
}
for key, value := range headers {
request.Header.Set(key, value)
}
//requestDump, err := httputil.DumpRequest(request, true)
//LOG_INFO("debug", "Request: %s", requestDump)
response, err := storage.client.Do(request)
if err != nil {
LOG_TRACE("WEBDAV_RETRY", "URL request '%s %s' returned an error (%v)", method, uri, err)
backoff = storage.retry(backoff)
continue
}
if response.StatusCode < 300 {
return response.Body, response.Header, nil
}
if response.StatusCode == 301 {
return nil, nil, errWebDAVMovedPermanently
}
response.Body.Close()
if response.StatusCode == 404 {
// A 404 is retried only for PUT (upload) requests; any other method returns immediately
if method != "PUT" {
return nil, nil, errWebDAVNotExist
}
} else if response.StatusCode == 405 {
return nil, nil, errWebDAVMethodNotAllowed
}
LOG_INFO("WEBDAV_RETRY", "URL request '%s %s' returned status code %d", method, uri, response.StatusCode)
backoff = storage.retry(backoff)
}
return nil, nil, errWebDAVMaximumBackoff
}
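// The types below model just enough of a WebDAV 207 Multi-Status response to extract the
// per-href properties requested by getProperties. An abbreviated, illustrative response body
// looks like this:
//
//   <d:multistatus xmlns:d="DAV:">
//     <d:response>
//       <d:href>/backups/chunks/ab</d:href>
//       <d:propstat>
//         <d:prop>
//           <d:getcontentlength>4194304</d:getcontentlength>
//           <d:resourcetype/>
//         </d:prop>
//       </d:propstat>
//     </d:response>
//   </d:multistatus>
//
// For a directory, the resourcetype element would instead contain a <d:collection/> child.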
type WebDAVProperties map[string]string
type WebDAVPropValue struct {
XMLName xml.Name `xml:""`
Value string `xml:",innerxml"`
}
type WebDAVProp struct {
PropList []WebDAVPropValue `xml:",any"`
}
type WebDAVPropStat struct {
Prop *WebDAVProp `xml:"prop"`
}
type WebDAVResponse struct {
Href string `xml:"href"`
PropStat *WebDAVPropStat `xml:"propstat"`
}
type WebDAVMultiStatus struct {
Responses []WebDAVResponse `xml:"response"`
}
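// getProperties sends a PROPFIND request for 'uri' with the given Depth header and returns the
// requested properties keyed by the href of each response element. For the properties
// "getcontentlength" and "resourcetype" the generated request body is:
//
//   <?xml version="1.0" encoding="utf-8" ?><propfind xmlns="DAV:"><prop><getcontentlength/><resourcetype/></prop></propfind>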
func (storage *WebDAVStorage) getProperties(uri string, depth int, properties ...string) (map[string]WebDAVProperties, error) {
propfind := "<prop>"
for _, p := range properties {
propfind += fmt.Sprintf("<%s/>", p)
}
propfind += "</prop>"
body := fmt.Sprintf(`<?xml version="1.0" encoding="utf-8" ?><propfind xmlns="DAV:">%s</propfind>`, propfind)
readCloser, _, err := storage.sendRequest("PROPFIND", uri, depth, []byte(body))
if err != nil {
return nil, err
}
defer readCloser.Close()
content, err := ioutil.ReadAll(readCloser)
if err != nil {
return nil, err
}
object := WebDAVMultiStatus{}
err = xml.Unmarshal(content, &object)
if err != nil {
return nil, err
}
if object.Responses == nil || len(object.Responses) == 0 {
return nil, errors.New("no WebDAV responses")
}
responses := make(map[string]WebDAVProperties)
for _, responseTag := range object.Responses {
if responseTag.PropStat == nil || responseTag.PropStat.Prop == nil || responseTag.PropStat.Prop.PropList == nil {
return nil, errors.New("no WebDAV properties")
}
properties := make(WebDAVProperties)
for _, prop := range responseTag.PropStat.Prop.PropList {
properties[prop.XMLName.Local] = prop.Value
}
responseKey := responseTag.Href
responses[responseKey] = properties
}
return responses, nil
}
// ListFiles returns the list of files and subdirectories under 'dir'. A subdirectory returned must have a trailing '/' and
// a size of 0. If 'dir' is 'snapshots', only subdirectories will be returned. If 'dir' is 'snapshots/repository_id', then only
// files will be returned. If 'dir' is 'chunks', the implementation can return the list either recursively or non-recursively.
func (storage *WebDAVStorage) ListFiles(threadIndex int, dir string) (files []string, sizes []int64, err error) {
if dir[len(dir)-1] != '/' {
dir += "/"
}
properties, err := storage.getProperties(dir, 1, "getcontentlength", "resourcetype")
if err != nil {
return nil, nil, err
}
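// Hrefs returned by the server are expected to look like '/<storageDir><dir><name>'; the extra 1 accounts for the leading '/'.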
prefixLength := len(storage.storageDir) + len(dir) + 1
for file, m := range properties {
if len(file) <= prefixLength {
continue
}
isDir := false
size := 0
if resourceType, exist := m["resourcetype"]; exist && strings.Contains(resourceType, "collection") {
isDir = true
} else if length, exist := m["getcontentlength"]; exist {
if length == "" {
isDir = true
} else {
size, _ = strconv.Atoi(length)
}
} else {
continue
}
if !isDir {
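// Per the ListFiles contract above, plain files directly under 'snapshots/' are not reported.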
if dir != "snapshots/" {
files = append(files, file[prefixLength:])
sizes = append(sizes, int64(size))
}
} else {
// This is a dir
file := file[prefixLength:]
if file[len(file)-1] != '/' {
file += "/"
}
files = append(files, file)
sizes = append(sizes, int64(0))
}
}
return files, sizes, nil
}
// GetFileInfo returns the information about the file or directory at 'filePath'.
func (storage *WebDAVStorage) GetFileInfo(threadIndex int, filePath string) (exist bool, isDir bool, size int64, err error) {
properties, err := storage.getProperties(filePath, 0, "getcontentlength", "resourcetype")
if err != nil {
if err == errWebDAVNotExist {
return false, false, 0, nil
}
if err == errWebDAVMovedPermanently {
// This must be a directory
return true, true, 0, nil
}
return false, false, 0, err
}
if m, exist := properties["/" + storage.storageDir + filePath]; !exist {
return false, false, 0, nil
} else if resourceType, exist := m["resourcetype"]; exist && strings.Contains(resourceType, "collection") {
return true, true, 0, nil
} else if length, exist := m["getcontentlength"]; exist && length != "" {
value, _ := strconv.Atoi(length)
return true, false, int64(value), nil
} else {
return true, true, 0, nil
}
}
// DeleteFile deletes the file or directory at 'filePath'.
func (storage *WebDAVStorage) DeleteFile(threadIndex int, filePath string) (err error) {
readCloser, _, err := storage.sendRequest("DELETE", filePath, 0, []byte(""))
if err != nil {
return err
}
readCloser.Close()
return nil
}
// MoveFile renames the file.
func (storage *WebDAVStorage) MoveFile(threadIndex int, from string, to string) (err error) {
readCloser, _, err := storage.sendRequest("MOVE", from, 0, []byte(to))
if err != nil {
return err
}
readCloser.Close()
return nil
}
// createParentDirectory creates the parent directory of 'dir' unless the directory cache already records it as existing
func (storage *WebDAVStorage) createParentDirectory(threadIndex int, dir string) (err error) {
found := strings.LastIndex(dir, "/")
if found == -1 {
return nil
}
parent := dir[:found]
storage.directoryCacheLock.Lock()
_, exist := storage.directoryCache[parent]
storage.directoryCacheLock.Unlock()
if exist {
return nil
}
err = storage.CreateDirectory(threadIndex, parent)
if err == nil {
storage.directoryCacheLock.Lock()
storage.directoryCache[parent] = 1
storage.directoryCacheLock.Unlock()
}
return err
}
// CreateDirectory creates a new directory.
func (storage *WebDAVStorage) CreateDirectory(threadIndex int, dir string) (err error) {
for dir != "" && dir[len(dir)-1] == '/' {
dir = dir[:len(dir)-1]
}
if dir == "" {
return nil
}
// If there is an error in creating the parent directory, proceed anyway
storage.createParentDirectory(threadIndex, dir)
readCloser, _, err := storage.sendRequest("MKCOL", dir, 0, []byte(""))
if err != nil {
if err == errWebDAVMethodNotAllowed || err == errWebDAVMovedPermanently {
// We simply ignore these errors and assume that the directory already exists
return nil
}
return err
}
readCloser.Close()
return nil
}
// DownloadFile reads the file at 'filePath' into the chunk.
func (storage *WebDAVStorage) DownloadFile(threadIndex int, filePath string, chunk *Chunk) (err error) {
readCloser, _, err := storage.sendRequest("GET", filePath, 0, nil)
if err != nil {
return err
}
// Close the response body so the underlying HTTP connection can be reused.
defer readCloser.Close()
_, err = RateLimitedCopy(chunk, readCloser, storage.DownloadRateLimit/storage.threads)
return err
}
// UploadFile writes 'content' to the file at 'filePath'.
func (storage *WebDAVStorage) UploadFile(threadIndex int, filePath string, content []byte) (err error) {
// If there is an error in creating the parent directory, proceed anyway
storage.createParentDirectory(threadIndex, filePath)
readCloser, _, err := storage.sendRequest("PUT", filePath, 0, content)
if err != nil {
return err
}
readCloser.Close()
return nil
}
// If a local snapshot cache is needed for the storage to avoid downloading/uploading chunks too often when
// managing snapshots.
func (storage *WebDAVStorage) IsCacheNeeded() bool { return true }
// If the 'MoveFile' method is implemented.
func (storage *WebDAVStorage) IsMoveFileImplemented() bool { return true }
// If the storage can guarantee strong consistency.
func (storage *WebDAVStorage) IsStrongConsistent() bool { return false }
// If the storage supports fast listing of file names.
func (storage *WebDAVStorage) IsFastListing() bool { return false }
// Enable the test mode.
func (storage *WebDAVStorage) EnableTestMode() {}