Mirror of https://github.com/jkl1337/duplicacy.git (synced 2026-01-04 20:54:44 -06:00)

Compare commits: 226 commits
.gitignore (vendored, 3 lines removed)
@@ -1,3 +0,0 @@
.idea
duplicacy_main
@@ -1,4 +1,4 @@
Duplicacy is based on the following open source project:
Duplicacy is based on the following open source projects:

| Projects | License |
|--------|:-------:|
@@ -8,7 +8,9 @@ Duplicacy is based on the following open source project:
|https://github.com/Azure/azure-sdk-for-go | Apache-2.0 |
|https://github.com/tj/go-dropbox | MIT |
|https://github.com/aws/aws-sdk-go | Apache-2.0 |
|https://github.com/goamz/goamz | LGPL with static link exception |
|https://github.com/howeyc/gopass | ISC |
|https://github.com/tmc/keyring | ISC |
|https://github.com/pcwizz/xattr | BSD-2-Clause |
|https://github.com/minio/blake2b-simd | Apache-2.0 |
|https://github.com/go-ole/go-ole | MIT |
DESIGN.md (218 lines changed)
@@ -1,215 +1,5 @@
## Lock-Free Deduplication

The three elements of lock-free deduplication are:

* Use variable-size chunking algorithm to split files into chunks
* Store each chunk in the storage using a file name derived from its hash, and rely on the file system API to manage chunks without using a centralized indexing database
* Apply a *two-step fossil collection* algorithm to remove chunks that become unreferenced after a backup is deleted

The variable-size chunking algorithm, also called Content-Defined Chunking, is well-known and has been adopted by many backup tools. The main advantage of the variable-size chunking algorithm over the fixed-size chunking algorithm (as used by rsync) is that in the former the rolling hash is only used to search for boundaries between chunks, after which a far more collision-resistant hash function like MD5 or SHA256 is applied on each chunk. In contrast, in the fixed-size chunking algorithm, for the purpose of detecting inserts or deletions, a lookup in the known hash table is required every time the rolling hash window is shifted by one byte, thus significantly reducing the chunk splitting performance.
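To make the boundary search concrete, here is a minimal sketch of content-defined chunking in Go. The running hash, the constants, and the boundary condition are illustrative assumptions rather than Duplicacy's actual rolling hash; the point is that the cheap hash only decides where to cut, while the collision-resistant hash (SHA-256 here) runs once per chunk.

```go
package main

import (
	"crypto/sha256"
	"fmt"
)

// Illustrative parameters; Duplicacy derives its own from the configured
// average/min/max chunk sizes.
const (
	minSize = 256 << 10     // 256 KiB minimum chunk size
	maxSize = 4 << 20       // 4 MiB maximum chunk size
	avgMask = (1 << 20) - 1 // ~1 MiB average: cut when hash&avgMask == avgMask
)

// splitChunks cuts data into variable-size chunks. A boundary is declared
// whenever a cheap hash of the bytes seen since the last cut hits a fixed
// bit pattern, so an insertion early in the stream only disturbs nearby
// boundaries instead of shifting every later chunk.
func splitChunks(data []byte) [][]byte {
	var chunks [][]byte
	start := 0
	var h uint64
	for i, b := range data {
		h = h*31 + uint64(b) // stand-in for a true sliding-window rolling hash
		size := i - start + 1
		boundary := size >= minSize && h&avgMask == avgMask
		if boundary || size >= maxSize || i == len(data)-1 {
			chunks = append(chunks, data[start:i+1])
			start, h = i+1, 0
		}
	}
	return chunks
}

func main() {
	data := make([]byte, 3<<20) // placeholder input; in practice the packed file stream
	for _, c := range splitChunks(data) {
		// The SHA-256 of each chunk becomes its identity in the storage.
		fmt.Printf("%x  (%d bytes)\n", sha256.Sum256(c), len(c))
	}
}
```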
What is novel about lock-free deduplication is the absence of a centralized indexing database for tracking all existing chunks and for determining which chunks are not needed any more. Instead, to check if a chunk has already been uploaded before, one can just perform a file lookup via the file storage API using the file name derived from the hash of the chunk. This effectively turns a cloud storage offering only a very limited set of basic file operations into a powerful modern backup backend capable of both block-level and file-level deduplication. More importantly, the absence of a centralized indexing database means that there is no need to implement a distributed locking mechanism on top of the file storage.

By eliminating the chunk indexing database, lock-free deduplication not only reduces the code complexity but also makes the deduplication less error-prone. Each chunk is saved individually in its own file, and once saved there is no need for modification. Data corruption is therefore less likely to occur because of the immutability of chunk files. Another benefit that comes naturally from lock-free deduplication is that when one client creates a new chunk, other clients that happen to have the same original file will notice that the chunk already exists and therefore will not upload the same chunk again. This pushes the deduplication to its highest level -- clients without knowledge of each other can share identical chunks with no extra effort.

There is one problem, though. Deletion of snapshots without an indexing database, when concurrent access is permitted, turns out to be a hard problem. If exclusive access to a file storage by a single client can be guaranteed, the deletion procedure can simply search for chunks not referenced by any backup and delete them. However, if concurrent access is required, an unreferenced chunk can't be trivially removed, because of the possibility that a backup procedure in progress may reference the same chunk. The ongoing backup procedure, still unknown to the deletion procedure, may have already encountered that chunk during its file scanning phase, but decided not to upload the chunk again since it already exists in the file storage.

Fortunately, there is a solution to address the deletion problem and make lock-free deduplication practical. The solution is a *two-step fossil collection* algorithm that deletes unreferenced chunks in two steps: identify and collect them in the first step, and then permanently remove them once certain conditions are met.

## Two-Step Fossil Collection

Interestingly, the two-step fossil collection algorithm hinges on a basic file operation supported almost universally, *file renaming*. When the deletion procedure identifies a chunk not referenced by any known snapshots, instead of deleting the chunk file immediately, it changes the name of the chunk file (and possibly moves it to a different directory). A chunk that has been renamed is called a *fossil*.

The fossil still exists in the file storage. Two rules are enforced regarding the access of fossils:

* A restore, list, or check procedure that reads existing backups can read the fossil if the original chunk cannot be found.
* A backup procedure does not check the existence of a fossil. That is, it must upload a chunk if it cannot find the chunk, even if an equivalent fossil exists.
In the first step of the deletion procedure, called the *fossil collection* step, the names of all identified fossils will be saved in a fossil collection file. The deletion procedure then exits without performing further actions. This step does not effectively change any chunk references, due to the first fossil access rule. If a backup procedure references a chunk after it is marked as a fossil, a new chunk will be uploaded because of the second fossil access rule, as shown in Figure 1.

<p align="center">
<img src="https://github.com/gilbertchen/duplicacy-beta/blob/master/images/fossil_collection_1.png?raw=true"
alt="Reference after Rename"/>
</p>

The second step, called the *fossil deletion* step, will permanently delete fossils, but only when two conditions are met:

* For each snapshot id, there is a new snapshot that was not seen by the fossil collection step
* The new snapshot must finish after the fossil collection step

The first condition guarantees that if a backup procedure references a chunk before the deletion procedure turns it into a fossil, the reference will be detected in the fossil deletion step, which will then turn the fossil back into a normal chunk.

The second condition guarantees that any backup procedure unknown to the fossil deletion step can start only after the fossil collection step finishes. Therefore, if it references a chunk that was identified as a fossil in the fossil collection step, it should observe the fossil, not the chunk, so it will upload a new chunk, according to the second fossil access rule.

Therefore, if a backup procedure references a chunk before the chunk is marked as a fossil, the fossil deletion step will not delete the chunk until it sees that backup procedure finish (as indicated by the appearance of a new snapshot file uploaded to the storage). This ensures that the scenarios depicted in Figure 2 will never happen.

<p align="center">
<img src="https://github.com/gilbertchen/duplicacy-beta/blob/master/images/fossil_collection_2.png?raw=true"
alt="Reference before Rename"/>
</p>
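The deletion-eligibility test that these two conditions describe can be sketched in a few lines of Go. The types, fields, and time handling below are hypothetical simplifications for illustration; they are not Duplicacy's actual data structures.

```go
package main

import (
	"fmt"
	"time"
)

// Snapshot and FossilCollection are illustrative stand-ins, not Duplicacy's real types.
type Snapshot struct {
	ID       string    // snapshot id (one per repository)
	Revision int
	EndTime  time.Time // when the backup finished
}

type FossilCollection struct {
	CollectedAt time.Time      // when the fossil collection step ran
	LastSeen    map[string]int // highest revision seen per snapshot id at that time
	Fossils     []string       // renamed chunk files awaiting deletion
}

// canDeleteFossils applies the two conditions from the text: every snapshot id
// must have produced a new snapshot that the collection step did not see, and
// that snapshot must have finished after the collection step.
func canDeleteFossils(fc FossilCollection, current []Snapshot) bool {
	newer := map[string]bool{}
	for _, s := range current {
		if s.Revision > fc.LastSeen[s.ID] && s.EndTime.After(fc.CollectedAt) {
			newer[s.ID] = true
		}
	}
	for id := range fc.LastSeen {
		if !newer[id] {
			return false // some repository has not backed up since collection
		}
	}
	return true
}

func main() {
	fc := FossilCollection{
		CollectedAt: time.Now().Add(-time.Hour),
		LastSeen:    map[string]int{"host1": 260},
	}
	current := []Snapshot{{ID: "host1", Revision: 261, EndTime: time.Now()}}
	fmt.Println(canDeleteFossils(fc, current)) // true: host1 backed up after collection
}
```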
## Snapshot Format

A snapshot file is a file that the backup procedure uploads to the file storage after it finishes splitting files into chunks and uploading all new chunks. It mainly contains metadata for the backup overall, metadata for all the files, and chunk references for each file. Here is an example snapshot file for a repository containing 3 files (file1, file2, and dir1/file3):

```json
{
    "id": "host1",
    "revision": 1,
    "tag": "first",
    "start_time": 1455590487,
    "end_time": 1455590487,
    "files": [
        {
            "path": "file1",
            "content": "0:0:2:6108",
            "hash": "a533c0398194f93b90bd945381ea4f2adb0ad50bd99fd3585b9ec809da395b51",
            "size": 151901,
            "time": 1455590487,
            "mode": 420
        },
        {
            "path": "file2",
            "content": "2:6108:3:7586",
            "hash": "f6111c1562fde4df9c0bafe2cf665778c6e25b49bcab5fec63675571293ed644",
            "size": 172071,
            "time": 1455590487,
            "mode": 420
        },
        {
            "path": "dir1/",
            "size": 102,
            "time": 1455590487,
            "mode": 2147484096
        },
        {
            "path": "dir1/file3",
            "content": "3:7586:4:1734",
            "hash": "6bf9150424169006388146908d83d07de413de05d1809884c38011b2a74d9d3f",
            "size": 118457,
            "time": 1455590487,
            "mode": 420
        }
    ],
    "chunks": [
        "9f25db00881a10a8e7bcaa5a12b2659c2358a579118ea45a73c2582681f12919",
        "6e903aace6cd05e26212fcec1939bb951611c4179c926351f3b20365ef2c212f",
        "4b0d017bce5491dbb0558c518734429ec19b8a0d7c616f68ddf1b477916621f7",
        "41841c98800d3b9faa01b1007d1afaf702000da182df89793c327f88a9aba698",
        "7c11ee13ea32e9bb21a694c5418658b39e8894bbfecd9344927020a9e3129718"
    ],
    "lengths": [
        64638,
        81155,
        170593,
        124309,
        1734
    ]
}
```

When Duplicacy splits a file into chunks using the variable-size chunking algorithm, if the end of a file is reached and yet the boundary marker for terminating a chunk hasn't been found, the next file, if there is one, will be read in and the chunking algorithm continues. It is as if all files were packed into a big tar file which is then split into chunks.

The *content* field of a file indicates the indexes of the starting and ending chunks and the corresponding offsets. For instance, *file1* starts at chunk 0 offset 0 and ends at chunk 2 offset 6108, immediately followed by *file2*.
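For illustration, the *content* field can be decoded as shown below. The helper and its error handling are assumptions made for this sketch; only the `startChunk:startOffset:endChunk:endOffset` layout comes from the example above.

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// span describes where a file's bytes live inside the chunk sequence: it
// begins at offset StartOffset in chunk StartChunk and ends at offset
// EndOffset in chunk EndChunk.
type span struct {
	StartChunk, StartOffset, EndChunk, EndOffset int
}

// parseContent decodes a "startChunk:startOffset:endChunk:endOffset" string
// such as "0:0:2:6108" from the snapshot's files section.
func parseContent(s string) (span, error) {
	parts := strings.Split(s, ":")
	if len(parts) != 4 {
		return span{}, fmt.Errorf("malformed content field %q", s)
	}
	n := make([]int, 4)
	for i, p := range parts {
		v, err := strconv.Atoi(p)
		if err != nil {
			return span{}, err
		}
		n[i] = v
	}
	return span{n[0], n[1], n[2], n[3]}, nil
}

func main() {
	s, _ := parseContent("0:0:2:6108") // file1 from the example snapshot
	fmt.Printf("%+v\n", s)             // {StartChunk:0 StartOffset:0 EndChunk:2 EndOffset:6108}
}
```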
The backup procedure can run in one of two modes. In the default quick mode, only modified or new files are scanned. Chunks only referenced by old files that have been modified are removed from the chunk sequence, and then chunks referenced by new files are appended. Indices for unchanged files need to be updated too.

In the safe mode (enabled by the -hash option), all files are scanned and the chunk sequence is regenerated.

The length sequence stores the lengths of all chunks, which are needed when calculating statistics such as the total length of chunks. For a repository containing a large number of files, the size of the snapshot file can be tremendous. To make the situation worse, a big snapshot file would have to be uploaded on every backup, even if only a few files have changed since the last backup. To save space, the variable-size chunking algorithm is also applied to the three dynamic fields of a snapshot file: *files*, *chunks*, and *lengths*.

Chunks produced during this step are deduplicated and uploaded in the same way as regular file chunks. The final snapshot file contains sequences of chunk hashes and other fixed-size fields:

```json
{
    "id": "host1",
    "revision": 1,
    "start_time": 1455590487,
    "tag": "first",
    "end_time": 1455590487,
    "file_sequence": [
        "21e4c69f3832e32349f653f31f13cefc7c52d52f5f3417ae21f2ef5a479c3437"
    ],
    "chunk_sequence": [
        "8a36ffb8f4959394fd39bba4f4a464545ff3dd6eed642ad4ccaa522253f2d5d6"
    ],
    "length_sequence": [
        "fc2758ae60a441c244dae05f035136e6dd33d3f3a0c5eb4b9025a9bed1d0c328"
    ]
}
```

In the extreme case where the repository has not been modified since the last backup, a new backup procedure will not create any new chunks, as shown by the following output from a real use case:

```
$ duplicacy backup -stats
Storage set to sftp://gchen@192.168.1.100/Duplicacy
Last backup at revision 260 found
Backup for /Users/gchen/duplicacy at revision 261 completed
Files: 42367 total, 2,204M bytes; 0 new, 0 bytes
File chunks: 447 total, 2,238M bytes; 0 new, 0 bytes, 0 bytes uploaded
Metadata chunks: 6 total, 11,753K bytes; 0 new, 0 bytes, 0 bytes uploaded
All chunks: 453 total, 2,249M bytes; 0 new, 0 bytes, 0 bytes uploaded
Total running time: 00:00:05
```

## Encryption

When encryption is enabled (by the -e option with the *init* or *add* command), Duplicacy will generate 4 random 256-bit keys:

* *Hash Key*: for generating a chunk hash from the content of a chunk
* *ID Key*: for generating a chunk id from a chunk hash
* *Chunk Key*: for encrypting chunk files
* *File Key*: for encrypting non-chunk files such as snapshot files

Here is a diagram showing how these keys are used:

<p align="center">
<img src="https://github.com/gilbertchen/duplicacy-beta/blob/master/images/duplicacy_encryption.png?raw=true"
alt="encryption"/>
</p>

Chunk hashes are used internally and stored in the snapshot file. They are never exposed unless the snapshot file is decrypted. Chunk ids are used as the file names for the chunks and are therefore exposed. When the *cat* command is used to print out a snapshot file, the chunk hashes stored in the snapshot file will be converted into chunk ids first, which are then displayed instead.

Chunk content is encrypted by AES-GCM, with an encryption key that is the HMAC-SHA256 of the chunk hash with the *Chunk Key* as the secret key.

The snapshot file is encrypted by AES-GCM too, using an encryption key that is the HMAC-SHA256 of the file path with the *File Key* as the secret key.

These four random keys are saved in a file named 'config' in the storage, encrypted with a master key derived from the PBKDF2 function on the storage password chosen by the user.
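As a sketch of the key usage described above, deriving a per-chunk key and encrypting a chunk might look like the following. The helper names, nonce handling, and the chunk-id construction are assumptions made for illustration; Duplicacy's real key file format and parameters are not shown here.

```go
package main

import (
	"crypto/aes"
	"crypto/cipher"
	"crypto/hmac"
	"crypto/rand"
	"crypto/sha256"
	"fmt"
)

// deriveKey returns HMAC-SHA256(message, secret), the construction the text
// describes for deriving per-chunk encryption keys from the Chunk Key.
func deriveKey(secret, message []byte) []byte {
	mac := hmac.New(sha256.New, secret)
	mac.Write(message)
	return mac.Sum(nil)
}

// encryptChunk encrypts a chunk with AES-GCM under a key derived from the
// chunk hash and the Chunk Key.
func encryptChunk(chunkKey, chunkHash, plaintext []byte) ([]byte, error) {
	block, err := aes.NewCipher(deriveKey(chunkKey, chunkHash))
	if err != nil {
		return nil, err
	}
	gcm, err := cipher.NewGCM(block)
	if err != nil {
		return nil, err
	}
	nonce := make([]byte, gcm.NonceSize())
	if _, err := rand.Read(nonce); err != nil {
		return nil, err
	}
	// Prepend the nonce so the chunk can be decrypted later.
	return gcm.Seal(nonce, nonce, plaintext, nil), nil
}

func main() {
	chunkKey := make([]byte, 32) // stand-in for the random 256-bit Chunk Key
	idKey := make([]byte, 32)    // stand-in for the random 256-bit ID Key
	content := []byte("chunk content")

	chunkHash := deriveKey(nil, content)   // illustrative only; the real chunk hash uses the Hash Key
	chunkID := deriveKey(idKey, chunkHash) // used as the chunk's file name in the storage
	sealed, _ := encryptChunk(chunkKey, chunkHash, content)
	fmt.Printf("chunk id %x, %d encrypted bytes\n", chunkID, len(sealed))
}
```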
All documentation has been moved to our wiki page:

* [Lock-Free Deduplication](https://github.com/gilbertchen/duplicacy/wiki/Lock-Free-Deduplication)
* [Snapshot Format](https://github.com/gilbertchen/duplicacy/wiki/Snapshot-Format)
* [Encryption](https://github.com/gilbertchen/duplicacy/wiki/Encryption)
GUIDE.md (525 lines changed)
@@ -1,505 +1,20 @@
# Duplicacy User Guide

## Commands

#### Init

```
SYNOPSIS:
   duplicacy init - Initialize the storage if necessary and the current directory as the repository

USAGE:
   duplicacy init [command options] <snapshot id> <storage url>

OPTIONS:
   -encrypt, -e                           encrypt the storage with a password
   -chunk-size, -c 4M                     the average size of chunks
   -max-chunk-size, -max 16M              the maximum size of chunks (defaults to chunk-size * 4)
   -min-chunk-size, -min 1M               the minimum size of chunks (defaults to chunk-size / 4)
   -pref-dir <preference directory path>  specify an alternate location for the .duplicacy preferences directory
```

The *init* command first connects to the storage specified by the storage URL. If the storage has already been initialized before, it will download the storage configuration (stored in the file named *config*) and ignore the options provided in the command line. Otherwise, it will create the configuration file from the options and upload the file.

The initialized storage will then become the default storage for other commands if the -storage option is not specified for those commands. This default storage actually has a name, *default*.

After that, it will prepare the current working directory as the repository to be backed up. Under the hood, it will create a directory named *.duplicacy* in the repository and put a file named *preferences* there that stores the snapshot id and the encryption and storage options.

The snapshot id is an id used to distinguish different repositories connected to the same storage. Each repository must have a unique snapshot id. A snapshot id must contain only characters valid in Linux and Windows paths (alphabet, digits, underscore, dash, etc), but cannot include `/`, `\`, or `@`.

The -e option controls whether or not encryption will be enabled for the storage. If encryption is enabled, you will be prompted to enter a storage password.

The three chunk size parameters are passed to the variable-size chunking algorithm. Their values are important to the overall performance, especially for cloud storages. If the chunk size is too small, a lot of overhead will be spent on sending requests and receiving responses. If the chunk size is too large, the effect of deduplication will be less obvious as more data will need to be transferred with each chunk.

The -pref-dir option controls the location of the preferences directory. If not specified, a directory named .duplicacy is created in the repository. If specified, it must point to a non-existing directory. The directory is created and a .duplicacy file is created in the repository. The .duplicacy file contains the absolute path name of the preferences directory.

Once a storage has been initialized with these parameters, these parameters cannot be modified any more.
#### Backup

```
SYNOPSIS:
   duplicacy backup - Save a snapshot of the repository to the storage

USAGE:
   duplicacy backup [command options]

OPTIONS:
   -hash                      detect file differences by hash (rather than size and timestamp)
   -t <tag>                   assign a tag to the backup
   -stats                     show statistics during and after backup
   -threads <n>               number of uploading threads
   -limit-rate <kB/s>         the maximum upload rate (in kilobytes/sec)
   -vss                       enable the Volume Shadow Copy service (Windows only)
   -storage <storage name>    backup to the specified storage instead of the default one
```

The *backup* command creates a snapshot of the repository and uploads it to the storage. If -hash is not provided, it will upload new or modified files since the last backup by comparing file sizes and timestamps. Otherwise, every file is scanned to detect changes.

You can assign a tag to the snapshot so that later you can refer to it by tag in other commands.

If the -stats option is specified, statistical information such as the transfer speed and the number of chunks will be displayed throughout the backup procedure.

The -threads option can be used to specify more than one thread to upload chunks.

The -limit-rate option sets a cap on the maximum upload rate.

The -vss option works on Windows only to turn on the Volume Shadow Copy service such that files opened by other processes with exclusive locks can be read as usual.

When the repository has multiple storages (added by the *add* command), you can select the storage to back up to by giving a storage name.

You can specify patterns to include/exclude files by putting them in a file named *.duplicacy/filters*. Please refer to the [Include/Exclude Patterns](https://github.com/gilbertchen/duplicacy-beta/blob/master/GUIDE.md#includeexclude-patterns) section for how to specify the patterns.
#### Restore

```
SYNOPSIS:
   duplicacy restore - Restore the repository to a previously saved snapshot

USAGE:
   duplicacy restore [command options] [--] [pattern] ...

OPTIONS:
   -r <revision>              the revision number of the snapshot (required)
   -hash                      detect file differences by hash (rather than size and timestamp)
   -overwrite                 overwrite existing files in the repository
   -delete                    delete files not in the snapshot
   -stats                     show statistics during and after restore
   -threads <n>               number of downloading threads
   -limit-rate <kB/s>         the maximum download rate (in kilobytes/sec)
   -storage <storage name>    restore from the specified storage instead of the default one
```

The *restore* command restores the repository to a previous revision. By default the restore procedure will treat files that have the same sizes and timestamps as those in the snapshot as unchanged files, but with the -hash option, every file will be fully scanned to make sure they are in fact unchanged.

By default the restore procedure will not overwrite existing files, unless the -overwrite option is specified.

The -delete option indicates that files not in the snapshot will be removed.

If the -stats option is specified, statistical information such as the transfer speed and the number of chunks will be displayed throughout the restore procedure.

The -threads option can be used to specify more than one thread to download chunks.

The -limit-rate option sets a cap on the maximum download rate.

When the repository has multiple storages (added by the *add* command), you can select the storage to restore from by specifying the storage name.

Unlike the *backup* procedure, which reads the include/exclude patterns from a file, the *restore* procedure reads them from the command line. If the patterns can cause confusion to the command line argument parser, -- should be prepended to the patterns. Please refer to the [Include/Exclude Patterns](https://github.com/gilbertchen/duplicacy-beta/blob/master/GUIDE.md#includeexclude-patterns) section for how to specify patterns.
#### List

```
SYNOPSIS:
   duplicacy list - List snapshots

USAGE:
   duplicacy list [command options]

OPTIONS:
   -all, -a                   list snapshots with any id
   -id <snapshot id>          list snapshots with the specified id rather than the default one
   -r <revision> [+]          the revision number of the snapshot
   -t <tag>                   list snapshots with the specified tag
   -files                     print the file list in each snapshot
   -chunks                    print chunks in each snapshot or all chunks if no snapshot specified
   -reset-password            take passwords from input rather than keychain/keyring or env
   -storage <storage name>    retrieve snapshots from the specified storage
```

The *list* command lists information about specified snapshots. By default it will list snapshots created from the current repository, but you can list all snapshots stored in the storage by specifying the -all option, or list snapshots with a different snapshot id using the -id option, and/or snapshots with a particular tag with the -t option.

The revision number is a number assigned to the snapshot when it is being created. This number will keep increasing every time a new snapshot is created from a repository. You can refer to snapshots by their revision numbers using the -r option, which either takes a single revision number (-r 123) or a range (-r 123-456). There can be multiple -r options.

If -files is specified, for each snapshot to be listed, this command will also print information about every file contained in the snapshot.

If -chunks is specified, the command will also print out every chunk the snapshot references.

The -reset-password option is used to reset stored passwords and to allow passwords to be entered again. Please refer to the [Managing Passwords](https://github.com/gilbertchen/duplicacy-beta/blob/master/GUIDE.md#managing-passwords) section for more information.

When the repository has multiple storages (added by the *add* command), you can select the storage to list from by specifying the storage name.
#### Check

```
SYNOPSIS:
   duplicacy check - Check the integrity of snapshots

USAGE:
   duplicacy check [command options]

OPTIONS:
   -all, -a                   check snapshots with any id
   -id <snapshot id>          check snapshots with the specified id rather than the default one
   -r <revision> [+]          the revision number of the snapshot
   -t <tag>                   check snapshots with the specified tag
   -fossils                   search fossils if a chunk can't be found
   -resurrect                 turn referenced fossils back into chunks
   -files                     verify the integrity of every file
   -stats                     show deduplication statistics (implies -all and all revisions)
   -storage <storage name>    retrieve snapshots from the specified storage
```

The *check* command checks, for each specified snapshot, that all referenced chunks exist in the storage.

By default the *check* command will check snapshots created from the current repository, but you can check all snapshots stored in the storage at once by specifying the -all option, or snapshots from a different repository using the -id option, and/or snapshots with a particular tag with the -t option.

The revision number is a number assigned to the snapshot when it is being created. This number will keep increasing every time a new snapshot is created from a repository. You can refer to snapshots by their revision numbers using the -r option, which either takes a single revision number (-r 123) or a range (-r 123-456). There can be multiple -r options.

By default the *check* command only verifies the existence of chunks. To verify the full integrity of a snapshot, you should specify the -files option, which will download chunks and compute file hashes in memory, to make sure that all hashes match.

By default the *check* command does not find fossils. If the -fossils option is specified, it will find the fossil if the referenced chunk does not exist. If the -resurrect option is specified, it will turn the fossil back into a chunk.

When the repository has multiple storages (added by the *add* command), you can select the storage to check by specifying the storage name.
#### Cat

```
SYNOPSIS:
   duplicacy cat - Print to stdout the specified file, or the snapshot content if no file is specified

USAGE:
   duplicacy cat [command options] [<file>]

OPTIONS:
   -id <snapshot id>          retrieve from the snapshot with the specified id
   -r <revision>              the revision number of the snapshot
   -storage <storage name>    retrieve the file from the specified storage
```

The *cat* command prints a file or the entire snapshot content if no file is specified.

The file must be specified with a path relative to the repository.

You can specify a different snapshot id rather than the default id.

The -r option is optional. If not specified, the latest revision will be selected.

You can use the -storage option to select a different storage other than the default one.
#### Diff

```
SYNOPSIS:
   duplicacy diff - Compare two snapshots or two revisions of a file

USAGE:
   duplicacy diff [command options] [<file>]

OPTIONS:
   -id <snapshot id>          diff with the snapshot with the specified id
   -r <revision> [+]          the revision number of the snapshot
   -hash                      compute the hashes of on-disk files
   -storage <storage name>    retrieve files from the specified storage
```

The *diff* command compares the same file in two different snapshots if a file is given, otherwise compares the two snapshots.

The file must be specified with a path relative to the repository.

You can specify a different snapshot id rather than the default snapshot id.

If only one revision is given by -r, the right hand side of the comparison will be the on-disk file. The -hash option can then instruct this command to compute the hash of the file.

You can use the -storage option to select a different storage other than the default one.
#### History

```
SYNOPSIS:
   duplicacy history - Show the history of a file

USAGE:
   duplicacy history [command options] <file>

OPTIONS:
   -id <snapshot id>          find the file in the snapshot with the specified id
   -r <revision> [+]          show history of the specified revisions
   -hash                      show the hash of the on-disk file
   -storage <storage name>    retrieve files from the specified storage
```

The *history* command shows how the hash, size, and timestamp of a file change over the specified set of revisions.

You can specify a different snapshot id rather than the default snapshot id, and multiple -r options to specify the set of revisions.

The -hash option is to compute the hash of the on-disk file. Otherwise, only the size and timestamp of the on-disk file will be included.

You can use the -storage option to select a different storage other than the default one.
#### Prune

```
SYNOPSIS:
   duplicacy prune - Prune snapshots by revision, tag, or retention policy

USAGE:
   duplicacy prune [command options]

OPTIONS:
   -id <snapshot id>          delete snapshots with the specified id instead of the default one
   -all, -a                   match against all snapshot IDs
   -r <revision> [+]          delete snapshots with the specified revisions
   -t <tag> [+]               delete snapshots with the specified tags
   -keep <n:m> [+]            keep 1 snapshot every n days for snapshots older than m days
   -exhaustive                find all unreferenced chunks by scanning the storage
   -exclusive                 assume exclusive access to the storage (disable two-step fossil collection)
   -dry-run, -d               show what would have been deleted
   -delete-only               delete fossils previously collected (if deletable) and don't collect fossils
   -collect-only              identify and collect fossils, but don't delete fossils previously collected
   -ignore <id> [+]           ignore the specified snapshot id when deciding if fossils can be deleted
   -storage <storage name>    prune snapshots from the specified storage
```

The *prune* command implements the two-step fossil collection algorithm. It will first find fossil collection files from previous runs and check if the contained fossils are eligible for permanent deletion (the fossil deletion step). Then it will search for snapshots to be deleted, mark unreferenced chunks as fossils (by renaming), and save them in a new fossil collection file stored locally (the fossil collection step).

If a snapshot id is specified, that snapshot id will be used instead of the default one. The -a option will find snapshots with any id. Snapshots to be deleted can be specified by revision numbers, by a tag, by retention policies, or by any combination of them.

The retention policies are specified by the -keep option, which accepts an argument in the form of two numbers *n:m*, where *n* indicates the number of days between two consecutive snapshots to keep, and *m* means that the policy only applies to snapshots at least *m* days old. If *n* is zero, any snapshots older than *m* days will be removed.

Here are a few sample retention policies:

```sh
$ duplicacy prune -keep 1:7       # Keep 1 snapshot per day for snapshots older than 7 days
$ duplicacy prune -keep 7:30      # Keep 1 snapshot every 7 days for snapshots older than 30 days
$ duplicacy prune -keep 30:180    # Keep 1 snapshot every 30 days for snapshots older than 180 days
$ duplicacy prune -keep 0:360     # Keep no snapshots older than 360 days
```

Multiple -keep options must be sorted by their *m* values in decreasing order. For instance, to combine the above policies into one line, it would become:

```sh
$ duplicacy prune -keep 0:360 -keep 30:180 -keep 7:30 -keep 1:7
```
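The retention decision the -keep policies describe can be sketched as follows. This is a simplified illustration that keeps the first snapshot it encounters in each *n*-day bucket; the exact tie-breaking Duplicacy uses is not specified here.

```go
package main

import (
	"fmt"
	"time"
)

// policy mirrors one -keep n:m option: for snapshots older than M days,
// keep one snapshot every N days (N == 0 means delete them all).
type policy struct{ N, M int }

// toDelete returns the snapshot times that would be pruned. Policies must be
// sorted by M in decreasing order, matching the command-line requirement.
func toDelete(snapshots []time.Time, policies []policy, now time.Time) []time.Time {
	var deleted []time.Time
	kept := map[string]bool{} // one kept snapshot per (policy, bucket)
	for _, s := range snapshots {
		age := int(now.Sub(s).Hours() / 24)
		for i, p := range policies {
			if age < p.M {
				continue // policy does not apply; try the next (smaller M) one
			}
			if p.N == 0 {
				deleted = append(deleted, s)
				break
			}
			bucket := fmt.Sprintf("%d:%d", i, age/p.N)
			if kept[bucket] {
				deleted = append(deleted, s)
			} else {
				kept[bucket] = true // first snapshot seen in this bucket is kept
			}
			break
		}
	}
	return deleted
}

func main() {
	now := time.Now()
	var snaps []time.Time
	for d := 1; d <= 60; d++ { // one snapshot per day for the last 60 days
		snaps = append(snaps, now.AddDate(0, 0, -d))
	}
	policies := []policy{{7, 30}, {1, 7}} // -keep 7:30 -keep 1:7
	fmt.Println(len(toDelete(snaps, policies, now)), "of", len(snaps), "snapshots would be pruned")
}
```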
The -exhaustive option will scan the list of all chunks in the storage, therefore it will find not only unreferenced chunks from deleted snapshots, but also chunks that become unreferenced for other reasons, such as those from an incomplete backup. It will also find any file that does not look like a chunk file. In contrast, a default *prune* command will only identify chunks referenced by deleted snapshots but not by any other snapshots.

The -exclusive option will assume that no other clients are accessing the storage, effectively disabling the *two-step fossil collection* algorithm. With this option, the *prune* command will immediately remove unreferenced chunks.

The -dry-run option is used to test what changes the *prune* command would have made. It is guaranteed not to make any changes on the storage, not even creating the local fossil collection file. The following command checks if the chunk directory is clean (i.e., if there are any unreferenced chunks, temporary files, or anything else):

```
$ duplicacy prune -d -exclusive -exhaustive     # Prints out nothing if the chunk directory is clean
```

The -delete-only option will skip the fossil collection step, while the -collect-only option will skip the fossil deletion step.

For fossils collected in the fossil collection step to be eligible for safe deletion in the fossil deletion step, at least one new snapshot from *each* snapshot id must be created between two runs of the *prune* command. However, some repositories may not be set up to back up on a regular schedule, which would block other repositories from deleting any fossils. Duplicacy by default will ignore repositories that have no new backup in the past 7 days. It also provides an -ignore option that can be used to skip certain repositories when deciding the deletion criteria.

You can use the -storage option to select a different storage other than the default one.
#### Password

```
SYNOPSIS:
   duplicacy password - Change the storage password

USAGE:
   duplicacy password [command options]

OPTIONS:
   -storage <storage name>    change the password used to access the specified storage
```

The *password* command decrypts the storage configuration file *config* using the old password, and re-encrypts the file using a new password. It does not change the encryption keys used to encrypt and decrypt chunk files, snapshot files, etc.

You can specify the storage to change the password for when working with multiple storages.
#### Add

```
SYNOPSIS:
   duplicacy add - Add an additional storage to be used for the existing repository

USAGE:
   duplicacy add [command options] <storage name> <snapshot id> <storage url>

OPTIONS:
   -encrypt, -e                     encrypt the storage with a password
   -chunk-size, -c 4M               the average size of chunks
   -max-chunk-size, -max 16M        the maximum size of chunks (defaults to chunk-size * 4)
   -min-chunk-size, -min 1M         the minimum size of chunks (defaults to chunk-size / 4)
   -compression-level, -l <level>   compression level (defaults to -1)
   -copy <storage name>             make the new storage copy-compatible with an existing one
```

The *add* command connects another storage to the current repository. Like the *init* command, if the storage has not been initialized before, a storage configuration file derived from the command line options will be uploaded, but those options will be ignored if the configuration file already exists in the storage.

A unique storage name must be given in order to distinguish it from other storages.

The -copy option is required if later you want to copy snapshots between this storage and another storage. Two storages are copy-compatible if they have the same average chunk size, the same maximum chunk size, the same minimum chunk size, the same chunk seed (used in calculating the rolling hash in the variable-size chunking algorithm), and the same hash key. If the -copy option is specified, these parameters will be copied from the existing storage rather than taken from the command line.
#### Set

```
SYNOPSIS:
   duplicacy set - Change the options for the default or specified storage

USAGE:
   duplicacy set [command options]

OPTIONS:
   -encrypt, -e[=true]         encrypt the storage with a password
   -no-backup[=true]           backup to this storage is prohibited
   -no-restore[=true]          restore from this storage is prohibited
   -no-save-password[=true]    don't save password or access keys to keychain/keyring
   -key                        add a key/password whose value is supplied by the -value option
   -value                      the value of the key/password
   -storage <storage name>     use the specified storage instead of the default one
```

The *set* command changes the options for the specified storage.

The -e option turns on the storage encryption. If specified as -e=false, it turns off the storage encryption.

The -no-backup option will not allow backups from this repository to be created.

The -no-restore option will not allow restoring this repository to a different revision.

The -no-save-password option will require every password or token to be entered every time and not saved anywhere.

The -key and -value options are used to store (in plain text) access keys or tokens needed by various storages. Please refer to the [Managing Passwords](https://github.com/gilbertchen/duplicacy-beta/blob/master/GUIDE.md#managing-passwords) section for more details.

You can select a storage to change options for by specifying a storage name.
#### Copy

```
SYNOPSIS:
   duplicacy copy - Copy snapshots between compatible storages

USAGE:
   duplicacy copy [command options]

OPTIONS:
   -id <snapshot id>       copy snapshots with the specified id instead of all snapshot ids
   -r <revision> [+]       copy snapshots with the specified revisions
   -from <storage name>    copy snapshots from the specified storage
   -to <storage name>      copy snapshots to the specified storage
```

The *copy* command copies snapshots from one storage to another storage. They must be copy-compatible, i.e., some configuration parameters must be the same. One storage must be initialized with the -copy option provided by the *add* command.

Instead of copying all snapshots, you can specify a set of snapshots to copy by giving the -r options. The *copy* command preserves the revision numbers, so if a revision number already exists on the destination storage the command will fail.

If no -from option is given, the snapshots from the default storage will be copied. The -to option specifies the destination storage and is required.
## Include/Exclude Patterns

An include pattern starts with +, and an exclude pattern starts with -. Patterns may contain wildcard characters such as * and ? with their normal meaning.

When matching a path against a list of patterns, the path is compared with the part after + or -, one pattern at a time. Therefore, the order of the patterns is significant. If a match with an include pattern is found, the path is said to be included without further comparisons. If a match with an exclude pattern is found, the path is said to be excluded without further comparisons. If no match is found, the path will be excluded if all patterns are include patterns, but included otherwise.

Patterns ending with a / apply to directories only, and patterns not ending with a / apply to files only. When a directory is excluded, all files and subdirectories under it will also be excluded. Note that the path separator is always /, even on Windows.

The following pattern list includes only files under the directory foo/ but not files under the subdirectory foo/bar:

```
-foo/bar/
+foo/*
-*
```
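These rules can be summarized with a small matcher. The sketch below is an illustration only: it uses Go's path.Match for the wildcards, which is merely an approximation of Duplicacy's actual wildcard handling.

```go
package main

import (
	"fmt"
	"path"
	"strings"
)

// include decides whether filePath should be backed up or restored, given an
// ordered pattern list such as {"-foo/bar/", "+foo/*", "-*"}.
func include(patterns []string, filePath string) bool {
	allInclude := true
	for _, p := range patterns {
		isInclude := strings.HasPrefix(p, "+")
		if !isInclude {
			allInclude = false
		}
		pat := p[1:]
		// Patterns ending with "/" apply to directories; for a file path we
		// treat a directory exclusion as covering everything under it.
		matched := false
		if strings.HasSuffix(pat, "/") {
			matched = strings.HasPrefix(filePath, pat)
		} else {
			matched, _ = path.Match(pat, filePath)
		}
		if matched {
			return isInclude // first matching pattern wins
		}
	}
	// No pattern matched: exclude if every pattern was an include pattern.
	return !allInclude
}

func main() {
	patterns := []string{"-foo/bar/", "+foo/*", "-*"}
	for _, f := range []string{"foo/a.txt", "foo/bar/b.txt", "other.txt"} {
		fmt.Println(f, "->", include(patterns, f))
	}
}
```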
For the *backup* command, the include/exclude patterns are read from a file named *filters* under the *.duplicacy* directory.

For the *restore* command, the include/exclude patterns are specified as the command line arguments.
## Managing Passwords

Duplicacy attempts to retrieve the storage password and the storage-specific access tokens/keys in three ways:

* If a secret vault service is available, Duplicacy will store passwords/keys entered by the user in such a secret vault and later retrieve them when needed. On Mac OS X it is Keychain, and on Linux it is gnome-keyring. On Windows the passwords/keys are encrypted and decrypted by the Data Protection API, and encrypted passwords/keys are stored in the file *.duplicacy/keyring*. However, if the -no-save-password option is specified for the storage, then Duplicacy will not save passwords this way.
* If an environment variable for a password is provided, Duplicacy will always take it. The table below shows the name of the environment variable for each kind of password. Note that if the storage is not the default one, the storage name will be included in the name of the environment variable.
* If a matching key and its value are saved to the preference file (.duplicacy/preferences) by the *set* command, the value will be used as the password. The last column in the table below lists the name of the preference key for each type of password.

| password type | environment variable (default storage) | environment variable (non-default storage) | key in preferences |
|:----------------:|:----------------:|:----------------:|:----------------:|
| storage password | DUPLICACY_PASSWORD | DUPLICACY_<STORAGENAME>_PASSWORD | password |
| sftp password | DUPLICACY_SSH_PASSWORD | DUPLICACY_<STORAGENAME>_SSH_PASSWORD | ssh_password |
| sftp key file | DUPLICACY_SSH_KEY_FILE | DUPLICACY_<STORAGENAME>_SSH_KEY_FILE | ssh_key_file |
| Dropbox Token | DUPLICACY_DROPBOX_TOKEN | DUPLICACY_<STORAGENAME>_DROPBOX_TOKEN | dropbox_token |
| S3 Access ID | DUPLICACY_S3_ID | DUPLICACY_<STORAGENAME>_S3_ID | s3_id |
| S3 Secret Key | DUPLICACY_S3_SECRET | DUPLICACY_<STORAGENAME>_S3_SECRET | s3_secret |
| Backblaze Account ID | DUPLICACY_B2_ID | DUPLICACY_<STORAGENAME>_B2_ID | b2_id |
| Backblaze Application Key | DUPLICACY_B2_KEY | DUPLICACY_<STORAGENAME>_B2_KEY | b2_key |
| Azure Access Key | DUPLICACY_AZURE_KEY | DUPLICACY_<STORAGENAME>_AZURE_KEY | azure_key |
| Google Drive Token File | DUPLICACY_GCD_TOKEN | DUPLICACY_<STORAGENAME>_GCD_TOKEN | gcd_token |
| Microsoft OneDrive Token File | DUPLICACY_ONE_TOKEN | DUPLICACY_<STORAGENAME>_ONE_TOKEN | one_token |
| Hubic Token File | DUPLICACY_HUBIC_TOKEN | DUPLICACY_<STORAGENAME>_HUBIC_TOKEN | hubic_token |

Note that passwords stored in environment variables and in the preference file need to be in plaintext and are therefore insecure; this should be avoided whenever possible.
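For example, the environment-variable lookup described in the second bullet can be sketched as follows. The uppercasing of the storage name is an assumption made for this illustration; check the wiki for the exact naming rules.

```go
package main

import (
	"fmt"
	"os"
	"strings"
)

// passwordFromEnv looks up a password of the given kind (e.g. "PASSWORD",
// "S3_ID", "B2_KEY") for a storage. The default storage uses DUPLICACY_<KIND>;
// any other storage inserts its name: DUPLICACY_<STORAGENAME>_<KIND>.
func passwordFromEnv(storage, kind string) (string, bool) {
	name := "DUPLICACY_" + kind
	if storage != "default" {
		name = "DUPLICACY_" + strings.ToUpper(storage) + "_" + kind
	}
	return os.LookupEnv(name)
}

func main() {
	if pw, ok := passwordFromEnv("offsite", "PASSWORD"); ok {
		fmt.Println("found DUPLICACY_OFFSITE_PASSWORD:", len(pw), "characters")
	} else {
		fmt.Println("DUPLICACY_OFFSITE_PASSWORD is not set")
	}
}
```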
## Scripts

You can instruct Duplicacy to run a script before or after executing a command. For example, if you create a bash script with the name *pre-prune* under the *.duplicacy/scripts* directory, this bash script will be run before the *prune* command starts. A script named *post-prune* will be run after the *prune* command finishes. This rule applies to all commands except *init*.
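A minimal sketch of this pre-/post-command hook, assuming only the naming convention above (an illustration, not Duplicacy's actual implementation):

```go
package main

import (
	"fmt"
	"os"
	"os/exec"
	"path/filepath"
)

// runScript executes .duplicacy/scripts/<when>-<command> if it exists,
// e.g. pre-prune before the prune command or post-prune after it.
func runScript(when, command string) error {
	script := filepath.Join(".duplicacy", "scripts", when+"-"+command)
	if _, err := os.Stat(script); err != nil {
		return nil // no script configured; nothing to do
	}
	cmd := exec.Command(script)
	cmd.Stdout, cmd.Stderr = os.Stdout, os.Stderr
	return cmd.Run()
}

func main() {
	if err := runScript("pre", "prune"); err != nil {
		fmt.Fprintln(os.Stderr, "pre-prune script failed:", err)
		return
	}
	// ... run the prune command itself ...
	_ = runScript("post", "prune")
}
```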
All documentation has been moved to our wiki page:

* Commands
    * [init](https://github.com/gilbertchen/duplicacy/wiki/init)
    * [backup](https://github.com/gilbertchen/duplicacy/wiki/backup)
    * [restore](https://github.com/gilbertchen/duplicacy/wiki/restore)
    * [list](https://github.com/gilbertchen/duplicacy/wiki/list)
    * [check](https://github.com/gilbertchen/duplicacy/wiki/check)
    * [prune](https://github.com/gilbertchen/duplicacy/wiki/prune)
    * [cat](https://github.com/gilbertchen/duplicacy/wiki/cat)
    * [history](https://github.com/gilbertchen/duplicacy/wiki/history)
    * [diff](https://github.com/gilbertchen/duplicacy/wiki/diff)
    * [password](https://github.com/gilbertchen/duplicacy/wiki/password)
    * [add](https://github.com/gilbertchen/duplicacy/wiki/add)
    * [set](https://github.com/gilbertchen/duplicacy/wiki/set)
    * [copy](https://github.com/gilbertchen/duplicacy/wiki/copy)
* [Include/Exclude Patterns](https://github.com/gilbertchen/duplicacy/wiki/Include-Exclude-Patterns)
* [Managing Passwords](https://github.com/gilbertchen/duplicacy/wiki/Managing-Passwords)
* [Cache](https://github.com/gilbertchen/duplicacy/wiki/Cache)
* [Pre-Command and Post-Command Scripts](https://github.com/gilbertchen/duplicacy/wiki/Pre-Command-and-Post-Command-Scripts)
Gopkg.lock (generated, new file, 212 lines)
@@ -0,0 +1,212 @@
# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.


[[projects]]
  name = "cloud.google.com/go"
  packages = ["compute/metadata","iam","internal","internal/optional","internal/version","storage"]
  revision = "2d3a6656c17a60b0815b7e06ab0be04eacb6e613"
  version = "v0.16.0"

[[projects]]
  name = "github.com/Azure/go-autorest"
  packages = ["autorest","autorest/adal","autorest/azure","autorest/date"]
  revision = "c67b24a8e30d876542a85022ebbdecf0e5a935e8"
  version = "v9.4.1"

[[projects]]
  branch = "master"
  name = "github.com/aryann/difflib"
  packages = ["."]
  revision = "e206f873d14a916d3d26c40ab667bca123f365a3"

[[projects]]
  name = "github.com/aws/aws-sdk-go"
  packages = ["aws","aws/awserr","aws/awsutil","aws/client","aws/client/metadata","aws/corehandlers","aws/credentials","aws/credentials/ec2rolecreds","aws/credentials/endpointcreds","aws/credentials/stscreds","aws/defaults","aws/ec2metadata","aws/endpoints","aws/request","aws/session","aws/signer/v4","internal/shareddefaults","private/protocol","private/protocol/query","private/protocol/query/queryutil","private/protocol/rest","private/protocol/restxml","private/protocol/xml/xmlutil","service/s3","service/sts"]
  revision = "a32b1dcd091264b5dee7b386149b6cc3823395c9"
  version = "v1.12.31"

[[projects]]
  name = "github.com/bkaradzic/go-lz4"
  packages = ["."]
  revision = "74ddf82598bc4745b965729e9c6a463bedd33049"
  version = "v1.0.0"

[[projects]]
  name = "github.com/dgrijalva/jwt-go"
  packages = ["."]
  revision = "dbeaa9332f19a944acb5736b4456cfcc02140e29"
  version = "v3.1.0"

[[projects]]
  name = "github.com/gilbertchen/azure-sdk-for-go"
  packages = ["storage"]
  revision = "2d49bb8f2cee530cc16f1f1a9f0aae763dee257d"
  version = "v10.2.1-beta"

[[projects]]
  name = "github.com/gilbertchen/cli"
  packages = ["."]
  revision = "565493f259bf868adb54d45d5f4c68d405117adf"
  version = "v1.2.0"

[[projects]]
  branch = "master"
  name = "github.com/gilbertchen/go-dropbox"
  packages = ["."]
  revision = "90711b603312b1f973f3a5da3793ac4f1e5c2f2a"

[[projects]]
  name = "github.com/gilbertchen/go-ole"
  packages = ["."]
  revision = "0e87ea779d9deb219633b828a023b32e1244dd57"
  version = "v1.2.0"

[[projects]]
  branch = "master"
  name = "github.com/gilbertchen/go.dbus"
  packages = ["."]
  revision = "9e442e6378618c083fd3b85b703ffd202721fb17"

[[projects]]
  branch = "master"
  name = "github.com/gilbertchen/goamz"
  packages = ["aws","s3"]
  revision = "eada9f4e8cc2a45db775dee08a2c37597ce4760a"

[[projects]]
  branch = "master"
  name = "github.com/gilbertchen/gopass"
  packages = ["."]
  revision = "bf9dde6d0d2c004a008c27aaee91170c786f6db8"

[[projects]]
  branch = "master"
  name = "github.com/gilbertchen/keyring"
  packages = ["."]
  revision = "8855f5632086e51468cd7ce91056f8da69687ef6"

[[projects]]
  branch = "master"
  name = "github.com/gilbertchen/xattr"
  packages = ["."]
  revision = "68e7a6806b0137a396d7d05601d7403ae1abac58"

[[projects]]
  name = "github.com/go-ini/ini"
  packages = ["."]
  revision = "32e4c1e6bc4e7d0d8451aa6b75200d19e37a536a"
  version = "v1.32.0"

[[projects]]
  branch = "master"
  name = "github.com/golang/protobuf"
  packages = ["proto","protoc-gen-go/descriptor","ptypes","ptypes/any","ptypes/duration","ptypes/timestamp"]
  revision = "1e59b77b52bf8e4b449a57e6f79f21226d571845"

[[projects]]
  name = "github.com/googleapis/gax-go"
  packages = ["."]
  revision = "317e0006254c44a0ac427cc52a0e083ff0b9622f"
  version = "v2.0.0"

[[projects]]
  name = "github.com/jmespath/go-jmespath"
  packages = ["."]
  revision = "0b12d6b5"

[[projects]]
  branch = "master"
  name = "github.com/kr/fs"
  packages = ["."]
  revision = "2788f0dbd16903de03cb8186e5c7d97b69ad387b"

[[projects]]
  branch = "master"
  name = "github.com/minio/blake2b-simd"
  packages = ["."]
  revision = "3f5f724cb5b182a5c278d6d3d55b40e7f8c2efb4"

[[projects]]
  name = "github.com/pkg/errors"
  packages = ["."]
  revision = "645ef00459ed84a119197bfb8d8205042c6df63d"
  version = "v0.8.0"

[[projects]]
  name = "github.com/pkg/sftp"
  packages = ["."]
  revision = "98203f5a8333288eb3163b7c667d4260fe1333e9"
  version = "1.0.0"

[[projects]]
  name = "github.com/satori/uuid"
  packages = ["."]
  revision = "879c5887cd475cd7864858769793b2ceb0d44feb"
  version = "v1.1.0"

[[projects]]
  branch = "master"
  name = "github.com/vaughan0/go-ini"
  packages = ["."]
  revision = "a98ad7ee00ec53921f08832bc06ecf7fd600e6a1"

[[projects]]
  branch = "master"
  name = "golang.org/x/crypto"
  packages = ["curve25519","ed25519","ed25519/internal/edwards25519","pbkdf2","ssh","ssh/agent","ssh/terminal"]
  revision = "9f005a07e0d31d45e6656d241bb5c0f2efd4bc94"

[[projects]]
  branch = "master"
  name = "golang.org/x/net"
  packages = ["context","context/ctxhttp","http2","http2/hpack","idna","internal/timeseries","lex/httplex","trace"]
  revision = "9dfe39835686865bff950a07b394c12a98ddc811"

[[projects]]
  branch = "master"
  name = "golang.org/x/oauth2"
  packages = [".","google","internal","jws","jwt"]
  revision = "f95fa95eaa936d9d87489b15d1d18b97c1ba9c28"

[[projects]]
  branch = "master"
  name = "golang.org/x/sys"
  packages = ["unix","windows"]
  revision = "82aafbf43bf885069dc71b7e7c2f9d7a614d47da"

[[projects]]
  branch = "master"
  name = "golang.org/x/text"
  packages = ["collate","collate/build","internal/colltab","internal/gen","internal/tag","internal/triegen","internal/ucd","language","secure/bidirule","transform","unicode/bidi","unicode/cldr","unicode/norm","unicode/rangetable"]
  revision = "88f656faf3f37f690df1a32515b479415e1a6769"

[[projects]]
  branch = "master"
  name = "google.golang.org/api"
  packages = ["drive/v3","gensupport","googleapi","googleapi/internal/uritemplates","googleapi/transport","internal","iterator","option","storage/v1","transport/http"]
  revision = "17b5f22a248d6d3913171c1a557552ace0d9c806"

[[projects]]
  name = "google.golang.org/appengine"
  packages = [".","internal","internal/app_identity","internal/base","internal/datastore","internal/log","internal/modules","internal/remote_api","internal/urlfetch","urlfetch"]
  revision = "150dc57a1b433e64154302bdc40b6bb8aefa313a"
  version = "v1.0.0"

[[projects]]
  branch = "master"
  name = "google.golang.org/genproto"
  packages = ["googleapis/api/annotations","googleapis/iam/v1","googleapis/rpc/status"]
  revision = "891aceb7c239e72692819142dfca057bdcbfcb96"

[[projects]]
  name = "google.golang.org/grpc"
  packages = [".","balancer","balancer/roundrobin","codes","connectivity","credentials","encoding","grpclb/grpc_lb_v1/messages","grpclog","internal","keepalive","metadata","naming","peer","resolver","resolver/dns","resolver/passthrough","stats","status","tap","transport"]
  revision = "5a9f7b402fe85096d2e1d0383435ee1876e863d0"
  version = "v1.8.0"

[solve-meta]
  analyzer-name = "dep"
  analyzer-version = 1
  inputs-digest = "95a162eedee5e915fbd1917c3ba5021e646aa2f13a542c7cbeb02bcf30a3acb9"
  solver-name = "gps-cdcl"
  solver-version = 1
94
Gopkg.toml
Normal file
@@ -0,0 +1,94 @@
|
||||
|
||||
# Gopkg.toml example
|
||||
#
|
||||
# Refer to https://github.com/golang/dep/blob/master/docs/Gopkg.toml.md
|
||||
# for detailed Gopkg.toml documentation.
|
||||
#
|
||||
# required = ["github.com/user/thing/cmd/thing"]
|
||||
# ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"]
|
||||
#
|
||||
# [[constraint]]
|
||||
# name = "github.com/user/project"
|
||||
# version = "1.0.0"
|
||||
#
|
||||
# [[constraint]]
|
||||
# name = "github.com/user/project2"
|
||||
# branch = "dev"
|
||||
# source = "github.com/myfork/project2"
|
||||
#
|
||||
# [[override]]
|
||||
# name = "github.com/x/y"
|
||||
# version = "2.4.0"
|
||||
|
||||
|
||||
[[constraint]]
|
||||
name = "cloud.google.com/go"
|
||||
version = "0.16.0"
|
||||
|
||||
[[constraint]]
|
||||
branch = "master"
|
||||
name = "github.com/aryann/difflib"
|
||||
|
||||
[[constraint]]
|
||||
name = "github.com/aws/aws-sdk-go"
|
||||
version = "1.12.31"
|
||||
|
||||
[[constraint]]
|
||||
name = "github.com/bkaradzic/go-lz4"
|
||||
version = "1.0.0"
|
||||
|
||||
[[constraint]]
|
||||
name = "github.com/gilbertchen/azure-sdk-for-go"
|
||||
version = "10.2.1-beta"
|
||||
|
||||
[[constraint]]
|
||||
name = "github.com/gilbertchen/cli"
|
||||
version = "1.2.0"
|
||||
|
||||
[[constraint]]
|
||||
branch = "master"
|
||||
name = "github.com/gilbertchen/go-dropbox"
|
||||
|
||||
[[constraint]]
|
||||
name = "github.com/gilbertchen/go-ole"
|
||||
version = "1.2.0"
|
||||
|
||||
[[constraint]]
|
||||
branch = "master"
|
||||
name = "github.com/gilbertchen/goamz"
|
||||
|
||||
[[constraint]]
|
||||
branch = "master"
|
||||
name = "github.com/gilbertchen/gopass"
|
||||
|
||||
[[constraint]]
|
||||
branch = "master"
|
||||
name = "github.com/gilbertchen/keyring"
|
||||
|
||||
[[constraint]]
|
||||
branch = "master"
|
||||
name = "github.com/gilbertchen/xattr"
|
||||
|
||||
[[constraint]]
|
||||
branch = "master"
|
||||
name = "github.com/minio/blake2b-simd"
|
||||
|
||||
[[constraint]]
|
||||
name = "github.com/pkg/sftp"
|
||||
version = "1.0.0"
|
||||
|
||||
[[constraint]]
|
||||
branch = "master"
|
||||
name = "golang.org/x/crypto"
|
||||
|
||||
[[constraint]]
|
||||
branch = "master"
|
||||
name = "golang.org/x/net"
|
||||
|
||||
[[constraint]]
|
||||
branch = "master"
|
||||
name = "golang.org/x/oauth2"
|
||||
|
||||
[[constraint]]
|
||||
branch = "master"
|
||||
name = "google.golang.org/api"
|
||||
24
LICENSE.md
@@ -1,20 +1,8 @@
|
||||
Copyright © 2017 Acrosync LLC
|
||||
|
||||
Licensor: Acrosync LLC
|
||||
|
||||
Software: Duplicacy
|
||||
|
||||
Use Limitation: 5 users
|
||||
|
||||
License Grant. Licensor hereby grants to each recipient of the Software (“you”) a non-exclusive, non-transferable, royalty-free and fully-paid-up license, under all of the Licensor’s copyright and patent rights, to use, copy, distribute, prepare derivative works of, publicly perform and display the Software, subject to the Use Limitation and the conditions set forth below.
|
||||
|
||||
Use Limitation. The license granted above allows use by up to the number of users per entity set forth above (the “Use Limitation”). For determining the number of users, “you” includes all affiliates, meaning legal entities controlling, controlled by, or under common control with you. If you exceed the Use Limitation, your use is subject to payment of Licensor’s then-current list price for licenses.
|
||||
|
||||
Conditions. Redistribution in source code or other forms must include a copy of this license document to be provided in a reasonable manner. Any redistribution of the Software is only allowed subject to this license.
|
||||
|
||||
Trademarks. This license does not grant you any right in the trademarks, service marks, brand names or logos of Licensor.
|
||||
|
||||
DISCLAIMER. THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OR CONDITION, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. LICENSORS HEREBY DISCLAIM ALL LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE.
|
||||
|
||||
Termination. If you violate the terms of this license, your rights will terminate automatically and will not be reinstated without the prior written consent of Licensor. Any such termination will not affect the right of others who may have received copies of the Software from you.
|
||||
|
||||
* Free for personal use or commercial trial
|
||||
* Non-trial commercial use requires per-user CLI licenses available from [duplicacy.com](https://duplicacy.com/buy) at a cost of $20 per year
|
||||
* A user is defined as the computer account that creates or edits the files to be backed up; if a backup contains files created or edited by multiple users for commercial purposes, one CLI license is required for each user
|
||||
* The computer with a valid commercial license for the GUI version may run the CLI version without a CLI license
|
||||
* CLI licenses are not required to restore or manage backups; only the backup command requires valid CLI licenses
|
||||
* Modification and redistribution are permitted, but commercial use of derivative works is subject to the same requirements of this license
|
||||
|
||||
240
README.md
@@ -1,6 +1,6 @@
|
||||
# Duplicacy: A lock-free deduplication cloud backup tool
|
||||
|
||||
Duplicacy is a new generation cross-platform cloud backup tool based on the idea of [Lock-Free Deduplication](https://github.com/gilbertchen/duplicacy-cli/blob/master/DESIGN.md). It is the only cloud backup tool that allows multiple computers to back up to the same storage simultaneously without using any locks (thus readily amenable to various cloud storage services).
|
||||
Duplicacy is a new generation cross-platform cloud backup tool based on the idea of [Lock-Free Deduplication](https://github.com/gilbertchen/duplicacy/wiki/Lock-Free-Deduplication). It is the only cloud backup tool that allows multiple computers to back up to the same storage simultaneously without using any locks (thus readily amenable to various cloud storage services).
|
||||
|
||||
The repository hosts source code, design documents, and binary releases of the command line version. There is also a Duplicacy GUI frontend built for Windows and Mac OS X available from https://duplicacy.com.
|
||||
|
||||
@@ -8,7 +8,7 @@ There is a special edition of Duplicacy developed for VMware vSphere (ESXi) name
|
||||
|
||||
## Features
|
||||
|
||||
Duplicacy currently supports major cloud storage providers (Amazon S3, Google Cloud Storage, Microsoft Azure, Dropbox, Backblaze, Google Drive, Microsoft OneDrive, and Hubic) and offers all essential features of a modern backup tool:
|
||||
Duplicacy currently supports major cloud storage providers (Amazon S3, Google Cloud Storage, Microsoft Azure, Dropbox, Backblaze B2, Google Drive, Microsoft OneDrive, and Hubic) and offers all essential features of a modern backup tool:
|
||||
|
||||
* Incremental backup: only back up what has been changed
|
||||
* Full snapshot: although each backup is incremental, it must behave like a full snapshot for easy restore and deletion
|
||||
@@ -18,195 +18,54 @@ Duplicacy currently supports major cloud storage providers (Amazon S3, Google Cl
|
||||
* Concurrent access: multiple clients can back up to the same storage at the same time
|
||||
* Snapshot migration: all or selected snapshots can be migrated from one storage to another
|
||||
|
||||
The key idea of **Lock-Free Deduplication** can be summarized as follows:
|
||||
The key idea of **[Lock-Free Deduplication](https://github.com/gilbertchen/duplicacy/wiki/Lock-Free-Deduplication)** can be summarized as follows:
|
||||
|
||||
* Use variable-size chunking algorithm to split files into chunks
|
||||
* Store each chunk in the storage using a file name derived from its hash, and rely on the file system API to manage chunks without using a centralized indexing database
|
||||
* Apply a *two-step fossil collection* algorithm to remove chunks that become unreferenced after a backup is deleted
|
||||
|
||||
The [design document](https://github.com/gilbertchen/duplicacy-cli/blob/master/DESIGN.md) explains lock-free deduplication in detail.
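As a concrete illustration of the second bullet point, the following sketch shows what "store each chunk under a file name derived from its hash" amounts to. This is not Duplicacy's actual code; the function name, the SHA-256 choice, and the directory layout are assumptions made only for this example.

```go
// Minimal sketch of hash-derived chunk naming (illustration only, not
// Duplicacy's implementation; saveChunk and the layout are assumed).
package main

import (
	"crypto/sha256"
	"encoding/hex"
	"os"
	"path/filepath"
)

// saveChunk writes a chunk under chunks/<sha256-of-content>. If a file with
// that name already exists, an identical chunk is already in the storage and
// nothing needs to be uploaded -- deduplication without a central index.
func saveChunk(storageDir string, chunk []byte) (string, error) {
	sum := sha256.Sum256(chunk)
	name := hex.EncodeToString(sum[:])
	chunkPath := filepath.Join(storageDir, "chunks", name)
	if _, err := os.Stat(chunkPath); err == nil {
		return name, nil // identical chunk already stored
	}
	if err := os.MkdirAll(filepath.Dir(chunkPath), 0755); err != nil {
		return "", err
	}
	return name, os.WriteFile(chunkPath, chunk, 0644)
}
```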
|
||||
|
||||
## Getting Started
|
||||
|
||||
Duplicacy is written in Go. You can run the following command to build the executable (which will be created under `$GOPATH/bin`):
|
||||
|
||||
```
|
||||
go get -u github.com/gilbertchen/duplicacy/...
|
||||
```
|
||||
|
||||
You can also visit the [releases page](https://github.com/gilbertchen/duplicacy-cli/releases/latest) to download the pre-built binary suitable for your platform.
|
||||
|
||||
Once you have the Duplicacy executable on your path, you can change to the directory that you want to back up (called *repository*) and run the *init* command:
|
||||
|
||||
```
|
||||
$ cd path/to/your/repository
|
||||
$ duplicacy init mywork sftp://user@192.168.1.100/path/to/storage
|
||||
```
|
||||
|
||||
This *init* command connects the repository with the remote storage at 192.168.1.100 via SFTP. It will initialize the remote storage if this has not been done before. It also assigns the snapshot id *mywork* to the repository. This snapshot id is used to uniquely identify this repository if other repositories also back up to the same storage.
|
||||
|
||||
You can now create snapshots of the repository by invoking the *backup* command. The first snapshot may take a while depending on the size of the repository and the upload bandwidth. Subsequent snapshots will be much faster, as only new or modified files will be uploaded. Each snapshot is identified by the snapshot id and an increasing revision number starting from 1.
|
||||
|
||||
```sh
|
||||
$ duplicacy backup -stats
|
||||
```
|
||||
|
||||
Duplicacy provides a set of commands, such as list, check, diff, cat, and history, to manage snapshots:
|
||||
|
||||
```makefile
|
||||
$ duplicacy list # List all snapshots
|
||||
$ duplicacy check # Check integrity of snapshots
|
||||
$ duplicacy diff # Compare two snapshots, or the same file in two snapshots
|
||||
$ duplicacy cat # Print a file in a snapshot
|
||||
$ duplicacy history # Show how a file changes over time
|
||||
```
|
||||
|
||||
The *restore* command rolls back the repository to a previous revision:
|
||||
```sh
|
||||
$ duplicacy restore -r 1
|
||||
```
|
||||
|
||||
The *prune* command removes snapshots by revisions, or tags, or retention policies:
|
||||
|
||||
```sh
|
||||
$ duplicacy prune -r 1 # Remove the snapshot with revision number 1
|
||||
$ duplicacy prune -t quick # Remove all snapshots with the tag 'quick'
|
||||
$ duplicacy prune -keep 1:7 # Keep 1 snapshot per day for snapshots older than 7 days
|
||||
$ duplicacy prune -keep 7:30 # Keep 1 snapshot every 7 days for snapshots older than 30 days
|
||||
$ duplicacy prune -keep 0:180 # Remove all snapshots older than 180 days
|
||||
```
|
||||
|
||||
The first time the *prune* command is called, it removes the specified snapshots but keeps all unreferenced chunks as fossils.
|
||||
Since it uses the two-step fossil collection algorithm to clean chunks, you will need to run it again to remove those fossils from the storage:
|
||||
|
||||
```sh
|
||||
$ duplicacy prune # Chunks from deleted snapshots will be removed if deletion criteria are met
|
||||
```
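To make the two-step behavior concrete, here is a hedged sketch of what fossil collection can look like on a file-based storage. The helper functions and the `.fsl` suffix handling below are illustrative assumptions, not Duplicacy's actual code.

```go
// Hedged sketch of two-step fossil collection on a file-based storage
// (illustration only; these helpers are not Duplicacy's actual code).
package main

import (
	"os"
	"strings"
)

// collectFossil is step one: an unreferenced chunk is renamed into a fossil
// instead of being deleted, so a concurrent backup can still resurrect it.
func collectFossil(chunkPath string) error {
	return os.Rename(chunkPath, chunkPath+".fsl")
}

// finishFossil is step two, run by a later prune: delete the fossil if no
// newer snapshot references the chunk, otherwise turn it back into a chunk.
func finishFossil(fossilPath string, stillReferenced bool) error {
	if stillReferenced {
		return os.Rename(fossilPath, strings.TrimSuffix(fossilPath, ".fsl"))
	}
	return os.Remove(fossilPath)
}
```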
|
||||
|
||||
To back up to multiple storages, use the *add* command to add a new storage. The *add* command is similar to the *init* command, except that the first argument is a storage name used to distinguish different storages:
|
||||
|
||||
```sh
|
||||
$ duplicacy add s3 mywork s3://amazon.com/mybucket/path/to/storage
|
||||
```
|
||||
|
||||
You can back up to any storage by specifying the storage name:
|
||||
|
||||
```sh
|
||||
$ duplicacy backup -storage s3
|
||||
```
|
||||
|
||||
However, snapshots created this way will differ across storages if the repository changes between the two backup operations. A better approach is to use the *copy* command to copy specified snapshots from one storage to another:
|
||||
|
||||
```sh
|
||||
$ duplicacy copy -r 1 -to s3 # Copy snapshot at revision 1 to the s3 storage
|
||||
$ duplicacy copy -to s3 # Copy every snapshot to the s3 storage
|
||||
```
|
||||
|
||||
The [User Guide](https://github.com/gilbertchen/duplicacy-cli/blob/master/GUIDE.md) contains a complete reference to
|
||||
all commands and other features of Duplicacy.
|
||||
* [A brief introduction](https://github.com/gilbertchen/duplicacy/wiki/Quick-Start)
|
||||
* [Command references](https://github.com/gilbertchen/duplicacy/wiki)
|
||||
|
||||
## Storages
|
||||
|
||||
Duplicacy currently supports local file storage, SFTP, and 5 cloud storage providers.
|
||||
With Duplicacy, you can back up files to local or networked drives, SFTP servers, or many cloud storage providers. The following table compares the costs of all cloud storages supported by Duplicacy.
|
||||
|
||||
#### Local disk
|
||||
| Provider | Storage (monthly) | Upload | Download | API Charge |
|
||||
|:------------:|:-------------:|:------------------:|:--------------:|:-----------:|
|
||||
| Amazon S3 | $0.023/GB | free | $0.09/GB | [yes](https://aws.amazon.com/s3/pricing/) |
|
||||
| Wasabi | $3.99 first 1TB <br> $0.0039/GB additional | free | $0.04/GB | no |
|
||||
| DigitalOcean Spaces| $5 first 250GB <br> $0.02/GB additional | free | first 1TB free <br> $0.01/GB additional| no |
|
||||
| Backblaze B2 | $0.005/GB | free | $0.02/GB | [yes](https://www.backblaze.com/b2/b2-transactions-price.html) |
|
||||
| Google Cloud Storage | $0.026/GB | free | $0.12/GB | [yes](https://cloud.google.com/storage/pricing) |
|
||||
| Google Drive | 15GB free <br> $1.99/100GB <br> $9.99/TB | free | free | no |
|
||||
| Microsoft Azure | $0.0184/GB | free | free | [yes](https://azure.microsoft.com/en-us/pricing/details/storage/blobs/) |
|
||||
| Microsoft OneDrive | 5GB free <br> $1.99/50GB <br> $5.83/TB | free | free | no |
|
||||
| Dropbox | 2GB free <br> $8.25/TB | free | free | no |
|
||||
| Hubic | 25GB free <br> €1/100GB <br> €5/10TB | free | free | no |
|
||||
|
||||
```
|
||||
Storage URL: /path/to/storage (on Linux or Mac OS X)
|
||||
C:\path\to\storage (on Windows)
|
||||
```
|
||||
Please consult the [wiki page](https://github.com/gilbertchen/duplicacy/wiki/Storage-Backends) on how to set up Duplicacy to work with each cloud storage.
|
||||
|
||||
#### SFTP
|
||||
It should be noted that their performance varies considerably. A [performance comparison](https://github.com/gilbertchen/cloud-storage-comparison) of these storages measured the running times (in seconds) of backing up and restoring the [Linux code base](https://github.com/torvalds/linux) as follows:
|
||||
|
||||
```
|
||||
Storage URL: sftp://username@server/path/to/storage
|
||||
```
|
||||
| Storage | initial backup | 2nd | 3rd | initial restore | 2nd | 3rd |
|
||||
|:--------------------:|:------:|:----:|:-----:|:----:|:-----:|:----:|
|
||||
| SFTP | 31.5 | 6.6 | 20.6 | 22.5 | 7.8 | 18.4 |
|
||||
| Amazon S3 | 41.1 | 5.9 | 21.9 | 27.7 | 7.6 | 23.5 |
|
||||
| Wasabi | 38.7 | 5.7 | 31.7 | 25.7 | 6.5 | 23.2 |
|
||||
| DigitalOcean Spaces | 51.6 | 7.1 | 31.7 | 29.3 | 6.4 | 27.6 |
|
||||
| Backblaze B2 | 106.7 | 24.0 | 88.2 | 67.9 | 14.4 | 39.1 |
|
||||
| Google Cloud Storage | 76.9 | 11.9 | 33.1 | 39.5 | 9.9 | 26.2 |
|
||||
| Google Drive | 139.3 | 14.7 | 45.2 | 129.4 | 17.8 | 54.4 |
|
||||
| Microsoft Azure | 35.0 | 5.4 | 20.4 | 30.7 | 7.1 | 21.5 |
|
||||
| Microsoft OneDrive | 250.0 | 31.6 | 80.2 | 333.4 | 26.2 | 82.0 |
|
||||
| Dropbox | 267.2 | 35.8 | 113.7 | 164.0 | 31.6 | 80.3 |
|
||||
|
||||
Login methods include password authentication and public key authentication. Due to a limitation of the underlying Go SSH library, the key pair for public key authentication must be generated without a passphrase. To work with a key that has a passphrase, you can set up SSH agent forwarding which is also supported by Duplicacy.
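For reference, this is roughly how agent-based authentication looks with the Go SSH library (`golang.org/x/crypto/ssh`). It is a simplified sketch with placeholder host and user names, not Duplicacy's own SFTP code.

```go
// Sketch of SSH agent authentication with golang.org/x/crypto/ssh
// (simplified; the host, user, and InsecureIgnoreHostKey callback are
// placeholders for illustration only).
package main

import (
	"log"
	"net"
	"os"

	"golang.org/x/crypto/ssh"
	"golang.org/x/crypto/ssh/agent"
)

func main() {
	// Connect to the running ssh-agent, which holds the decrypted key.
	sock, err := net.Dial("unix", os.Getenv("SSH_AUTH_SOCK"))
	if err != nil {
		log.Fatal(err)
	}
	sshAgent := agent.NewClient(sock)

	config := &ssh.ClientConfig{
		User:            "user",
		Auth:            []ssh.AuthMethod{ssh.PublicKeysCallback(sshAgent.Signers)},
		HostKeyCallback: ssh.InsecureIgnoreHostKey(), // do not use in production
	}
	client, err := ssh.Dial("tcp", "192.168.1.100:22", config)
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()
	log.Println("authenticated via ssh-agent")
}
```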
|
||||
For more details please visit https://github.com/gilbertchen/cloud-storage-comparison.
|
||||
|
||||
#### Dropbox
|
||||
|
||||
```
|
||||
Storage URL: dropbox://path/to/storage
|
||||
```
|
||||
|
||||
For Duplicacy to access your Dropbox storage, you must provide an access token that can be obtained in one of two ways:
|
||||
|
||||
* Create your own app on the [Dropbox Developer](https://www.dropbox.com/developers) page, and then generate the [access token](https://blogs.dropbox.com/developers/2014/05/generate-an-access-token-for-your-own-account/)
|
||||
|
||||
* Or authorize Duplicacy to access its app folder inside your Dropbox (following [this link](https://dl.dropboxusercontent.com/u/95866350/start_dropbox_token.html)), and Dropbox will generate the access token (which is not visible to us, as the redirect page showing the token is merely a static html hosted by Dropbox)
|
||||
|
||||
Dropbox has two advantages over other cloud providers. First, if you are already a paid user, using the otherwise unused space as backup storage is essentially free. Second, unlike other providers, Dropbox does not charge bandwidth or API usage fees.
|
||||
|
||||
#### Amazon S3
|
||||
|
||||
```
|
||||
Storage URL: s3://amazon.com/bucket/path/to/storage (default region is us-east-1)
|
||||
s3://region@amazon.com/bucket/path/to/storage (other regions must be specified)
|
||||
```
|
||||
|
||||
You'll need to input an access key and a secret key to access your Amazon S3 storage.
|
||||
|
||||
|
||||
#### Google Cloud Storage
|
||||
|
||||
```
|
||||
Storage URL: gcs://bucket/path/to/storage
|
||||
```
|
||||
|
||||
Starting from version 2.0.0, a new Google Cloud Storage backend has been added, implemented using the [official Google client library](https://godoc.org/cloud.google.com/go/storage). You must first obtain a credential file by [authorizing](https://duplicacy.com/gcp_start) Duplicacy to access your Google Cloud Storage account or by [downloading](https://console.cloud.google.com/projectselector/iam-admin/serviceaccounts) a service account credential file.
|
||||
|
||||
You can also use the s3 protocol to access Google Cloud Storage. To do this, you must enable the [s3 interoperability](https://cloud.google.com/storage/docs/migrating#migration-simple) in your Google Cloud Storage settings and set the storage url as `s3://storage.googleapis.com/bucket/path/to/storage`.
|
||||
|
||||
#### Microsoft Azure
|
||||
|
||||
```
|
||||
Storage URL: azure://account/container
|
||||
```
|
||||
|
||||
You'll need to input the access key once prompted.
|
||||
|
||||
#### Backblaze
|
||||
|
||||
```
|
||||
Storage URL: b2://bucket
|
||||
```
|
||||
|
||||
You'll need to input the account id and application key.
|
||||
|
||||
Backblaze's B2 storage is not only the least expensive (at 0.5 cent per GB per month), but also the fastest. We have been working closely with their developers to leverage the full potential of the B2 API in order to maximize transfer speed.
|
||||
|
||||
#### Google Drive
|
||||
|
||||
```
|
||||
Storage URL: gcd://path/to/storage
|
||||
```
|
||||
|
||||
To use Google Drive as the storage, you first need to download a token file from https://duplicacy.com/gcd_start by
|
||||
authorizing Duplicacy to access your Google Drive, and then enter the path to this token file to Duplicacy when prompted.
|
||||
|
||||
#### Microsoft OneDrive
|
||||
|
||||
```
|
||||
Storage URL: one://path/to/storage
|
||||
```
|
||||
|
||||
To use Microsoft OneDrive as the storage, you first need to download a token file from https://duplicacy.com/one_start by
|
||||
authorizing Duplicacy to access your OneDrive, and then enter the path to this token file to Duplicacy when prompted.
|
||||
|
||||
#### Hubic
|
||||
|
||||
```
|
||||
Storage URL: hubic://path/to/storage
|
||||
```
|
||||
|
||||
To use Hubic as the storage, you first need to download a token file from https://duplicacy.com/hubic_start by
|
||||
authorizing Duplicacy to access your Hubic drive, and then enter the path to this token file to Duplicacy when prompted.
|
||||
|
||||
Hubic offers the most free space (25GB) of all major cloud providers and there is no bandwidth charge (same as Google Drive and OneDrive), so it may be worth a try.
|
||||
|
||||
|
||||
## Comparison with Other Backup Tools
|
||||
## Feature Comparison with Other Backup Tools
|
||||
|
||||
[duplicity](http://duplicity.nongnu.org) works by applying the rsync algorithm (or, more specifically, the [librsync](https://github.com/librsync/librsync) library)
|
||||
to find the differences from previous backups and then uploading only the differences. It is the only existing backup tool with extensive cloud support -- the [long list](http://duplicity.nongnu.org/duplicity.1.html#sect7) of storage backends covers almost every cloud provider one can think of. However, duplicity's biggest flaw lies in its incremental model -- a chain of dependent backups starts with a full backup followed by a number of incremental ones, and ends when another full backup is uploaded. Deleting one backup will render useless all the subsequent backups on the same chain. Periodic full backups are required in order to make previous backups disposable.
|
||||
@@ -242,9 +101,34 @@ The following table compares the feature lists of all these backup tools:
|
||||
| Snapshot Migration | No | No | No | No | No | **Yes** |
|
||||
|
||||
|
||||
## Performance Comparison with Other Backup Tools
|
||||
|
||||
Duplicacy is not only more feature-rich but also faster than other backup tools. The following table lists the running times in seconds of backing up the [Linux code base](https://github.com/torvalds/linux) using Duplicacy and 3 other tools. Clearly Duplicacy is the fastest by a significant margin.
|
||||
|
||||
|
||||
| | Duplicacy | restic | Attic | duplicity |
|
||||
|:------------------:|:----------------:|:----------:|:----------:|:-----------:|
|
||||
| Initial backup | 13.7 | 20.7 | 26.9 | 44.2 |
|
||||
| 2nd backup | 4.8 | 8.0 | 15.4 | 19.5 |
|
||||
| 3rd backup | 6.9 | 11.9 | 19.6 | 29.8 |
|
||||
| 4th backup | 3.3 | 7.0 | 13.7 | 18.6 |
|
||||
| 5th backup | 9.9 | 11.4 | 19.9 | 28.0 |
|
||||
| 6th backup | 3.8 | 8.0 | 16.8 | 22.0 |
|
||||
| 7th backup | 5.1 | 7.8 | 14.3 | 21.6 |
|
||||
| 8th backup | 9.5 | 13.5 | 18.3 | 35.0 |
|
||||
| 9th backup | 4.3 | 9.0 | 15.7 | 24.9 |
|
||||
| 10th backup | 7.9 | 20.2 | 32.2 | 35.0 |
|
||||
| 11th backup | 4.6 | 9.1 | 16.8 | 28.1 |
|
||||
| 12th backup | 7.4 | 12.0 | 21.7 | 37.4 |
|
||||
|
||||
|
||||
For more details and other speed comparison results, please visit https://github.com/gilbertchen/benchmarking. There you can also find test scripts that you can use to run your own experiments.
|
||||
|
||||
## License
|
||||
|
||||
Duplicacy CLI is released under the [Fair Source 5 License](https://fair.io), which means it is free for individual users or any company or organization with less than 5 users. If your company or organization has 5 or more users, then a license for the actual number of users must be purchased from [duplicacy.com](https://duplicacy.com/customer).
|
||||
|
||||
A user is defined as the owner of any files to be backed up by Duplicacy. If you are an IT administrator who uses Duplicacy to back up files for your colleagues, then each colleague will be counted in the user limit permitted by the license.
|
||||
* Free for personal use or commercial trial
|
||||
* Non-trial commercial use requires per-user CLI licenses available from [duplicacy.com](https://duplicacy.com/buy) at a cost of $20 per year
|
||||
* A user is defined as the computer account that creates or edits the files to be backed up; if a backup contains files created or edited by multiple users for commercial purposes, one CLI license is required for each user
|
||||
* The computer with a valid commercial license for the GUI version may run the CLI version without a CLI license
|
||||
* CLI licenses are not required to restore or manage backups; only the backup command requires valid CLI licenses
|
||||
* Modification and redistribution are permitted, but commercial use of derivative works is subject to the same requirements of this license
|
||||
|
||||
@@ -1,25 +1,27 @@
|
||||
// Copyright (c) Acrosync LLC. All rights reserved.
|
||||
// Licensed under the Fair Source License 0.9 (https://fair.io/)
|
||||
// User Limitation: 5 users
|
||||
// Free for personal use and commercial trial
|
||||
// Commercial use requires per-user licenses available from https://duplicacy.com
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"os"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"os"
|
||||
"os/exec"
|
||||
"os/signal"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"regexp"
|
||||
"strings"
|
||||
"runtime"
|
||||
"strconv"
|
||||
"os/exec"
|
||||
"os/signal"
|
||||
"encoding/json"
|
||||
"strings"
|
||||
|
||||
"github.com/gilbertchen/cli"
|
||||
|
||||
"github.com/gilbertchen/duplicacy/src"
|
||||
"io/ioutil"
|
||||
|
||||
"github.com/gilbertchen/duplicacy/src"
|
||||
)
|
||||
|
||||
const (
|
||||
@@ -147,18 +149,27 @@ func runScript(context *cli.Context, storageName string, phase string) bool {
|
||||
|
||||
preferencePath := duplicacy.GetDuplicacyPreferencePath()
|
||||
scriptDir, _ := filepath.Abs(path.Join(preferencePath, "scripts"))
|
||||
scriptName := phase + "-" + context.Command.Name
|
||||
scriptNames := []string{phase + "-" + context.Command.Name,
|
||||
storageName + "-" + phase + "-" + context.Command.Name}
|
||||
|
||||
script := path.Join(scriptDir, scriptName)
|
||||
if _, err := os.Stat(script); err != nil {
|
||||
scriptName = storageName + "-" + scriptName
|
||||
script := ""
|
||||
for _, scriptName := range scriptNames {
|
||||
script = path.Join(scriptDir, scriptName)
|
||||
if _, err = os.Stat(script); err != nil {
|
||||
if runtime.GOOS == "windows" {
|
||||
script += ".bat"
|
||||
}
|
||||
if _, err := os.Stat(script); err == nil {
|
||||
break
|
||||
} else {
|
||||
script = ""
|
||||
}
|
||||
}
|
||||
|
||||
if script == "" {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
duplicacy.LOG_INFO("SCRIPT_RUN", "Running %s script", scriptName)
|
||||
duplicacy.LOG_INFO("SCRIPT_RUN", "Running script %s", script)
|
||||
|
||||
output, err := exec.Command(script, os.Args...).CombinedOutput()
|
||||
for _, line := range strings.Split(string(output), "\n") {
|
||||
@@ -212,6 +223,11 @@ func configRepository(context *cli.Context, init bool) {
|
||||
storageName = context.Args()[0]
|
||||
snapshotID = context.Args()[1]
|
||||
storageURL = context.Args()[2]
|
||||
|
||||
if strings.ToLower(storageName) == "ssh" {
|
||||
duplicacy.LOG_ERROR("PREFERENCE_INVALID", "'%s' is an invalid storage name", storageName)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
var repository string
|
||||
@@ -229,7 +245,6 @@ func configRepository(context *cli.Context, init bool) {
|
||||
preferencePath = path.Join(repository, duplicacy.DUPLICACY_DIRECTORY) // TOKEEP
|
||||
}
|
||||
|
||||
|
||||
if stat, _ := os.Stat(path.Join(preferencePath, "preferences")); stat != nil {
|
||||
duplicacy.LOG_ERROR("REPOSITORY_INIT", "The repository %s has already been initialized", repository)
|
||||
return
|
||||
@@ -352,12 +367,14 @@ func configRepository(context *cli.Context, init bool) {
|
||||
}
|
||||
|
||||
var otherConfig *duplicacy.Config
|
||||
var bitCopy bool
|
||||
if context.String("copy") != "" {
|
||||
|
||||
otherPreference := duplicacy.FindPreference(context.String("copy"))
|
||||
|
||||
if otherPreference == nil {
|
||||
|
||||
duplicacy.LOG_ERROR("STORAGE_NOTFOUND", "Storage '%s' can't be found", context.String("copy"))
|
||||
return
|
||||
}
|
||||
|
||||
otherStorage := duplicacy.CreateStorage(*otherPreference, false, 1)
|
||||
@@ -379,10 +396,16 @@ func configRepository(context *cli.Context, init bool) {
|
||||
duplicacy.LOG_ERROR("STORAGE_NOT_CONFIGURED",
|
||||
"The storage to copy the configuration from has not been initialized")
|
||||
}
|
||||
|
||||
bitCopy = context.Bool("bit-identical")
|
||||
}
|
||||
|
||||
duplicacy.ConfigStorage(storage, compressionLevel, averageChunkSize, maximumChunkSize,
|
||||
minimumChunkSize, storagePassword, otherConfig)
|
||||
iterations := context.Int("iterations")
|
||||
if iterations == 0 {
|
||||
iterations = duplicacy.CONFIG_DEFAULT_ITERATIONS
|
||||
}
|
||||
duplicacy.ConfigStorage(storage, iterations, compressionLevel, averageChunkSize, maximumChunkSize,
|
||||
minimumChunkSize, storagePassword, otherConfig, bitCopy)
|
||||
}
|
||||
|
||||
duplicacy.Preferences = append(duplicacy.Preferences, preference)
|
||||
@@ -534,7 +557,9 @@ func changePassword(context *cli.Context) {
|
||||
|
||||
password := ""
|
||||
if preference.Encrypted {
|
||||
password = duplicacy.GetPassword(*preference, "password", "Enter old password for storage %s:", false, true)
|
||||
password = duplicacy.GetPassword(*preference, "password",
|
||||
fmt.Sprintf("Enter old password for storage %s:", preference.StorageURL),
|
||||
false, true)
|
||||
}
|
||||
|
||||
config, _, err := duplicacy.DownloadConfig(storage, password)
|
||||
@@ -559,7 +584,12 @@ func changePassword(context *cli.Context) {
|
||||
return
|
||||
}
|
||||
|
||||
duplicacy.UploadConfig(storage, config, newPassword)
|
||||
iterations := context.Int("iterations")
|
||||
if iterations == 0 {
|
||||
iterations = duplicacy.CONFIG_DEFAULT_ITERATIONS
|
||||
}
|
||||
|
||||
duplicacy.UploadConfig(storage, config, newPassword, iterations)
|
||||
|
||||
duplicacy.SavePassword(*preference, "password", newPassword)
|
||||
|
||||
@@ -611,12 +641,14 @@ func backupRepository(context *cli.Context) {
|
||||
|
||||
enableVSS := context.Bool("vss")
|
||||
|
||||
dryRun := context.Bool("dry-run")
|
||||
uploadRateLimit := context.Int("limit-rate")
|
||||
storage.SetRateLimits(0, uploadRateLimit)
|
||||
backupManager := duplicacy.CreateBackupManager(preference.SnapshotID, storage, repository, password)
|
||||
duplicacy.SavePassword(*preference, "password", password)
|
||||
|
||||
backupManager.SetupSnapshotCache(preference.Name)
|
||||
backupManager.SetDryRun(dryRun)
|
||||
backupManager.Backup(repository, quickMode, threads, context.String("t"), showStatistics, enableVSS)
|
||||
|
||||
runScript(context, preference.Name, "post")
|
||||
@@ -662,11 +694,15 @@ func restoreRepository(context *cli.Context) {
|
||||
quickMode := !context.Bool("hash")
|
||||
overwrite := context.Bool("overwrite")
|
||||
deleteMode := context.Bool("delete")
|
||||
setOwner := !context.Bool("ignore-owner")
|
||||
|
||||
showStatistics := context.Bool("stats")
|
||||
|
||||
var patterns []string
|
||||
for _, pattern := range context.Args() {
|
||||
|
||||
pattern = strings.TrimSpace(pattern)
|
||||
|
||||
for strings.HasPrefix(pattern, "--") {
|
||||
pattern = pattern[1:]
|
||||
}
|
||||
@@ -675,24 +711,33 @@ func restoreRepository(context *cli.Context) {
|
||||
pattern = pattern[1:]
|
||||
}
|
||||
|
||||
if pattern[0] != '+' && pattern[0] != '-' {
|
||||
if duplicacy.IsUnspecifiedFilter(pattern) {
|
||||
pattern = "+" + pattern
|
||||
}
|
||||
|
||||
if pattern == "+" || pattern == "-" {
|
||||
if duplicacy.IsEmptyFilter(pattern) {
|
||||
continue
|
||||
}
|
||||
|
||||
if strings.HasPrefix(pattern, "i:") || strings.HasPrefix(pattern, "e:") {
|
||||
valid, err := duplicacy.IsValidRegex(pattern[2:])
|
||||
if !valid || err != nil {
|
||||
duplicacy.LOG_ERROR("SNAPSHOT_FILTER", "Invalid regular expression encountered for filter: \"%s\", error: %v", pattern, err)
|
||||
}
|
||||
}
|
||||
|
||||
patterns = append(patterns, pattern)
|
||||
|
||||
}
|
||||
|
||||
duplicacy.LOG_DEBUG("REGEX_DEBUG", "There are %d compiled regular expressions stored", len(duplicacy.RegexMap))
|
||||
|
||||
storage.SetRateLimits(context.Int("limit-rate"), 0)
|
||||
backupManager := duplicacy.CreateBackupManager(preference.SnapshotID, storage, repository, password)
|
||||
duplicacy.SavePassword(*preference, "password", password)
|
||||
|
||||
backupManager.SetupSnapshotCache(preference.Name)
|
||||
backupManager.Restore(repository, revision, true, quickMode, threads, overwrite, deleteMode, showStatistics, patterns)
|
||||
backupManager.Restore(repository, revision, true, quickMode, threads, overwrite, deleteMode, setOwner, showStatistics, patterns)
|
||||
|
||||
runScript(context, preference.Name, "post")
|
||||
}
|
||||
@@ -787,12 +832,13 @@ func checkSnapshots(context *cli.Context) {
|
||||
}
|
||||
|
||||
showStatistics := context.Bool("stats")
|
||||
showTabular := context.Bool("tabular")
|
||||
checkFiles := context.Bool("files")
|
||||
searchFossils := context.Bool("fossils")
|
||||
resurrect := context.Bool("resurrect")
|
||||
|
||||
backupManager.SetupSnapshotCache(preference.Name)
|
||||
backupManager.SnapshotManager.CheckSnapshots(id, revisions, tag, showStatistics, checkFiles, searchFossils, resurrect)
|
||||
backupManager.SnapshotManager.CheckSnapshots(id, revisions, tag, showStatistics, showTabular, checkFiles, searchFossils, resurrect)
|
||||
|
||||
runScript(context, preference.Name, "post")
|
||||
}
|
||||
@@ -864,7 +910,6 @@ func diff(context *cli.Context) {
|
||||
return
|
||||
}
|
||||
|
||||
|
||||
password := ""
|
||||
if preference.Encrypted {
|
||||
password = duplicacy.GetPassword(*preference, "password", "Enter storage password:", false, false)
|
||||
@@ -1008,12 +1053,17 @@ func copySnapshots(context *cli.Context) {
|
||||
os.Exit(ArgumentExitCode)
|
||||
}
|
||||
|
||||
threads := context.Int("threads")
|
||||
if threads < 1 {
|
||||
threads = 1
|
||||
}
|
||||
|
||||
repository, source := getRepositoryPreference(context, context.String("from"))
|
||||
|
||||
runScript(context, source.Name, "pre")
|
||||
|
||||
duplicacy.LOG_INFO("STORAGE_SET", "Source storage set to %s", source.StorageURL)
|
||||
sourceStorage := duplicacy.CreateStorage(*source, false, 1)
|
||||
sourceStorage := duplicacy.CreateStorage(*source, false, threads)
|
||||
if sourceStorage == nil {
|
||||
return
|
||||
}
|
||||
@@ -1027,7 +1077,6 @@ func copySnapshots(context *cli.Context) {
|
||||
sourceManager.SetupSnapshotCache(source.Name)
|
||||
duplicacy.SavePassword(*source, "password", sourcePassword)
|
||||
|
||||
|
||||
_, destination := getRepositoryPreference(context, context.String("to"))
|
||||
|
||||
if destination.Name == source.Name {
|
||||
@@ -1041,9 +1090,8 @@ func copySnapshots(context *cli.Context) {
|
||||
return
|
||||
}
|
||||
|
||||
|
||||
duplicacy.LOG_INFO("STORAGE_SET", "Destination storage set to %s", destination.StorageURL)
|
||||
destinationStorage := duplicacy.CreateStorage(*destination, false, 1)
|
||||
destinationStorage := duplicacy.CreateStorage(*destination, false, threads)
|
||||
if destinationStorage == nil {
|
||||
return
|
||||
}
|
||||
@@ -1054,8 +1102,8 @@ func copySnapshots(context *cli.Context) {
|
||||
"Enter destination storage password:", false, false)
|
||||
}
|
||||
|
||||
sourceStorage.SetRateLimits(context.Int("download-rate-limit"), 0)
|
||||
destinationStorage.SetRateLimits(0, context.Int("upload-rate-limit"))
|
||||
sourceStorage.SetRateLimits(context.Int("download-limit-rate"), 0)
|
||||
destinationStorage.SetRateLimits(0, context.Int("upload-limit-rate"))
|
||||
|
||||
destinationManager := duplicacy.CreateBackupManager(destination.SnapshotID, destinationStorage, repository,
|
||||
destinationPassword)
|
||||
@@ -1068,11 +1116,6 @@ func copySnapshots(context *cli.Context) {
|
||||
snapshotID = context.String("id")
|
||||
}
|
||||
|
||||
threads := context.Int("threads")
|
||||
if threads < 1 {
|
||||
threads = 1
|
||||
}
|
||||
|
||||
sourceManager.CopySnapshots(destinationManager, snapshotID, revisions, threads)
|
||||
runScript(context, source.Name, "post")
|
||||
}
|
||||
@@ -1094,6 +1137,7 @@ func infoStorage(context *cli.Context) {
|
||||
duplicacy.SetKeyringFile(path.Join(preferencePath, "keyring"))
|
||||
}
|
||||
|
||||
resetPasswords := context.Bool("reset-passwords")
|
||||
isEncrypted := context.Bool("e")
|
||||
preference := duplicacy.Preference{
|
||||
Name: "default",
|
||||
@@ -1103,12 +1147,18 @@ func infoStorage(context *cli.Context) {
|
||||
DoNotSavePassword: true,
|
||||
}
|
||||
|
||||
password := ""
|
||||
if isEncrypted {
|
||||
password = duplicacy.GetPassword(preference, "password", "Enter the storage password:", false, false)
|
||||
if resetPasswords {
|
||||
// We don't want the password entered for the info command to overwrite the saved password for the default storage,
|
||||
// so we simply assign an empty name.
|
||||
preference.Name = ""
|
||||
}
|
||||
|
||||
storage := duplicacy.CreateStorage(preference, context.Bool("reset-passwords"), 1)
|
||||
password := ""
|
||||
if isEncrypted {
|
||||
password = duplicacy.GetPassword(preference, "password", "Enter the storage password:", false, resetPasswords)
|
||||
}
|
||||
|
||||
storage := duplicacy.CreateStorage(preference, resetPasswords, 1)
|
||||
config, isStorageEncrypted, err := duplicacy.DownloadConfig(storage, password)
|
||||
|
||||
if isStorageEncrypted {
|
||||
@@ -1139,23 +1189,28 @@ func main() {
|
||||
cli.StringFlag{
|
||||
Name: "chunk-size, c",
|
||||
Value: "4M",
|
||||
Usage: "the average size of chunks",
|
||||
Argument: "4M",
|
||||
Usage: "the average size of chunks (defaults to 4M)",
|
||||
Argument: "<size>",
|
||||
},
|
||||
cli.StringFlag{
|
||||
Name: "max-chunk-size, max",
|
||||
Usage: "the maximum size of chunks (defaults to chunk-size*4)",
|
||||
Argument: "16M",
|
||||
Argument: "<size>",
|
||||
},
|
||||
cli.StringFlag{
|
||||
Name: "min-chunk-size, min",
|
||||
Usage: "the minimum size of chunks (defaults to chunk-size/4)",
|
||||
Argument: "1M",
|
||||
Argument: "<size>",
|
||||
},
|
||||
cli.IntFlag{
|
||||
Name: "iterations",
|
||||
Usage: "the number of iterations used in storage key deriviation (default is 16384)",
|
||||
Argument: "<i>",
|
||||
},
|
||||
cli.StringFlag{
|
||||
Name: "pref-dir",
|
||||
Usage: "Specify alternate location for .duplicacy preferences directory (absolute or relative to current directory)",
|
||||
Argument: "<preferences directory path>",
|
||||
Usage: "alternate location for the .duplicacy directory (absolute or relative to current directory)",
|
||||
Argument: "<path>",
|
||||
},
|
||||
},
|
||||
Usage: "Initialize the storage if necessary and the current directory as the repository",
|
||||
@@ -1190,6 +1245,10 @@ func main() {
|
||||
Usage: "the maximum upload rate (in kilobytes/sec)",
|
||||
Argument: "<kB/s>",
|
||||
},
|
||||
cli.BoolFlag{
|
||||
Name: "dry-run",
|
||||
Usage: "Dry run for testing, don't backup anything. Use with -stats and -d",
|
||||
},
|
||||
cli.BoolFlag{
|
||||
Name: "vss",
|
||||
Usage: "enable the Volume Shadow Copy service (Windows only)",
|
||||
@@ -1225,6 +1284,10 @@ func main() {
|
||||
Name: "delete",
|
||||
Usage: "delete files not in the snapshot",
|
||||
},
|
||||
cli.BoolFlag{
|
||||
Name: "ignore-owner",
|
||||
Usage: "do not set the original uid/gid on restored files",
|
||||
},
|
||||
cli.BoolFlag{
|
||||
Name: "stats",
|
||||
Usage: "show statistics during and after restore",
|
||||
@@ -1334,6 +1397,10 @@ func main() {
|
||||
Name: "stats",
|
||||
Usage: "show deduplication statistics (imply -all and all revisions)",
|
||||
},
|
||||
cli.BoolFlag{
|
||||
Name: "tabular",
|
||||
Usage: "show tabular usage and deduplication statistics (imply -stats, -all, and all revisions)",
|
||||
},
|
||||
cli.StringFlag{
|
||||
Name: "storage",
|
||||
Usage: "retrieve snapshots from the specified storage",
|
||||
@@ -1465,7 +1532,7 @@ func main() {
|
||||
},
|
||||
cli.BoolFlag{
|
||||
Name: "delete-only",
|
||||
Usage: "delete fossils previsouly collected (if deletable) and don't collect fossils",
|
||||
Usage: "delete fossils previously collected (if deletable) and don't collect fossils",
|
||||
},
|
||||
cli.BoolFlag{
|
||||
Name: "collect-only",
|
||||
@@ -1487,7 +1554,6 @@ func main() {
|
||||
Action: pruneSnapshots,
|
||||
},
|
||||
|
||||
|
||||
{
|
||||
Name: "password",
|
||||
Flags: []cli.Flag{
|
||||
@@ -1496,6 +1562,11 @@ func main() {
|
||||
Usage: "change the password used to access the specified storage",
|
||||
Argument: "<storage name>",
|
||||
},
|
||||
cli.IntFlag{
|
||||
Name: "iterations",
|
||||
Usage: "the number of iterations used in storage key deriviation (default is 16384)",
|
||||
Argument: "<i>",
|
||||
},
|
||||
},
|
||||
Usage: "Change the storage password",
|
||||
ArgsUsage: " ",
|
||||
@@ -1507,29 +1578,38 @@ func main() {
|
||||
Flags: []cli.Flag{
|
||||
cli.BoolFlag{
|
||||
Name: "encrypt, e",
|
||||
Usage: "Encrypt the storage with a password",
|
||||
Usage: "encrypt the storage with a password",
|
||||
},
|
||||
cli.StringFlag{
|
||||
Name: "chunk-size, c",
|
||||
Value: "4M",
|
||||
Usage: "the average size of chunks",
|
||||
Argument: "4M",
|
||||
Usage: "the average size of chunks (default is 4M)",
|
||||
Argument: "<size>",
|
||||
},
|
||||
cli.StringFlag{
|
||||
Name: "max-chunk-size, max",
|
||||
Usage: "the maximum size of chunks (defaults to chunk-size * 4)",
|
||||
Argument: "16M",
|
||||
Usage: "the maximum size of chunks (default is chunk-size*4)",
|
||||
Argument: "<size>",
|
||||
},
|
||||
cli.StringFlag{
|
||||
Name: "min-chunk-size, min",
|
||||
Usage: "the minimum size of chunks (defaults to chunk-size / 4)",
|
||||
Argument: "1M",
|
||||
Usage: "the minimum size of chunks (default is chunk-size/4)",
|
||||
Argument: "<size>",
|
||||
},
|
||||
cli.IntFlag{
|
||||
Name: "iterations",
|
||||
Usage: "the number of iterations used in storage key deriviation (default is 16384)",
|
||||
Argument: "<i>",
|
||||
},
|
||||
cli.StringFlag{
|
||||
Name: "copy",
|
||||
Usage: "make the new storage compatible with an existing one to allow for copy operations",
|
||||
Argument: "<storage name>",
|
||||
},
|
||||
cli.BoolFlag{
|
||||
Name: "bit-identical",
|
||||
Usage: "(when using -copy) make the new storage bit-identical to also allow rsync etc.",
|
||||
},
|
||||
},
|
||||
Usage: "Add an additional storage to be used for the existing repository",
|
||||
ArgsUsage: "<storage name> <snapshot id> <storage url>",
|
||||
@@ -1649,7 +1729,6 @@ func main() {
|
||||
ArgsUsage: "<storage url>",
|
||||
Action: infoStorage,
|
||||
},
|
||||
|
||||
}
|
||||
|
||||
app.Flags = []cli.Flag{
|
||||
@@ -1683,7 +1762,7 @@ func main() {
|
||||
app.Name = "duplicacy"
|
||||
app.HelpName = "duplicacy"
|
||||
app.Usage = "A new generation cloud backup tool based on lock-free deduplication"
|
||||
app.Version = "2.0.4"
|
||||
app.Version = "2.0.10"
|
||||
|
||||
// If the program is interrupted, call the RunAtError function.
|
||||
c := make(chan os.Signal, 1)
|
||||
|
||||
31
integration_tests/copy_test.sh
Executable file
@@ -0,0 +1,31 @@
|
||||
#!/bin/bash
|
||||
|
||||
|
||||
. ./test_functions.sh
|
||||
|
||||
fixture
|
||||
|
||||
pushd ${TEST_REPO}
|
||||
${DUPLICACY} init integration-tests $TEST_STORAGE -c 1k
|
||||
${DUPLICACY} add -copy default secondary integration-tests $SECONDARY_STORAGE
|
||||
add_file file1
|
||||
add_file file2
|
||||
${DUPLICACY} backup
|
||||
${DUPLICACY} copy -from default -to secondary
|
||||
add_file file3
|
||||
add_file file4
|
||||
${DUPLICACY} backup
|
||||
${DUPLICACY} copy -from default -to secondary
|
||||
${DUPLICACY} check --files -stats -storage default
|
||||
${DUPLICACY} check --files -stats -storage secondary
|
||||
# Prune revisions from default storage
|
||||
${DUPLICACY} -d -v -log prune -r 1-2 -exclusive -exhaustive -storage default
|
||||
# Copy snapshot revisions from secondary back to default
|
||||
${DUPLICACY} copy -from secondary -to default
|
||||
# Check snapshot revisions again to make sure we're ok!
|
||||
${DUPLICACY} check --files -stats -storage default
|
||||
${DUPLICACY} check --files -stats -storage secondary
|
||||
# Check for orphaned or missing chunks
|
||||
${DUPLICACY} prune -exhaustive -exclusive -storage default
|
||||
${DUPLICACY} prune -exhaustive -exclusive -storage secondary
|
||||
popd
|
||||
@@ -6,32 +6,28 @@
|
||||
fixture
|
||||
|
||||
pushd ${TEST_REPO}
|
||||
${DUPLICACY} init integration-tests $TEST_STORAGE -c 4k
|
||||
${DUPLICACY} init integration-tests $TEST_STORAGE -c 4
|
||||
|
||||
# Create 10 20k files
|
||||
add_file file1 20000
|
||||
add_file file2 20000
|
||||
add_file file3 20000
|
||||
add_file file4 20000
|
||||
add_file file5 20000
|
||||
add_file file6 20000
|
||||
add_file file7 20000
|
||||
add_file file8 20000
|
||||
add_file file9 20000
|
||||
add_file file10 20000
|
||||
# Create 10 small files
|
||||
add_file file1 20
|
||||
add_file file2 20
|
||||
rm file3; touch file3
|
||||
add_file file4 20
|
||||
chmod u-r file4
|
||||
add_file file5 20
|
||||
add_file file6 20
|
||||
add_file file7 20
|
||||
add_file file8 20
|
||||
add_file file9 20
|
||||
add_file file10 20
|
||||
|
||||
# Limit the rate to 10k/s so the backup will take about 10 seconds
|
||||
${DUPLICACY} backup -limit-rate 10 -threads 4 &
|
||||
# Kill the backup after 3 seconds
|
||||
DUPLICACY_PID=$!
|
||||
sleep 3
|
||||
kill -2 ${DUPLICACY_PID}
|
||||
# Fail at the 10th chunk
|
||||
env DUPLICACY_FAIL_CHUNK=10 ${DUPLICACY} backup
|
||||
|
||||
# Try it again to test the multiple-resume case
|
||||
${DUPLICACY} backup -limit-rate 10 -threads 4&
|
||||
DUPLICACY_PID=$!
|
||||
sleep 3
|
||||
kill -2 ${DUPLICACY_PID}
|
||||
env DUPLICACY_FAIL_CHUNK=5 ${DUPLICACY} backup
|
||||
add_file file1 20
|
||||
add_file file2 20
|
||||
|
||||
# Fail the backup before uploading the snapshot
|
||||
env DUPLICACY_FAIL_SNAPSHOT=true ${DUPLICACY} backup
|
||||
|
||||
28
integration_tests/sparse_test.sh
Executable file
@@ -0,0 +1,28 @@
|
||||
#!/bin/bash
|
||||
|
||||
# Testing backup and restore of sparse files
|
||||
|
||||
. ./test_functions.sh
|
||||
|
||||
fixture
|
||||
|
||||
pushd ${TEST_REPO}
|
||||
${DUPLICACY} init integration-tests $TEST_STORAGE -c 1m
|
||||
|
||||
for i in `seq 1 10`; do
|
||||
dd if=/dev/urandom of=file3 bs=1000 count=1000 seek=$((100000 * $i))
|
||||
done
|
||||
|
||||
ls -lsh file3
|
||||
|
||||
${DUPLICACY} backup
|
||||
${DUPLICACY} check --files -stats
|
||||
|
||||
rm file1 file3
|
||||
|
||||
${DUPLICACY} restore -r 1
|
||||
${DUPLICACY} -v restore -r 1 -overwrite -stats -hash
|
||||
|
||||
ls -lsh file3
|
||||
|
||||
popd
|
||||
@@ -10,6 +10,7 @@ backup
|
||||
add_file file3
|
||||
backup
|
||||
add_file file4
|
||||
chmod u-r ${TEST_REPO}/file4
|
||||
backup
|
||||
add_file file5
|
||||
restore
|
||||
|
||||
@@ -1,20 +1,20 @@
|
||||
// Copyright (c) Acrosync LLC. All rights reserved.
|
||||
// Licensed under the Fair Source License 0.9 (https://fair.io/)
|
||||
// User Limitation: 5 users
|
||||
// Free for personal use and commercial trial
|
||||
// Commercial use requires per-user licenses available from https://duplicacy.com
|
||||
|
||||
package duplicacy
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"time"
|
||||
"bytes"
|
||||
"sync"
|
||||
"io/ioutil"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"mime/multipart"
|
||||
"io/ioutil"
|
||||
"math/rand"
|
||||
"mime/multipart"
|
||||
"net/http"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"golang.org/x/oauth2"
|
||||
)
|
||||
@@ -29,6 +29,7 @@ func (err ACDError) Error() string {
|
||||
}
|
||||
|
||||
var ACDRefreshTokenURL = "https://duplicacy.com/acd_refresh"
|
||||
|
||||
type ACDClient struct {
|
||||
HTTPClient *http.Client
|
||||
|
||||
@@ -42,7 +43,6 @@ type ACDClient struct {
|
||||
TestMode bool
|
||||
}
|
||||
|
||||
|
||||
func NewACDClient(tokenFile string) (*ACDClient, error) {
|
||||
|
||||
description, err := ioutil.ReadFile(tokenFile)
|
||||
@@ -69,7 +69,7 @@ func NewACDClient(tokenFile string) (*ACDClient, error) {
|
||||
|
||||
func (client *ACDClient) call(url string, method string, input interface{}, contentType string) (io.ReadCloser, int64, error) {
|
||||
|
||||
LOG_DEBUG("ACD_CALL", "Calling %s", url)
|
||||
//LOG_DEBUG("ACD_CALL", "%s %s", method, url)
|
||||
|
||||
var response *http.Response
|
||||
|
||||
@@ -256,7 +256,7 @@ type ACDListEntriesOutput struct {
|
||||
Entries []ACDEntry `json:"data"`
|
||||
}
|
||||
|
||||
func (client *ACDClient) ListEntries(parentID string, listFiles bool) ([]ACDEntry, error) {
|
||||
func (client *ACDClient) ListEntries(parentID string, listFiles bool, listDirectories bool) ([]ACDEntry, error) {
|
||||
|
||||
startToken := ""
|
||||
|
||||
@@ -264,20 +264,22 @@ func (client *ACDClient) ListEntries(parentID string, listFiles bool) ([]ACDEntr
|
||||
|
||||
for {
|
||||
|
||||
url := client.MetadataURL + "nodes/" + parentID + "/children?filters="
|
||||
url := client.MetadataURL + "nodes/" + parentID + "/children?"
|
||||
|
||||
if listFiles {
|
||||
url += "kind:FILE"
|
||||
} else {
|
||||
url += "kind:FOLDER"
|
||||
if listFiles && !listDirectories {
|
||||
url += "filters=kind:FILE&"
|
||||
} else if !listFiles && listDirectories {
|
||||
url += "filters=kind:FOLDER&"
|
||||
}
|
||||
|
||||
if startToken != "" {
|
||||
url += "&startToken=" + startToken
|
||||
url += "startToken=" + startToken + "&"
|
||||
}
|
||||
|
||||
if client.TestMode {
|
||||
url += "&limit=8"
|
||||
url += "limit=8"
|
||||
} else {
|
||||
url += "limit=200"
|
||||
}
|
||||
|
||||
readCloser, _, err := client.call(url, "GET", 0, "")
|
||||
|
||||
@@ -1,15 +1,15 @@
|
||||
// Copyright (c) Acrosync LLC. All rights reserved.
|
||||
// Licensed under the Fair Source License 0.9 (https://fair.io/)
|
||||
// User Limitation: 5 users
|
||||
// Free for personal use and commercial trial
|
||||
// Commercial use requires per-user licenses available from https://duplicacy.com
|
||||
|
||||
package duplicacy
|
||||
|
||||
import (
|
||||
"io"
|
||||
"fmt"
|
||||
"testing"
|
||||
"crypto/sha256"
|
||||
"encoding/hex"
|
||||
"fmt"
|
||||
"io"
|
||||
"testing"
|
||||
|
||||
crypto_rand "crypto/rand"
|
||||
"math/rand"
|
||||
@@ -103,7 +103,7 @@ func TestACDClient(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
entries, err := acdClient.ListEntries(test1ID, true)
|
||||
entries, err := acdClient.ListEntries(test1ID, true, false)
|
||||
if err != nil {
|
||||
t.Errorf("Error list randomly generated files: %v", err)
|
||||
return
|
||||
@@ -117,7 +117,7 @@ func TestACDClient(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
entries, err = acdClient.ListEntries(test2ID, true)
|
||||
entries, err = acdClient.ListEntries(test2ID, true, false)
|
||||
if err != nil {
|
||||
t.Errorf("Error list randomly generated files: %v", err)
|
||||
return
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
// Copyright (c) Acrosync LLC. All rights reserved.
|
||||
// Licensed under the Fair Source License 0.9 (https://fair.io/)
|
||||
// User Limitation: 5 users
|
||||
// Free for personal use and commercial trial
|
||||
// Commercial use requires per-user licenses available from https://duplicacy.com
|
||||
|
||||
package duplicacy
|
||||
|
||||
@@ -9,10 +9,11 @@ import (
|
||||
"path"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
type ACDStorage struct {
|
||||
RateLimitedStorage
|
||||
StorageBase
|
||||
|
||||
client *ACDClient
|
||||
idCache map[string]string
|
||||
@@ -35,11 +36,13 @@ func CreateACDStorage(tokenFile string, storagePath string, threads int) (storag
|
||||
numberOfThreads: threads,
|
||||
}
|
||||
|
||||
storagePathID, _, _, err := storage.getIDFromPath(0, storagePath)
|
||||
storagePathID, err := storage.getIDFromPath(0, storagePath, false)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Set 'storagePath' as the root of the storage and clean up the id cache accordingly
|
||||
storage.idCache = make(map[string]string)
|
||||
storage.idCache[""] = storagePathID
|
||||
|
||||
for _, dir := range []string{"chunks", "fossils", "snapshots"} {
|
||||
@@ -48,7 +51,6 @@ func CreateACDStorage(tokenFile string, storagePath string, threads int) (storag
|
||||
return nil, err
|
||||
}
|
||||
if dirID == "" {
|
||||
dirID, err = client.CreateDirectory(storagePathID, dir)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -58,8 +60,9 @@ func CreateACDStorage(tokenFile string, storagePath string, threads int) (storag
|
||||
storage.idCache[dir] = dirID
|
||||
}
|
||||
|
||||
storage.DerivedStorage = storage
|
||||
storage.SetDefaultNestingLevels([]int{0}, 0)
|
||||
return storage, nil
|
||||
|
||||
}
|
||||
|
||||
func (storage *ACDStorage) getPathID(path string) string {
|
||||
@@ -88,43 +91,90 @@ func (storage *ACDStorage) deletePathID(path string) {
|
||||
storage.idCacheLock.Unlock()
|
||||
}
|
||||
|
||||
|
||||
func (storage *ACDStorage) convertFilePath(filePath string) (string) {
|
||||
// convertFilePath converts the path for a fossil in the form of 'chunks/id.fsl' to 'fossils/id'. This is because
|
||||
// ACD doesn't support file renaming. Instead, it only allows one file to be moved from one directory to another.
|
||||
// By adding a layer of path conversion we're pretending that we can rename between 'chunks/id' and 'chunks/id.fsl'
|
||||
func (storage *ACDStorage) convertFilePath(filePath string) string {
|
||||
if strings.HasPrefix(filePath, "chunks/") && strings.HasSuffix(filePath, ".fsl") {
|
||||
return "fossils/" + filePath[len("chunks/"):len(filePath)-len(".fsl")]
|
||||
}
|
||||
return filePath
|
||||
}
|
||||
|
||||
func (storage *ACDStorage) getIDFromPath(threadIndex int, path string) (fileID string, isDir bool, size int64, err error) {
|
||||
// getIDFromPath returns the id of the given path. If 'createDirectories' is true, create the given path and all its
|
||||
// parent directories if they don't exist. Note that if 'createDirectories' is false, it may return an empty 'fileID'
|
||||
// if the file doesn't exist.
|
||||
func (storage *ACDStorage) getIDFromPath(threadIndex int, filePath string, createDirectories bool) (fileID string, err error) {
|
||||
|
||||
if fileID, ok := storage.findPathID(filePath); ok {
|
||||
return fileID, nil
|
||||
}
|
||||
|
||||
parentID, ok := storage.findPathID("")
|
||||
if !ok {
|
||||
parentID, isDir, size, err = storage.client.ListByName("", "")
|
||||
parentID, _, _, err = storage.client.ListByName("", "")
|
||||
if err != nil {
|
||||
return "", false, 0, err
|
||||
return "", err
|
||||
}
|
||||
storage.savePathID("", parentID)
|
||||
}
|
||||
|
||||
names := strings.Split(path, "/")
|
||||
names := strings.Split(filePath, "/")
|
||||
current := ""
|
||||
for i, name := range names {
|
||||
parentID, isDir, _, err = storage.client.ListByName(parentID, name)
|
||||
|
||||
current = path.Join(current, name)
|
||||
fileID, ok := storage.findPathID(current)
|
||||
if ok {
|
||||
parentID = fileID
|
||||
continue
|
||||
}
|
||||
isDir := false
|
||||
fileID, isDir, _, err = storage.client.ListByName(parentID, name)
|
||||
if err != nil {
|
||||
return "", false, 0, err
|
||||
return "", err
|
||||
}
|
||||
if parentID == "" {
|
||||
if i == len(names) - 1 {
|
||||
return "", false, 0, nil
|
||||
if fileID == "" {
|
||||
if !createDirectories {
|
||||
return "", nil
|
||||
}
|
||||
// Create the current directory
|
||||
fileID, err = storage.client.CreateDirectory(parentID, name)
|
||||
if err != nil {
|
||||
// Check if the directory has been created by another thread
|
||||
if e, ok := err.(ACDError); !ok || e.Status != 409 {
|
||||
return "", fmt.Errorf("Failed to create directory '%s': %v", current, err)
|
||||
}
// A 409 means the directory may have already been created by another thread. Wait up to 10 seconds
// until we see the directory.
for i := 0; i < 10; i++ {
|
||||
var createErr error
|
||||
fileID, isDir, _, createErr = storage.client.ListByName(parentID, name)
|
||||
if createErr != nil {
|
||||
return "", createErr
|
||||
}
|
||||
if fileID == "" {
|
||||
time.Sleep(time.Second)
|
||||
} else {
|
||||
return "", false, 0, fmt.Errorf("File path '%s' does not exist", path)
|
||||
break
|
||||
}
|
||||
}
|
||||
if fileID == "" {
|
||||
return "", fmt.Errorf("All attempts to create directory '%s' failed: %v", current, err)
|
||||
}
|
||||
} else {
|
||||
isDir = true
|
||||
}
|
||||
} else {
|
||||
storage.savePathID(current, fileID)
|
||||
}
|
||||
if i != len(names)-1 && !isDir {
|
||||
return "", false, 0, fmt.Errorf("Invalid path %s", path)
|
||||
return "", fmt.Errorf("Path '%s' is not a directory", current)
|
||||
}
|
||||
parentID = fileID
|
||||
}
|
||||
|
||||
return parentID, isDir, size, err
|
||||
return parentID, nil
|
||||
}
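// Usage note (illustrative): callers that only look a path up pass createDirectories=false
// and must handle an empty fileID themselves (see DeleteFile and DownloadFile below), while
// UploadFile passes true so that missing parent directories, such as nested 'snapshots/<id>'
// folders, are created on demand.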
|
||||
|
||||
// ListFiles return the list of files and subdirectories under 'dir' (non-recursively)
|
||||
@@ -137,7 +187,7 @@ func (storage *ACDStorage) ListFiles(threadIndex int, dir string) ([]string, []i
|
||||
|
||||
if dir == "snapshots" {
|
||||
|
||||
entries, err := storage.client.ListEntries(storage.getPathID(dir), false)
|
||||
entries, err := storage.client.ListEntries(storage.getPathID(dir), false, true)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
@@ -160,9 +210,10 @@ func (storage *ACDStorage) ListFiles(threadIndex int, dir string) ([]string, []i
|
||||
if pathID == "" {
|
||||
return nil, nil, nil
|
||||
}
|
||||
storage.savePathID(dir, pathID)
|
||||
}
|
||||
|
||||
entries, err := storage.client.ListEntries(pathID, true)
|
||||
entries, err := storage.client.ListEntries(pathID, true, false)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
@@ -177,22 +228,33 @@ func (storage *ACDStorage) ListFiles(threadIndex int, dir string) ([]string, []i
|
||||
} else {
|
||||
files := []string{}
|
||||
sizes := []int64{}
|
||||
for _, parent := range []string {"chunks", "fossils" } {
|
||||
entries, err := storage.client.ListEntries(storage.getPathID(parent), true)
|
||||
parents := []string{"chunks", "fossils"}
|
||||
for i := 0; i < len(parents); i++ {
|
||||
parent := parents[i]
|
||||
pathID, ok := storage.findPathID(parent)
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
entries, err := storage.client.ListEntries(pathID, true, true)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
|
||||
for _, entry := range entries {
|
||||
if entry.Kind != "FOLDER" {
|
||||
name := entry.Name
|
||||
if parent == "fossils" {
|
||||
name += ".fsl"
|
||||
if strings.HasPrefix(parent, "fossils") {
|
||||
name = parent + "/" + name + ".fsl"
|
||||
name = name[len("fossils/"):]
|
||||
} else {
|
||||
name = parent + "/" + name
|
||||
name = name[len("chunks/"):]
|
||||
}
|
||||
|
||||
storage.savePathID(parent + "/" + entry.Name, entry.ID)
|
||||
files = append(files, name)
|
||||
sizes = append(sizes, entry.Size)
|
||||
} else {
|
||||
parents = append(parents, parent+"/"+entry.Name)
|
||||
}
|
||||
storage.savePathID(parent+"/"+entry.Name, entry.ID)
|
||||
}
|
||||
}
|
||||
return files, sizes, nil
|
||||
@@ -203,18 +265,14 @@ func (storage *ACDStorage) ListFiles(threadIndex int, dir string) ([]string, []i
|
||||
// DeleteFile deletes the file or directory at 'filePath'.
|
||||
func (storage *ACDStorage) DeleteFile(threadIndex int, filePath string) (err error) {
|
||||
filePath = storage.convertFilePath(filePath)
|
||||
fileID, ok := storage.findPathID(filePath)
|
||||
if !ok {
|
||||
fileID, _, _, err = storage.getIDFromPath(threadIndex, filePath)
|
||||
fileID, err := storage.getIDFromPath(threadIndex, filePath, false)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if fileID == "" {
|
||||
LOG_TRACE("ACD_STORAGE", "File %s has disappeared before deletion", filePath)
|
||||
LOG_TRACE("ACD_STORAGE", "File '%s' to be deleted does not exist", filePath)
|
||||
return nil
|
||||
}
|
||||
storage.savePathID(filePath, fileID)
|
||||
}
|
||||
|
||||
err = storage.client.DeleteFile(fileID)
|
||||
if e, ok := err.(ACDError); ok && e.Status == 409 {
|
||||
@@ -234,11 +292,19 @@ func (storage *ACDStorage) MoveFile(threadIndex int, from string, to string) (er
|
||||
return fmt.Errorf("Attempting to rename file %s with unknown id", from)
|
||||
}
|
||||
|
||||
fromParentID := storage.getPathID("chunks")
|
||||
toParentID := storage.getPathID("fossils")
|
||||
fromParent := path.Dir(from)
|
||||
fromParentID, err := storage.getIDFromPath(threadIndex, fromParent, false)
|
||||
if err != nil {
|
||||
return fmt.Errorf("Failed to retrieve the id of the parent directory '%s': %v", fromParent, err)
|
||||
}
|
||||
if fromParentID == "" {
|
||||
return fmt.Errorf("The parent directory '%s' does not exist", fromParent)
|
||||
}
|
||||
|
||||
if strings.HasPrefix(from, "fossils") {
|
||||
fromParentID, toParentID = toParentID, fromParentID
|
||||
toParent := path.Dir(to)
|
||||
toParentID, err := storage.getIDFromPath(threadIndex, toParent, true)
|
||||
if err != nil {
|
||||
return fmt.Errorf("Failed to retrieve the id of the parent directory '%s': %v", toParent, err)
|
||||
}
|
||||
|
||||
err = storage.client.MoveFile(fileID, fromParentID, toParentID)
|
||||
@@ -263,13 +329,17 @@ func (storage *ACDStorage) CreateDirectory(threadIndex int, dir string) (err err
|
||||
dir = dir[:len(dir)-1]
|
||||
}
|
||||
|
||||
if dir == "chunks" || dir == "snapshots" {
|
||||
return nil
|
||||
parentPath := path.Dir(dir)
|
||||
if parentPath == "." {
|
||||
parentPath = ""
|
||||
}
|
||||
parentID, ok := storage.findPathID(parentPath)
|
||||
if !ok {
|
||||
return fmt.Errorf("Path directory '%s' has unknown id", parentPath)
|
||||
}
|
||||
|
||||
if strings.HasPrefix(dir, "snapshots/") {
|
||||
name := dir[len("snapshots/"):]
|
||||
dirID, err := storage.client.CreateDirectory(storage.getPathID("snapshots"), name)
|
||||
name := path.Base(dir)
|
||||
dirID, err := storage.client.CreateDirectory(parentID, name)
|
||||
if err != nil {
|
||||
if e, ok := err.(ACDError); ok && e.Status == 409 {
|
||||
return nil
|
||||
@@ -278,9 +348,6 @@ func (storage *ACDStorage) CreateDirectory(threadIndex int, dir string) (err err
|
||||
}
|
||||
}
|
||||
storage.savePathID(dir, dirID)
|
||||
return nil
|
||||
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
@@ -293,8 +360,21 @@ func (storage *ACDStorage) GetFileInfo(threadIndex int, filePath string) (exist
|
||||
}
|
||||
|
||||
filePath = storage.convertFilePath(filePath)
|
||||
fileID := ""
|
||||
fileID, isDir, size, err = storage.getIDFromPath(threadIndex, filePath)
|
||||
|
||||
parentPath := path.Dir(filePath)
|
||||
if parentPath == "." {
|
||||
parentPath = ""
|
||||
}
|
||||
parentID, err := storage.getIDFromPath(threadIndex, parentPath, false)
|
||||
if err != nil {
|
||||
return false, false, 0, err
|
||||
}
|
||||
if parentID == "" {
|
||||
return false, false, 0, nil
|
||||
}
|
||||
|
||||
name := path.Base(filePath)
|
||||
fileID, isDir, size, err := storage.client.ListByName(parentID, name)
|
||||
if err != nil {
|
||||
return false, false, 0, err
|
||||
}
|
||||
@@ -302,44 +382,19 @@ func (storage *ACDStorage) GetFileInfo(threadIndex int, filePath string) (exist
|
||||
return false, false, 0, nil
|
||||
}
|
||||
|
||||
storage.savePathID(filePath, fileID)
|
||||
return true, isDir, size, nil
|
||||
}
|
||||
|
||||
// FindChunk finds the chunk with the specified id. If 'isFossil' is true, it will search for chunk files with
|
||||
// the suffix '.fsl'.
|
||||
func (storage *ACDStorage) FindChunk(threadIndex int, chunkID string, isFossil bool) (filePath string, exist bool, size int64, err error) {
|
||||
parentID := ""
|
||||
filePath = "chunks/" + chunkID
|
||||
realPath := filePath
|
||||
if isFossil {
|
||||
parentID = storage.getPathID("fossils")
|
||||
filePath += ".fsl"
|
||||
realPath = "fossils/" + chunkID + ".fsl"
|
||||
} else {
|
||||
parentID = storage.getPathID("chunks")
|
||||
}
|
||||
|
||||
fileID := ""
|
||||
fileID, _, size, err = storage.client.ListByName(parentID, chunkID)
|
||||
if fileID != "" {
|
||||
storage.savePathID(realPath, fileID)
|
||||
}
|
||||
return filePath, fileID != "", size, err
|
||||
}
|
||||
|
||||
// DownloadFile reads the file at 'filePath' into the chunk.
|
||||
func (storage *ACDStorage) DownloadFile(threadIndex int, filePath string, chunk *Chunk) (err error) {
|
||||
fileID, ok := storage.findPathID(filePath)
|
||||
if !ok {
|
||||
fileID, _, _, err = storage.getIDFromPath(threadIndex, filePath)
|
||||
fileID, err := storage.getIDFromPath(threadIndex, filePath, false)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if fileID == "" {
|
||||
return fmt.Errorf("File path '%s' does not exist", filePath)
|
||||
}
|
||||
storage.savePathID(filePath, fileID)
|
||||
}
|
||||
|
||||
readCloser, _, err := storage.client.DownloadFile(fileID)
|
||||
if err != nil {
|
||||
@@ -355,23 +410,17 @@ func (storage *ACDStorage) DownloadFile(threadIndex int, filePath string, chunk
|
||||
// UploadFile writes 'content' to the file at 'filePath'.
|
||||
func (storage *ACDStorage) UploadFile(threadIndex int, filePath string, content []byte) (err error) {
|
||||
parent := path.Dir(filePath)
|
||||
|
||||
if parent == "." {
|
||||
parent = ""
|
||||
}
|
||||
|
||||
parentID, ok := storage.findPathID(parent)
|
||||
|
||||
if !ok {
|
||||
parentID, _, _, err = storage.getIDFromPath(threadIndex, parent)
|
||||
parentID, err := storage.getIDFromPath(threadIndex, parent, true)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if parentID == "" {
|
||||
return fmt.Errorf("File path '%s' does not exist", parent)
|
||||
}
|
||||
storage.savePathID(parent, parentID)
|
||||
}
|
||||
|
||||
fileID, err := storage.client.UploadFile(parentID, path.Base(filePath), content, storage.UploadRateLimit/storage.numberOfThreads)
|
||||
if err == nil {
|
||||
@@ -389,16 +438,16 @@ func (storage *ACDStorage) UploadFile(threadIndex int, filePath string, content
|
||||
|
||||
// If a local snapshot cache is needed for the storage to avoid downloading/uploading chunks too often when
|
||||
// managing snapshots.
|
||||
func (storage *ACDStorage) IsCacheNeeded() (bool) { return true }
|
||||
func (storage *ACDStorage) IsCacheNeeded() bool { return true }
|
||||
|
||||
// If the 'MoveFile' method is implemented.
|
||||
func (storage *ACDStorage) IsMoveFileImplemented() (bool) { return true }
|
||||
func (storage *ACDStorage) IsMoveFileImplemented() bool { return true }
|
||||
|
||||
// If the storage can guarantee strong consistency.
|
||||
func (storage *ACDStorage) IsStrongConsistent() (bool) { return true }
|
||||
func (storage *ACDStorage) IsStrongConsistent() bool { return true }
|
||||
|
||||
// If the storage supports fast listing of file names.
|
||||
func (storage *ACDStorage) IsFastListing() (bool) { return true }
|
||||
func (storage *ACDStorage) IsFastListing() bool { return true }
|
||||
|
||||
// Enable the test mode.
|
||||
func (storage *ACDStorage) EnableTestMode() {}
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
// Copyright (c) Acrosync LLC. All rights reserved.
|
||||
// Licensed under the Fair Source License 0.9 (https://fair.io/)
|
||||
// User Limitation: 5 users
|
||||
// Free for personal use and commercial trial
|
||||
// Commercial use requires per-user licenses available from https://duplicacy.com
|
||||
|
||||
package duplicacy
|
||||
|
||||
@@ -12,16 +12,15 @@ import (
|
||||
)
|
||||
|
||||
type AzureStorage struct {
|
||||
RateLimitedStorage
|
||||
StorageBase
|
||||
|
||||
clients []*storage.BlobStorageClient
|
||||
container string
|
||||
containers []*storage.Container
|
||||
}
|
||||
|
||||
func CreateAzureStorage(accountName string, accountKey string,
|
||||
container string, threads int) (azureStorage *AzureStorage, err error) {
|
||||
containerName string, threads int) (azureStorage *AzureStorage, err error) {
|
||||
|
||||
var clients []*storage.BlobStorageClient
|
||||
var containers []*storage.Container
|
||||
for i := 0; i < threads; i++ {
|
||||
|
||||
client, err := storage.NewBasicClient(accountName, accountKey)
|
||||
@@ -31,23 +30,25 @@ func CreateAzureStorage(accountName string, accountKey string,
|
||||
}
|
||||
|
||||
blobService := client.GetBlobService()
|
||||
clients = append(clients, &blobService)
|
||||
container := blobService.GetContainerReference(containerName)
|
||||
containers = append(containers, container)
|
||||
}
|
||||
|
||||
exist, err := clients[0].ContainerExists(container)
|
||||
exist, err := containers[0].Exists()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if !exist {
|
||||
return nil, fmt.Errorf("container %s does not exist", container)
|
||||
return nil, fmt.Errorf("container %s does not exist", containerName)
|
||||
}
|
||||
|
||||
azureStorage = &AzureStorage{
|
||||
clients: clients,
|
||||
container: container,
|
||||
containers: containers,
|
||||
}
|
||||
|
||||
azureStorage.DerivedStorage = azureStorage
|
||||
azureStorage.SetDefaultNestingLevels([]int{0}, 0)
|
||||
return
|
||||
}
|
||||
|
||||
@@ -77,7 +78,7 @@ func (azureStorage *AzureStorage) ListFiles(threadIndex int, dir string) (files
|
||||
|
||||
for {
|
||||
|
||||
results, err := azureStorage.clients[threadIndex].ListBlobs(azureStorage.container, parameters)
|
||||
results, err := azureStorage.containers[threadIndex].ListBlobs(parameters)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
@@ -115,14 +116,15 @@ func (azureStorage *AzureStorage) ListFiles(threadIndex int, dir string) (files
|
||||
|
||||
// DeleteFile deletes the file or directory at 'filePath'.
|
||||
func (storage *AzureStorage) DeleteFile(threadIndex int, filePath string) (err error) {
|
||||
_, err = storage.clients[threadIndex].DeleteBlobIfExists(storage.container, filePath)
|
||||
_, err = storage.containers[threadIndex].GetBlobReference(filePath).DeleteIfExists(nil)
|
||||
return err
|
||||
}
|
||||
|
||||
// MoveFile renames the file.
|
||||
func (storage *AzureStorage) MoveFile(threadIndex int, from string, to string) (err error) {
|
||||
source := storage.clients[threadIndex].GetBlobURL(storage.container, from)
|
||||
err = storage.clients[threadIndex].CopyBlob(storage.container, to, source)
|
||||
source := storage.containers[threadIndex].GetBlobReference(from)
|
||||
destination := storage.containers[threadIndex].GetBlobReference(to)
|
||||
err = destination.Copy(source.GetURL(), nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -136,7 +138,8 @@ func (storage *AzureStorage) CreateDirectory(threadIndex int, dir string) (err e
|
||||
|
||||
// GetFileInfo returns the information about the file or directory at 'filePath'.
|
||||
func (storage *AzureStorage) GetFileInfo(threadIndex int, filePath string) (exist bool, isDir bool, size int64, err error) {
|
||||
properties, err := storage.clients[threadIndex].GetBlobProperties(storage.container, filePath)
|
||||
blob := storage.containers[threadIndex].GetBlobReference(filePath)
|
||||
err = blob.GetProperties(nil)
|
||||
if err != nil {
|
||||
if strings.Contains(err.Error(), "404") {
|
||||
return false, false, 0, nil
|
||||
@@ -145,58 +148,42 @@ func (storage *AzureStorage) GetFileInfo(threadIndex int, filePath string) (exis
|
||||
}
|
||||
}
|
||||
|
||||
return true, false, properties.ContentLength, nil
|
||||
}
|
||||
|
||||
// FindChunk finds the chunk with the specified id. If 'isFossil' is true, it will search for chunk files with
|
||||
// the suffix '.fsl'.
|
||||
func (storage *AzureStorage) FindChunk(threadIndex int, chunkID string, isFossil bool) (filePath string, exist bool, size int64, err error) {
|
||||
filePath = "chunks/" + chunkID
|
||||
if isFossil {
|
||||
filePath += ".fsl"
|
||||
}
|
||||
|
||||
exist, _, size, err = storage.GetFileInfo(threadIndex, filePath)
|
||||
|
||||
if err != nil {
|
||||
return "", false, 0, err
|
||||
} else {
|
||||
return filePath, exist, size, err
|
||||
}
|
||||
return true, false, blob.Properties.ContentLength, nil
|
||||
}
|
||||
|
||||
// DownloadFile reads the file at 'filePath' into the chunk.
|
||||
func (storage *AzureStorage) DownloadFile(threadIndex int, filePath string, chunk *Chunk) (err error) {
|
||||
readCloser, err := storage.clients[threadIndex].GetBlob(storage.container, filePath)
|
||||
readCloser, err := storage.containers[threadIndex].GetBlobReference(filePath).Get(nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
defer readCloser.Close()
|
||||
|
||||
_, err = RateLimitedCopy(chunk, readCloser, storage.DownloadRateLimit / len(storage.clients))
|
||||
_, err = RateLimitedCopy(chunk, readCloser, storage.DownloadRateLimit/len(storage.containers))
|
||||
return err
|
||||
}
|
||||
|
||||
// UploadFile writes 'content' to the file at 'filePath'.
|
||||
func (storage *AzureStorage) UploadFile(threadIndex int, filePath string, content []byte) (err error) {
|
||||
reader := CreateRateLimitedReader(content, storage.UploadRateLimit / len(storage.clients))
|
||||
return storage.clients[threadIndex].CreateBlockBlobFromReader(storage.container, filePath, uint64(len(content)), reader, nil)
|
||||
reader := CreateRateLimitedReader(content, storage.UploadRateLimit/len(storage.containers))
|
||||
blob := storage.containers[threadIndex].GetBlobReference(filePath)
|
||||
return blob.CreateBlockBlobFromReader(reader, nil)
|
||||
|
||||
}
|
||||
|
||||
// If a local snapshot cache is needed for the storage to avoid downloading/uploading chunks too often when
|
||||
// managing snapshots.
|
||||
func (storage *AzureStorage) IsCacheNeeded() (bool) { return true }
|
||||
func (storage *AzureStorage) IsCacheNeeded() bool { return true }
|
||||
|
||||
// If the 'MoveFile' method is implemented.
|
||||
func (storage *AzureStorage) IsMoveFileImplemented() (bool) { return true }
|
||||
func (storage *AzureStorage) IsMoveFileImplemented() bool { return true }
|
||||
|
||||
// If the storage can guarantee strong consistency.
|
||||
func (storage *AzureStorage) IsStrongConsistent() (bool) { return true }
|
||||
func (storage *AzureStorage) IsStrongConsistent() bool { return true }
|
||||
|
||||
// If the storage supports fast listing of file names.
|
||||
func (storage *AzureStorage) IsFastListing() (bool) { return true }
|
||||
func (storage *AzureStorage) IsFastListing() bool { return true }
|
||||
|
||||
// Enable the test mode.
|
||||
func (storage *AzureStorage) EnableTestMode() {}
|
||||
|
||||
@@ -1,23 +1,23 @@
|
||||
// Copyright (c) Acrosync LLC. All rights reserved.
|
||||
// Licensed under the Fair Source License 0.9 (https://fair.io/)
|
||||
// User Limitation: 5 users
|
||||
// Free for personal use and commercial trial
|
||||
// Commercial use requires per-user licenses available from https://duplicacy.com
|
||||
|
||||
package duplicacy
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"time"
|
||||
"bytes"
|
||||
"strconv"
|
||||
"io/ioutil"
|
||||
"encoding/json"
|
||||
"crypto/sha1"
|
||||
"encoding/base64"
|
||||
"encoding/hex"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"strings"
|
||||
"crypto/sha1"
|
||||
"io/ioutil"
|
||||
"math/rand"
|
||||
"net/http"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
type B2Error struct {
|
||||
@@ -51,7 +51,6 @@ type B2Client struct {
|
||||
UploadToken string
|
||||
|
||||
TestMode bool
|
||||
|
||||
}
|
||||
|
||||
func NewB2Client(accountID string, applicationKey string) *B2Client {
|
||||
@@ -82,32 +81,41 @@ func (client *B2Client) retry(backoff int, response *http.Response) int {
|
||||
return backoff
|
||||
}
|
||||
|
||||
func (client *B2Client) call(url string, input interface{}) (io.ReadCloser, int64, error) {
|
||||
func (client *B2Client) call(url string, method string, requestHeaders map[string]string, input interface{}) (io.ReadCloser, http.Header, int64, error) {
|
||||
|
||||
switch method {
|
||||
case http.MethodGet:
|
||||
break
|
||||
case http.MethodHead:
|
||||
break
|
||||
case http.MethodPost:
|
||||
break
|
||||
default:
|
||||
return nil, nil, 0, fmt.Errorf("unhandled http request method: " + method)
|
||||
}
|
||||
|
||||
var response *http.Response
|
||||
|
||||
backoff := 0
|
||||
for i := 0; i < 8; i++ {
|
||||
var inputReader *bytes.Reader
|
||||
method := "POST"
|
||||
|
||||
switch input.(type) {
|
||||
default:
|
||||
jsonInput, err := json.Marshal(input)
|
||||
if err != nil {
|
||||
return nil, 0, err
|
||||
return nil, nil, 0, err
|
||||
}
|
||||
inputReader = bytes.NewReader(jsonInput)
|
||||
case []byte:
|
||||
inputReader = bytes.NewReader(input.([]byte))
|
||||
case int:
|
||||
method = "GET"
|
||||
inputReader = bytes.NewReader([]byte(""))
|
||||
}
|
||||
|
||||
request, err := http.NewRequest(method, url, inputReader)
|
||||
if err != nil {
|
||||
return nil, 0, err
|
||||
return nil, nil, 0, err
|
||||
}
|
||||
|
||||
if url == B2AuthorizationURL {
|
||||
@@ -116,6 +124,12 @@ func (client *B2Client) call(url string, input interface{}) (io.ReadCloser, int6
|
||||
request.Header.Set("Authorization", client.AuthorizationToken)
|
||||
}
|
||||
|
||||
if requestHeaders != nil {
|
||||
for key, value := range requestHeaders {
|
||||
request.Header.Set(key, value)
|
||||
}
|
||||
}
|
||||
|
||||
if client.TestMode {
|
||||
r := rand.Float32()
|
||||
if r < 0.5 {
|
||||
@@ -132,11 +146,11 @@ func (client *B2Client) call(url string, input interface{}) (io.ReadCloser, int6
|
||||
backoff = client.retry(backoff, response)
|
||||
continue
|
||||
}
|
||||
return nil, 0, err
|
||||
return nil, nil, 0, err
|
||||
}
|
||||
|
||||
if response.StatusCode < 400 {
|
||||
return response.Body, response.ContentLength, nil
|
||||
if response.StatusCode < 300 {
|
||||
return response.Body, response.Header, response.ContentLength, nil
|
||||
}
|
||||
|
||||
LOG_DEBUG("BACKBLAZE_CALL", "URL request '%s' returned status code %d", url, response.StatusCode)
|
||||
@@ -145,36 +159,49 @@ func (client *B2Client) call(url string, input interface{}) (io.ReadCloser, int6
|
||||
response.Body.Close()
|
||||
if response.StatusCode == 401 {
|
||||
if url == B2AuthorizationURL {
|
||||
return nil, 0, fmt.Errorf("Authorization failure")
|
||||
return nil, nil, 0, fmt.Errorf("Authorization failure")
|
||||
}
|
||||
client.AuthorizeAccount()
|
||||
continue
|
||||
} else if response.StatusCode == 403 {
|
||||
if !client.TestMode {
|
||||
return nil, 0, fmt.Errorf("B2 cap exceeded")
|
||||
return nil, nil, 0, fmt.Errorf("B2 cap exceeded")
|
||||
}
|
||||
continue
|
||||
} else if response.StatusCode == 404 {
|
||||
if http.MethodHead == method {
|
||||
LOG_DEBUG("BACKBLAZE_CALL", "URL request '%s' returned status code %d", url, response.StatusCode)
|
||||
return nil, nil, 0, nil
|
||||
}
|
||||
} else if response.StatusCode == 416 {
|
||||
if http.MethodHead == method {
|
||||
// 416 Requested Range Not Satisfiable
|
||||
return nil, nil, 0, fmt.Errorf("URL request '%s' returned status code %d", url, response.StatusCode)
|
||||
}
|
||||
} else if response.StatusCode == 429 || response.StatusCode == 408 {
|
||||
backoff = client.retry(backoff, response)
|
||||
continue
|
||||
} else if response.StatusCode >= 500 && response.StatusCode <= 599 {
|
||||
backoff = client.retry(backoff, response)
|
||||
continue
|
||||
} else {
|
||||
LOG_INFO("BACKBLAZE_CALL", "URL request '%s' returned status code %d", url, response.StatusCode)
|
||||
backoff = client.retry(backoff, response)
|
||||
continue
|
||||
}
|
||||
|
||||
defer response.Body.Close()
|
||||
|
||||
e := &B2Error {
|
||||
}
|
||||
e := &B2Error{}
|
||||
|
||||
if err := json.NewDecoder(response.Body).Decode(e); err != nil {
|
||||
return nil, 0, err
|
||||
return nil, nil, 0, err
|
||||
}
|
||||
|
||||
return nil, 0, e
|
||||
return nil, nil, 0, e
|
||||
}
|
||||
|
||||
return nil, 0, fmt.Errorf("Maximum backoff reached")
|
||||
return nil, nil, 0, fmt.Errorf("Maximum backoff reached")
|
||||
}
|
||||
|
||||
type B2AuthorizeAccountOutput struct {
|
||||
@@ -186,7 +213,7 @@ type B2AuthorizeAccountOutput struct {
|
||||
|
||||
func (client *B2Client) AuthorizeAccount() (err error) {
|
||||
|
||||
readCloser, _, err := client.call(B2AuthorizationURL, make(map[string]string))
|
||||
readCloser, _, _, err := client.call(B2AuthorizationURL, http.MethodPost, nil, make(map[string]string))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -220,7 +247,7 @@ func (client *B2Client) FindBucket(bucketName string) (err error) {
|
||||
|
||||
url := client.APIURL + "/b2api/v1/b2_list_buckets"
|
||||
|
||||
readCloser, _, err := client.call(url, input)
|
||||
readCloser, _, _, err := client.call(url, http.MethodPost, nil, input)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -285,17 +312,76 @@ func (client *B2Client) ListFileNames(startFileName string, singleFile bool, inc
|
||||
|
||||
for {
|
||||
url := client.APIURL + "/b2api/v1/b2_list_file_names"
|
||||
requestHeaders := map[string]string{}
|
||||
requestMethod := http.MethodPost
|
||||
var requestInput interface{}
|
||||
requestInput = input
|
||||
if includeVersions {
|
||||
url = client.APIURL + "/b2api/v1/b2_list_file_versions"
|
||||
} else if singleFile {
|
||||
// handle a single file with no versions as a special case to download the last byte of the file
|
||||
url = client.DownloadURL + "/file/" + client.BucketName + "/" + startFileName
|
||||
// requesting byte -1 works for empty files where 0-0 fails with a 416 error
|
||||
requestHeaders["Range"] = "bytes=-1"
|
||||
// HEAD request
|
||||
requestMethod = http.MethodHead
|
||||
requestInput = 0
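// Sketch of the request this branch issues (shape assumed from the header handling below,
// not quoted from the B2 docs):
//   HEAD {DownloadURL}/file/{BucketName}/{startFileName}
//   Range: bytes=-1
// The response headers x-bz-file-id, x-bz-file-name, X-Bz-Upload-Timestamp and either
// Content-Range ("bytes <start>-<end>/<total size>") or Content-Length (0 for an empty
// file) are all that is needed to synthesize the single B2Entry returned below.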
|
||||
}
|
||||
readCloser, _, err := client.call(url, input)
|
||||
var readCloser io.ReadCloser
|
||||
var responseHeader http.Header
|
||||
var err error
|
||||
readCloser, responseHeader, _, err = client.call(url, requestMethod, requestHeaders, requestInput)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if readCloser != nil {
|
||||
defer readCloser.Close()
|
||||
}
|
||||
|
||||
output := B2ListFileNamesOutput {
|
||||
output := B2ListFileNamesOutput{}
|
||||
|
||||
if singleFile && !includeVersions {
|
||||
if responseHeader == nil {
|
||||
LOG_DEBUG("BACKBLAZE_LIST", "b2_download_file_by_name did not return headers")
|
||||
return []*B2Entry{}, nil
|
||||
}
|
||||
requiredHeaders := []string{
|
||||
"x-bz-file-id",
|
||||
"x-bz-file-name",
|
||||
}
|
||||
missingKeys := []string{}
|
||||
for _, headerKey := range requiredHeaders {
|
||||
if "" == responseHeader.Get(headerKey) {
|
||||
missingKeys = append(missingKeys, headerKey)
|
||||
}
|
||||
}
|
||||
if len(missingKeys) > 0 {
|
||||
return nil, fmt.Errorf("b2_download_file_by_name missing headers: %s", missingKeys)
|
||||
}
|
||||
// construct the B2Entry from the response headers of the download request
|
||||
fileID := responseHeader.Get("x-bz-file-id")
|
||||
fileName := responseHeader.Get("x-bz-file-name")
|
||||
fileAction := "upload"
|
||||
// byte range that is returned: "bytes #-#/#
|
||||
rangeString := responseHeader.Get("Content-Range")
|
||||
// Content-Length of the ranged response: 1 if the file has content, 0 if it is empty
|
||||
lengthString := responseHeader.Get("Content-Length")
|
||||
var fileSize int64
|
||||
if "" != rangeString {
|
||||
fileSize, _ = strconv.ParseInt(rangeString[strings.Index(rangeString, "/")+1:], 0, 64)
|
||||
} else if "" != lengthString {
|
||||
// this should only execute if the requested file is empty and the range request didn't result in a Content-Range header
|
||||
fileSize, _ = strconv.ParseInt(lengthString, 0, 64)
|
||||
if fileSize != 0 {
|
||||
return nil, fmt.Errorf("b2_download_file_by_name returned non-zero file length")
|
||||
}
|
||||
} else {
|
||||
return nil, fmt.Errorf("could not parse b2_download_file_by_name headers")
|
||||
}
|
||||
fileUploadTimestamp, _ := strconv.ParseInt(responseHeader.Get("X-Bz-Upload-Timestamp"), 0, 64)
|
||||
|
||||
return []*B2Entry{&B2Entry{fileID, fileName, fileAction, fileSize, fileUploadTimestamp}}, nil
|
||||
}
|
||||
|
||||
if err = json.NewDecoder(readCloser).Decode(&output); err != nil {
|
||||
@@ -351,7 +437,7 @@ func (client *B2Client) DeleteFile(fileName string, fileID string) (err error) {
|
||||
input["fileId"] = fileID
|
||||
|
||||
url := client.APIURL + "/b2api/v1/b2_delete_file_version"
|
||||
readCloser, _, err := client.call(url, input)
|
||||
readCloser, _, _, err := client.call(url, http.MethodPost, make(map[string]string), input)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -371,7 +457,7 @@ func (client *B2Client) HideFile(fileName string) (fileID string, err error) {
|
||||
input["fileName"] = fileName
|
||||
|
||||
url := client.APIURL + "/b2api/v1/b2_hide_file"
|
||||
readCloser, _, err := client.call(url, input)
|
||||
readCloser, _, _, err := client.call(url, http.MethodPost, make(map[string]string), input)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
@@ -392,7 +478,8 @@ func (client *B2Client) DownloadFile(filePath string) (io.ReadCloser, int64, err
|
||||
|
||||
url := client.DownloadURL + "/file/" + client.BucketName + "/" + filePath
|
||||
|
||||
return client.call(url, 0)
|
||||
readCloser, _, len, err := client.call(url, http.MethodGet, make(map[string]string), 0)
|
||||
return readCloser, len, err
|
||||
}
|
||||
|
||||
type B2GetUploadArgumentOutput struct {
|
||||
@@ -401,12 +488,12 @@ type B2GetUploadArgumentOutput struct {
|
||||
AuthorizationToken string
|
||||
}
|
||||
|
||||
func (client *B2Client) getUploadURL() (error) {
|
||||
func (client *B2Client) getUploadURL() error {
|
||||
input := make(map[string]string)
|
||||
input["bucketId"] = client.BucketID
|
||||
|
||||
url := client.APIURL + "/b2api/v1/b2_get_upload_url"
|
||||
readCloser, _, err := client.call(url, input)
|
||||
readCloser, _, _, err := client.call(url, http.MethodPost, make(map[string]string), input)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -427,7 +514,6 @@ func (client *B2Client) getUploadURL() (error) {
|
||||
|
||||
func (client *B2Client) UploadFile(filePath string, content []byte, rateLimit int) (err error) {
|
||||
|
||||
|
||||
hasher := sha1.New()
|
||||
hasher.Write(content)
|
||||
hash := hex.EncodeToString(hasher.Sum(nil))
|
||||
@@ -487,7 +573,7 @@ func (client *B2Client) UploadFile(filePath string, content []byte, rateLimit in
|
||||
io.Copy(ioutil.Discard, response.Body)
|
||||
response.Body.Close()
|
||||
|
||||
if response.StatusCode < 400 {
|
||||
if response.StatusCode < 300 {
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -513,4 +599,3 @@ func (client *B2Client) UploadFile(filePath string, content []byte, rateLimit in
|
||||
|
||||
return fmt.Errorf("Maximum backoff reached")
|
||||
}
|
||||
|
||||
|
||||
@@ -1,19 +1,19 @@
|
||||
// Copyright (c) Acrosync LLC. All rights reserved.
|
||||
// Licensed under the Fair Source License 0.9 (https://fair.io/)
|
||||
// User Limitation: 5 users
|
||||
// Free for personal use and commercial trial
|
||||
// Commercial use requires per-user licenses available from https://duplicacy.com
|
||||
|
||||
package duplicacy
|
||||
|
||||
import (
|
||||
"testing"
|
||||
"crypto/sha256"
|
||||
"encoding/hex"
|
||||
"encoding/json"
|
||||
"testing"
|
||||
|
||||
crypto_rand "crypto/rand"
|
||||
"math/rand"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"math/rand"
|
||||
)
|
||||
|
||||
func createB2ClientForTest(t *testing.T) (*B2Client, string) {
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
// Copyright (c) Acrosync LLC. All rights reserved.
|
||||
// Licensed under the Fair Source License 0.9 (https://fair.io/)
|
||||
// User Limitation: 5 users
|
||||
// Free for personal use and commercial trial
|
||||
// Commercial use requires per-user licenses available from https://duplicacy.com
|
||||
|
||||
package duplicacy
|
||||
|
||||
@@ -9,7 +9,7 @@ import (
|
||||
)
|
||||
|
||||
type B2Storage struct {
|
||||
RateLimitedStorage
|
||||
StorageBase
|
||||
|
||||
clients []*B2Client
|
||||
}
|
||||
@@ -38,6 +38,9 @@ func CreateB2Storage(accountID string, applicationKey string, bucket string, thr
|
||||
storage = &B2Storage{
|
||||
clients: clients,
|
||||
}
|
||||
|
||||
storage.DerivedStorage = storage
|
||||
storage.SetDefaultNestingLevels([]int{0}, 0)
|
||||
return storage, nil
|
||||
}
|
||||
|
||||
@@ -204,17 +207,6 @@ func (storage *B2Storage) GetFileInfo(threadIndex int, filePath string) (exist b
|
||||
return true, false, entries[0].Size, nil
|
||||
}
|
||||
|
||||
// FindChunk finds the chunk with the specified id. If 'isFossil' is true, it will search for chunk files with
|
||||
// the suffix '.fsl'.
|
||||
func (storage *B2Storage) FindChunk(threadIndex int, chunkID string, isFossil bool) (filePath string, exist bool, size int64, err error) {
|
||||
filePath = "chunks/" + chunkID
|
||||
if isFossil {
|
||||
filePath += ".fsl"
|
||||
}
|
||||
exist, _, size, err = storage.GetFileInfo(threadIndex, filePath)
|
||||
return filePath, exist, size, err
|
||||
}
|
||||
|
||||
// DownloadFile reads the file at 'filePath' into the chunk.
|
||||
func (storage *B2Storage) DownloadFile(threadIndex int, filePath string, chunk *Chunk) (err error) {
|
||||
|
||||
@@ -236,16 +228,16 @@ func (storage *B2Storage) UploadFile(threadIndex int, filePath string, content [
|
||||
|
||||
// If a local snapshot cache is needed for the storage to avoid downloading/uploading chunks too often when
|
||||
// managing snapshots.
|
||||
func (storage *B2Storage) IsCacheNeeded() (bool) { return true }
|
||||
func (storage *B2Storage) IsCacheNeeded() bool { return true }
|
||||
|
||||
// If the 'MoveFile' method is implemented.
|
||||
func (storage *B2Storage) IsMoveFileImplemented() (bool) { return true }
|
||||
func (storage *B2Storage) IsMoveFileImplemented() bool { return true }
|
||||
|
||||
// If the storage can guarantee strong consistency.
|
||||
func (storage *B2Storage) IsStrongConsistent() (bool) { return true }
|
||||
func (storage *B2Storage) IsStrongConsistent() bool { return true }
|
||||
|
||||
// If the storage supports fast listing of file names.
|
||||
func (storage *B2Storage) IsFastListing() (bool) { return true }
|
||||
func (storage *B2Storage) IsFastListing() bool { return true }
|
||||
|
||||
// Enable the test mode.
|
||||
func (storage *B2Storage) EnableTestMode() {
|
||||
|
||||
@@ -1,24 +1,25 @@
|
||||
// Copyright (c) Acrosync LLC. All rights reserved.
|
||||
// Licensed under the Fair Source License 0.9 (https://fair.io/)
|
||||
// User Limitation: 5 users
|
||||
// Free for personal use and commercial trial
|
||||
// Commercial use requires per-user licenses available from https://duplicacy.com
|
||||
|
||||
package duplicacy
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"bytes"
|
||||
"os"
|
||||
"io"
|
||||
"encoding/hex"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"path"
|
||||
"time"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"strings"
|
||||
"strconv"
|
||||
"encoding/hex"
|
||||
"path/filepath"
|
||||
"time"
|
||||
)
|
||||
|
||||
// BackupManager performs the two major operations, backup and restore, and passes other operations, mostly related to
|
||||
@@ -34,7 +35,9 @@ type BackupManager struct {
|
||||
config *Config // contains a number of options
|
||||
}
|
||||
|
||||
|
||||
func (manager *BackupManager) SetDryRun(dryRun bool) {
|
||||
manager.config.dryRun = dryRun
|
||||
}
|
||||
|
||||
// CreateBackupManager creates a backup manager using the specified 'storage'. 'snapshotID' is a unique id to
|
||||
// identify snapshots created for this repository. 'top' is the top directory of the repository. 'password' is the
|
||||
@@ -76,7 +79,7 @@ func (manager *BackupManager) SetupSnapshotCache(storageName string) bool {
|
||||
preferencePath := GetDuplicacyPreferencePath()
|
||||
cacheDir := path.Join(preferencePath, "cache", storageName)
|
||||
|
||||
storage, err := CreateFileStorage(cacheDir, 1)
|
||||
storage, err := CreateFileStorage(cacheDir, false, 1)
|
||||
if err != nil {
|
||||
LOG_ERROR("BACKUP_CACHE", "Failed to create the snapshot cache dir: %v", err)
|
||||
return false
|
||||
@@ -90,6 +93,7 @@ func (manager *BackupManager) SetupSnapshotCache(storageName string) bool {
|
||||
}
|
||||
}
|
||||
|
||||
storage.SetDefaultNestingLevels([]int{1}, 1)
|
||||
manager.snapshotCache = storage
|
||||
manager.SnapshotManager.snapshotCache = storage
|
||||
return true
|
||||
@@ -142,6 +146,12 @@ func setEntryContent(entries[] *Entry, chunkLengths[]int, offset int) {
|
||||
}
|
||||
totalChunkSize += int64(length)
|
||||
}
|
||||
|
||||
// If there are some unvisited entries (which happens when saving an incomplete snapshot),
|
||||
// set their sizes to -1 so they won't be saved to the incomplete snapshot
|
||||
for j := i; j < len(entries); j++ {
|
||||
entries[j].Size = -1
|
||||
}
|
||||
}
|
||||
|
||||
// Backup creates a snapshot for the repository 'top'. If 'quickMode' is true, only files with different sizes
|
||||
@@ -219,7 +229,6 @@ func (manager *BackupManager) Backup(top string, quickMode bool, threads int, ta
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
if incompleteSnapshot != nil {
|
||||
|
||||
// This is the last chunk from the incomplete snapshot that can be found in the cache
|
||||
@@ -248,15 +257,15 @@ func (manager *BackupManager) Backup(top string, quickMode bool, threads int, ta
|
||||
incompleteSnapshot.ChunkHashes = incompleteSnapshot.ChunkHashes[:lastCompleteChunk+1]
|
||||
incompleteSnapshot.ChunkLengths = incompleteSnapshot.ChunkLengths[:lastCompleteChunk+1]
|
||||
remoteSnapshot = incompleteSnapshot
|
||||
LOG_INFO("FILE_SKIP", "Skipped %d files from previous incomplete backup", len(files))
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
var numberOfNewFileChunks int // number of new file chunks
|
||||
var numberOfNewFileChunks int64 // number of new file chunks
|
||||
var totalUploadedFileChunkLength int64 // total length of uploaded file chunks
|
||||
var totalUploadedFileChunkBytes int64 // how many actual bytes have been uploaded
|
||||
|
||||
var numberOfNewSnapshotChunks int // number of new snapshot chunks
|
||||
var totalUploadedSnapshotChunkLength int64 // size of uploaded snapshot chunks
|
||||
var totalUploadedSnapshotChunkBytes int64 // how many actual bytes have been uploaded
|
||||
|
||||
@@ -355,6 +364,12 @@ func (manager *BackupManager) Backup(top string, quickMode bool, threads int, ta
|
||||
var uploadedChunkLengths []int
|
||||
var uploadedChunkLock = &sync.Mutex{}
|
||||
|
||||
// Set all file sizes to -1 to indicate they haven't been processed. This must be done before creating the file
|
||||
// reader because the file reader may skip inaccessible files on construction.
|
||||
for _, entry := range modifiedEntries {
|
||||
entry.Size = -1
|
||||
}
|
||||
|
||||
// the file reader implements the Reader interface. When an EOF is encounter, it opens the next file unless it
|
||||
// is the last file.
|
||||
fileReader := CreateFileReader(shadowTop, modifiedEntries)
|
||||
@@ -374,6 +389,13 @@ func (manager *BackupManager) Backup(top string, quickMode bool, threads int, ta
|
||||
keepUploadAlive = int64(value)
|
||||
}
|
||||
|
||||
// Fail at the chunk specified by DUPLICACY_FAIL_CHUNK to simulate a backup error
|
||||
chunkToFail := -1
|
||||
if value, found := os.LookupEnv("DUPLICACY_FAIL_CHUNK"); found {
|
||||
chunkToFail, _ = strconv.Atoi(value)
|
||||
LOG_INFO("SNAPSHOT_FAIL", "Will abort the backup on chunk %d", chunkToFail)
|
||||
}
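// For example (assumed usage): running a backup with DUPLICACY_FAIL_CHUNK=5 set in the
// environment makes the upload callback below raise an error once five new chunks have
// been recorded, which is a convenient way to exercise the incomplete-snapshot resume
// path during testing.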
|
||||
|
||||
chunkMaker := CreateChunkMaker(manager.config, false)
|
||||
chunkUploader := CreateChunkUploader(manager.config, manager.storage, nil, threads, nil)
|
||||
|
||||
@@ -388,16 +410,16 @@ func (manager *BackupManager) Backup(top string, quickMode bool, threads int, ta
|
||||
if !localSnapshotReady {
|
||||
// Lock it to gain exclusive access to uploadedChunkHashes and uploadedChunkLengths
|
||||
uploadedChunkLock.Lock()
|
||||
for _, entry := range uploadedEntries {
|
||||
entry.EndChunk = -1
|
||||
}
|
||||
setEntryContent(uploadedEntries, uploadedChunkLengths, len(preservedChunkHashes))
|
||||
if len(preservedChunkHashes) > 0 {
|
||||
//localSnapshot.Files = preservedEntries
|
||||
//localSnapshot.Files = append(preservedEntries, uploadedEntries...)
|
||||
localSnapshot.ChunkHashes = preservedChunkHashes
|
||||
localSnapshot.ChunkHashes = append(localSnapshot.ChunkHashes, uploadedChunkHashes...)
|
||||
localSnapshot.ChunkLengths = preservedChunkLengths
|
||||
localSnapshot.ChunkLengths = append(localSnapshot.ChunkLengths, uploadedChunkLengths...)
|
||||
} else {
|
||||
//localSnapshot.Files = uploadedEntries
|
||||
localSnapshot.ChunkHashes = uploadedChunkHashes
|
||||
localSnapshot.ChunkLengths = uploadedChunkLengths
|
||||
}
|
||||
@@ -428,16 +450,16 @@ func (manager *BackupManager) Backup(top string, quickMode bool, threads int, ta
|
||||
LOG_DEBUG("CHUNK_CACHE", "Skipped chunk %s in cache", chunk.GetID())
|
||||
} else {
|
||||
if uploadSize > 0 {
|
||||
numberOfNewFileChunks++
|
||||
totalUploadedFileChunkLength += int64(chunkSize)
|
||||
totalUploadedFileChunkBytes += int64(uploadSize)
|
||||
atomic.AddInt64(&numberOfNewFileChunks, 1)
|
||||
atomic.AddInt64(&totalUploadedFileChunkLength, int64(chunkSize))
|
||||
atomic.AddInt64(&totalUploadedFileChunkBytes, int64(uploadSize))
|
||||
action = "Uploaded"
|
||||
} else {
|
||||
LOG_DEBUG("CHUNK_EXIST", "Skipped chunk %s in the storage", chunk.GetID())
|
||||
}
|
||||
}
|
||||
|
||||
uploadedModifiedFileSize += int64(chunkSize)
|
||||
uploadedModifiedFileSize := atomic.AddInt64(&uploadedModifiedFileSize, int64(chunkSize))
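// The chunk statistics above are updated atomically, presumably because this completion
// callback can run concurrently on several uploader threads; the shadowed local here
// captures the post-add running total used by the progress reporting below.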
|
||||
|
||||
if IsTracing() || showStatistics {
|
||||
now := time.Now().Unix()
|
||||
@@ -494,15 +516,21 @@ func (manager *BackupManager) Backup(top string, quickMode bool, threads int, ta
|
||||
uploadedChunkLengths = append(uploadedChunkLengths, chunkSize)
|
||||
uploadedChunkLock.Unlock()
|
||||
|
||||
if len(uploadedChunkHashes) == chunkToFail {
|
||||
LOG_ERROR("SNAPSHOT_FAIL", "Artificially fail the chunk %d for testing purposes", chunkToFail)
|
||||
}
|
||||
|
||||
},
|
||||
func(fileSize int64, hash string) (io.Reader, bool) {
|
||||
|
||||
// Must lock here because the RunAtError function called by other threads may access uploadedEntries
|
||||
uploadedChunkLock.Lock()
|
||||
defer uploadedChunkLock.Unlock()
|
||||
|
||||
// This function is called when a new file is needed
|
||||
entry := fileReader.CurrentEntry
|
||||
entry.Hash = hash
|
||||
if entry.Size != fileSize {
|
||||
totalModifiedFileSize += fileSize - entry.Size
|
||||
entry.Size = fileSize
|
||||
}
|
||||
uploadedEntries = append(uploadedEntries, entry)
|
||||
|
||||
if !showStatistics || IsTracing() || RunInBackground {
|
||||
@@ -544,6 +572,7 @@ func (manager *BackupManager) Backup(top string, quickMode bool, threads int, ta
|
||||
|
||||
err = manager.SnapshotManager.CheckSnapshot(localSnapshot)
|
||||
if err != nil {
|
||||
RunAtError = func() {} // Don't save the incomplete snapshot
|
||||
LOG_ERROR("SNAPSHOT_CHECK", "The snapshot contains an error: %v", err)
|
||||
return false
|
||||
}
|
||||
@@ -594,7 +623,7 @@ func (manager *BackupManager) Backup(top string, quickMode bool, threads int, ta
|
||||
}
|
||||
|
||||
for _, dir := range skippedDirectories {
|
||||
LOG_WARN("SKIP_DIRECTORY", "Subdirecotry %s cannot be listed", dir)
|
||||
LOG_WARN("SKIP_DIRECTORY", "Subdirectory %s cannot be listed", dir)
|
||||
}
|
||||
|
||||
for _, file := range fileReader.SkippedFiles {
|
||||
@@ -602,7 +631,9 @@ func (manager *BackupManager) Backup(top string, quickMode bool, threads int, ta
|
||||
}
|
||||
skippedFiles = append(skippedFiles, fileReader.SkippedFiles...)
|
||||
|
||||
if !manager.config.dryRun {
|
||||
manager.SnapshotManager.CleanSnapshotCache(localSnapshot, nil)
|
||||
}
|
||||
LOG_INFO("BACKUP_END", "Backup for %s at revision %d completed", top, localSnapshot.Revision)
|
||||
|
||||
RunAtError = func() {}
|
||||
@@ -630,7 +661,7 @@ func (manager *BackupManager) Backup(top string, quickMode bool, threads int, ta
|
||||
LOG_INFO("BACKUP_STATS", "All chunks: %d total, %s bytes; %d new, %s bytes, %s bytes uploaded",
|
||||
len(localSnapshot.ChunkHashes)+totalSnapshotChunks,
|
||||
PrettyNumber(totalFileChunkLength+totalSnapshotChunkLength),
|
||||
numberOfNewFileChunks + numberOfNewSnapshotChunks,
|
||||
int(numberOfNewFileChunks)+numberOfNewSnapshotChunks,
|
||||
PrettyNumber(totalUploadedFileChunkLength+totalUploadedSnapshotChunkLength),
|
||||
PrettyNumber(totalUploadedFileChunkBytes+totalUploadedSnapshotChunkBytes))
|
||||
|
||||
@@ -681,13 +712,18 @@ func (manager *BackupManager) Backup(top string, quickMode bool, threads int, ta
|
||||
// the same as 'top'. 'quickMode' will bypass files with unchanged sizes and timestamps. 'deleteMode' will
|
||||
// remove local files that don't exist in the snapshot. 'patterns' is used to include/exclude certain files.
|
||||
func (manager *BackupManager) Restore(top string, revision int, inPlace bool, quickMode bool, threads int, overwrite bool,
|
||||
deleteMode bool, showStatistics bool, patterns [] string) bool {
|
||||
deleteMode bool, setOwner bool, showStatistics bool, patterns []string) bool {
|
||||
|
||||
startTime := time.Now().Unix()
|
||||
|
||||
LOG_DEBUG("RESTORE_PARAMETERS", "top: %s, revision: %d, in-place: %t, quick: %t, delete: %t",
|
||||
top, revision, inPlace, quickMode, deleteMode)
|
||||
|
||||
if !strings.HasPrefix(GetDuplicacyPreferencePath(), top) {
|
||||
LOG_INFO("RESTORE_INPLACE", "Forcing in-place mode with a non-default preference path")
|
||||
inPlace = true
|
||||
}
|
||||
|
||||
if len(patterns) > 0 {
|
||||
for _, pattern := range patterns {
|
||||
LOG_TRACE("RESTORE_PATTERN", "%s", pattern)
|
||||
@@ -728,6 +764,7 @@ func (manager *BackupManager) Restore(top string, revision int, inPlace bool, qu
|
||||
for _, file := range remoteSnapshot.Files {
|
||||
|
||||
if MatchPath(file.Path, patterns) {
|
||||
LOG_TRACE("RESTORE_INCLUDE", "Include %s", file.Path)
|
||||
includedFiles = append(includedFiles, file)
|
||||
} else {
|
||||
LOG_TRACE("RESTORE_EXCLUDE", "Exclude %s", file.Path)
|
||||
@@ -749,6 +786,7 @@ func (manager *BackupManager) Restore(top string, revision int, inPlace bool, qu
|
||||
i := 0
|
||||
for _, entry := range remoteSnapshot.Files {
|
||||
|
||||
skipped := false
|
||||
// Find local files that don't exist in the remote snapshot
|
||||
for i < len(localSnapshot.Files) {
|
||||
local := localSnapshot.Files[i]
|
||||
@@ -760,11 +798,18 @@ func (manager *BackupManager) Restore(top string, revision int, inPlace bool, qu
|
||||
} else {
|
||||
if compare == 0 {
|
||||
i++
|
||||
if quickMode && local.IsSameAs(entry) {
|
||||
skipped = true
|
||||
}
|
||||
}
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if skipped {
|
||||
continue
|
||||
}
|
||||
|
||||
fullPath := joinPath(top, entry.Path)
|
||||
if entry.IsLink() {
|
||||
stat, err := os.Lstat(fullPath)
|
||||
@@ -834,7 +879,6 @@ func (manager *BackupManager) Restore(top string, revision int, inPlace bool, qu
|
||||
if quickMode {
|
||||
if file.IsSameAsFileInfo(stat) {
|
||||
LOG_TRACE("RESTORE_SKIP", "File %s unchanged (by size and timestamp)", file.Path)
|
||||
file.RestoreMetadata(fullPath, &stat)
|
||||
continue
|
||||
}
|
||||
}
|
||||
@@ -859,7 +903,7 @@ func (manager *BackupManager) Restore(top string, revision int, inPlace bool, qu
|
||||
}
|
||||
newFile.Close()
|
||||
|
||||
file.RestoreMetadata(fullPath, nil)
|
||||
file.RestoreMetadata(fullPath, nil, setOwner)
|
||||
if !showStatistics {
|
||||
LOG_INFO("DOWNLOAD_DONE", "Downloaded %s (0)", file.Path)
|
||||
}
|
||||
@@ -871,14 +915,15 @@ func (manager *BackupManager) Restore(top string, revision int, inPlace bool, qu
|
||||
totalFileSize, downloadedFileSize, startDownloadingTime) {
|
||||
downloadedFileSize += file.Size
|
||||
downloadedFiles = append(downloadedFiles, file)
|
||||
file.RestoreMetadata(fullPath, nil, setOwner)
|
||||
}
|
||||
|
||||
file.RestoreMetadata(fullPath, nil)
|
||||
}
|
||||
|
||||
|
||||
if deleteMode && len(patterns) == 0 {
|
||||
for _, file := range extraFiles {
|
||||
// Reverse the order to make sure directories are empty before being deleted
|
||||
for i := range extraFiles {
|
||||
file := extraFiles[len(extraFiles)-1-i]
|
||||
fullPath := joinPath(top, file)
|
||||
os.Remove(fullPath)
|
||||
LOG_INFO("RESTORE_DELETE", "Deleted %s", file)
|
||||
@@ -888,12 +933,10 @@ func (manager *BackupManager) Restore(top string, revision int, inPlace bool, qu
|
||||
for _, entry := range remoteSnapshot.Files {
|
||||
if entry.IsDir() && !entry.IsLink() {
|
||||
dir := joinPath(top, entry.Path)
|
||||
entry.RestoreMetadata(dir, nil)
|
||||
entry.RestoreMetadata(dir, nil, setOwner)
|
||||
}
|
||||
}
|
||||
|
||||
RemoveEmptyDirectories(top)
|
||||
|
||||
if showStatistics {
|
||||
for _, file := range downloadedFiles {
|
||||
LOG_INFO("DOWNLOAD_DONE", "Downloaded %s (%d)", file.Path, file.Size)
|
||||
@@ -971,7 +1014,7 @@ func (encoder *fileEncoder) NextFile() (io.Reader, bool) {
|
||||
// UploadSnapshot uploads the specified snapshot to the storage. It turns Files, ChunkHashes, and ChunkLengths into
|
||||
// sequences of chunks, and uploads these chunks, and finally the snapshot file.
|
||||
func (manager *BackupManager) UploadSnapshot(chunkMaker *ChunkMaker, uploader *ChunkUploader, top string, snapshot *Snapshot,
|
||||
chunkCache map[string]bool, ) ( totalSnapshotChunkSize int64,
|
||||
chunkCache map[string]bool) (totalSnapshotChunkSize int64,
|
||||
numberOfNewSnapshotChunks int, totalUploadedSnapshotChunkSize int64,
|
||||
totalUploadedSnapshotChunkBytes int64) {
|
||||
|
||||
@@ -986,7 +1029,7 @@ func (manager *BackupManager) UploadSnapshot(chunkMaker *ChunkMaker, uploader *C
|
||||
totalUploadedSnapshotChunkSize += int64(chunkSize)
|
||||
totalUploadedSnapshotChunkBytes += int64(uploadSize)
|
||||
} else {
|
||||
LOG_DEBUG("CHUNK_EXIST", "Skipped snpashot chunk %s in the storage", chunk.GetID())
|
||||
LOG_DEBUG("CHUNK_EXIST", "Skipped snapshot chunk %s in the storage", chunk.GetID())
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1067,8 +1110,9 @@ func (manager *BackupManager) UploadSnapshot(chunkMaker *ChunkMaker, uploader *C
|
||||
}
|
||||
|
||||
path := fmt.Sprintf("snapshots/%s/%d", manager.snapshotID, snapshot.Revision)
|
||||
if !manager.config.dryRun {
|
||||
manager.SnapshotManager.UploadFile(path, path, description)
|
||||
|
||||
}
|
||||
return totalSnapshotChunkSize, numberOfNewSnapshotChunks, totalUploadedSnapshotChunkSize, totalUploadedSnapshotChunkBytes
|
||||
}
|
||||
|
||||
@@ -1110,13 +1154,111 @@ func (manager *BackupManager) RestoreFile(chunkDownloader *ChunkDownloader, chun
|
||||
var offset int64
|
||||
|
||||
existingFile, err = os.Open(fullPath)
|
||||
if err != nil && !os.IsNotExist(err) {
|
||||
if err != nil {
|
||||
if os.IsNotExist(err) {
|
||||
// macOS has no sparse file support
|
||||
if inPlace && entry.Size > 100*1024*1024 && runtime.GOOS != "darwin" {
|
||||
// Create an empty sparse file
|
||||
existingFile, err = os.OpenFile(fullPath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600)
|
||||
if err != nil {
|
||||
LOG_ERROR("DOWNLOAD_CREATE", "Failed to create the file %s for in-place writing: %v", fullPath, err)
|
||||
return false
|
||||
}
|
||||
|
||||
n := int64(1)
|
||||
// There is a go bug on Windows (https://github.com/golang/go/issues/21681) that causes Seek to fail
|
||||
// if the lower 32 bits of the offset argument are 0xffffffff. Therefore we need to avoid that value by increasing n.
|
||||
if uint32(entry.Size) == 0 && (entry.Size>>32) > 0 {
|
||||
n = int64(2)
|
||||
}
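// Illustrative example: a file of size 0x200000000 (8 GiB) would otherwise be seeked to
// offset 0x1FFFFFFFF, whose lower 32 bits are 0xffffffff, so n is bumped to 2 and two
// zero bytes are written instead of one.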
|
||||
_, err = existingFile.Seek(entry.Size-n, 0)
|
||||
if err != nil {
|
||||
LOG_ERROR("DOWNLOAD_CREATE", "Failed to resize the initial file %s for in-place writing: %v", fullPath, err)
|
||||
return false
|
||||
}
|
||||
_, err = existingFile.Write([]byte("\x00\x00")[:n])
|
||||
if err != nil {
|
||||
LOG_ERROR("DOWNLOAD_CREATE", "Failed to initialize the sparse file %s for in-place writing: %v", fullPath, err)
|
||||
return false
|
||||
}
|
||||
existingFile.Close()
|
||||
existingFile, err = os.Open(fullPath)
|
||||
if err != nil {
|
||||
LOG_ERROR("DOWNLOAD_OPEN", "Can't reopen the initial file just created: %v", err)
|
||||
return false
|
||||
}
|
||||
}
|
||||
} else {
|
||||
LOG_TRACE("DOWNLOAD_OPEN", "Can't open the existing file: %v", err)
|
||||
}
|
||||
} else {
|
||||
if !overwrite {
|
||||
LOG_ERROR("DOWNLOAD_OVERWRITE",
|
||||
"File %s already exists. Please specify the -overwrite option to continue", entry.Path)
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
fileHash := ""
|
||||
if existingFile != nil {
|
||||
// Break existing file into chunks.
|
||||
|
||||
if inPlace {
|
||||
// In inplace mode, we only consider chunks in the existing file with the same offsets, so we
|
||||
// break the original file at offsets retrieved from the backup
|
||||
fileHasher := manager.config.NewFileHasher()
|
||||
buffer := make([]byte, 64*1024)
|
||||
err = nil
|
||||
// We read one more byte than the recorded size so the file hash will be different if the file
// to be restored is a truncated portion of the existing file
|
||||
for i := entry.StartChunk; i <= entry.EndChunk+1; i++ {
|
||||
hasher := manager.config.NewKeyedHasher(manager.config.HashKey)
|
||||
chunkSize := 0
|
||||
if i == entry.StartChunk {
|
||||
chunkSize = chunkDownloader.taskList[i].chunkLength - entry.StartOffset
|
||||
} else if i == entry.EndChunk {
|
||||
chunkSize = entry.EndOffset
|
||||
} else if i > entry.StartChunk && i < entry.EndChunk {
|
||||
chunkSize = chunkDownloader.taskList[i].chunkLength
|
||||
} else {
|
||||
chunkSize = 1 // the size of extra chunk beyond EndChunk
|
||||
}
|
||||
count := 0
|
||||
for count < chunkSize {
|
||||
n := chunkSize - count
|
||||
if n > cap(buffer) {
|
||||
n = cap(buffer)
|
||||
}
|
||||
n, err := existingFile.Read(buffer[:n])
|
||||
if n > 0 {
|
||||
hasher.Write(buffer[:n])
|
||||
fileHasher.Write(buffer[:n])
|
||||
count += n
|
||||
}
|
||||
if err == io.EOF {
|
||||
break
|
||||
}
|
||||
if err != nil {
|
||||
LOG_ERROR("DOWNLOAD_SPLIT", "Failed to read existing file: %v", err)
|
||||
return false
|
||||
}
|
||||
}
|
||||
if count > 0 {
|
||||
hash := string(hasher.Sum(nil))
|
||||
existingChunks = append(existingChunks, hash)
|
||||
existingLengths = append(existingLengths, chunkSize)
|
||||
offsetMap[hash] = offset
|
||||
lengthMap[hash] = chunkSize
|
||||
offset += int64(chunkSize)
|
||||
}
|
||||
|
||||
if err == io.EOF {
|
||||
break
|
||||
}
|
||||
}
|
||||
fileHash = hex.EncodeToString(fileHasher.Sum(nil))
|
||||
} else {
|
||||
// If it is not in-place, we want to reuse any chunks in the existing file regardless of their offsets, so
|
||||
// we run the chunk maker to split the original file.
|
||||
chunkMaker.ForEachChunk(
|
||||
existingFile,
|
||||
func(chunk *Chunk, final bool) {
|
||||
@@ -1132,22 +1274,11 @@ func (manager *BackupManager) RestoreFile(chunkDownloader *ChunkDownloader, chun
|
||||
fileHash = hash
|
||||
return nil, false
|
||||
})
|
||||
}
|
||||
if fileHash == entry.Hash && fileHash != "" {
|
||||
LOG_TRACE("DOWNLOAD_SKIP", "File %s unchanged (by hash)", entry.Path)
|
||||
return false
|
||||
}
|
||||
|
||||
if !overwrite {
|
||||
LOG_ERROR("DOWNLOAD_OVERWRITE",
|
||||
"File %s already exists. Please specify the -overwrite option to continue", entry.Path)
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
if inPlace {
|
||||
if existingFile == nil {
|
||||
inPlace = false
|
||||
}
|
||||
}
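// A minimal sketch of the in-place splitting step above: the existing file is re-hashed at the chunk
// boundaries recorded in the backup, and one extra byte is read past the last chunk so that a file
// which is only a truncated prefix of the backup still produces a different overall file hash. The
// helper below is illustrative only (splitAtOffsets, newKeyedHasher and fileHasher are hypothetical
// names, and it assumes "hash" and "io" are imported); the real code also records offsets and lengths.
func splitAtOffsets(reader io.Reader, chunkLengths []int, newKeyedHasher func() hash.Hash, fileHasher hash.Hash) (chunkHashes []string, err error) {
    buffer := make([]byte, 64*1024)
    for _, length := range append(chunkLengths, 1) { // the trailing 1-byte read detects truncation
        hasher := newKeyedHasher()
        count := 0
        for count < length {
            n := length - count
            if n > len(buffer) {
                n = len(buffer)
            }
            n, err = reader.Read(buffer[:n])
            if n > 0 {
                hasher.Write(buffer[:n])
                fileHasher.Write(buffer[:n])
                count += n
            }
            if err == io.EOF {
                break
            }
            if err != nil {
                return nil, err
            }
        }
        if count > 0 {
            chunkHashes = append(chunkHashes, string(hasher.Sum(nil)))
        }
        if err == io.EOF {
            return chunkHashes, nil
        }
    }
    return chunkHashes, nil
}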
|
||||
|
||||
for i := entry.StartChunk; i <= entry.EndChunk; i++ {
|
||||
@@ -1162,12 +1293,21 @@ func (manager *BackupManager) RestoreFile(chunkDownloader *ChunkDownloader, chun
|
||||
|
||||
LOG_TRACE("DOWNLOAD_INPLACE", "Updating %s in place", fullPath)
|
||||
|
||||
if existingFile == nil {
|
||||
// Create an empty file
|
||||
existingFile, err = os.OpenFile(fullPath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600)
|
||||
if err != nil {
|
||||
LOG_ERROR("DOWNLOAD_CREATE", "Failed to create the file %s for in-place writing", fullPath)
|
||||
}
|
||||
} else {
|
||||
// Close and reopen in a different mode
|
||||
existingFile.Close()
|
||||
existingFile, err = os.OpenFile(fullPath, os.O_RDWR, 0)
|
||||
if err != nil {
|
||||
LOG_ERROR("DOWNLOAD_OPEN", "Failed to open the file %s for in-place writing", fullPath)
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
existingFile.Seek(0, 0)
|
||||
|
||||
@@ -1312,7 +1452,6 @@ func (manager *BackupManager) RestoreFile(chunkDownloader *ChunkDownloader, chun
|
||||
return false
|
||||
}
|
||||
|
||||
|
||||
if existingFile != nil {
|
||||
existingFile.Close()
|
||||
existingFile = nil
|
||||
@@ -1349,14 +1488,26 @@ func (manager *BackupManager) CopySnapshots(otherManager *BackupManager, snapsho
|
||||
return false
|
||||
}
|
||||
|
||||
revisionMap := make(map[int]bool)
|
||||
if snapshotID == "" && len(revisionsToBeCopied) > 0 {
|
||||
LOG_ERROR("SNAPSHOT_ERROR", "You must specify the snapshot id when one or more revisions are specified.")
|
||||
return false
|
||||
}
|
||||
|
||||
revisionMap := make(map[string]map[int]bool)
|
||||
|
||||
_, found := revisionMap[snapshotID]
|
||||
if !found {
|
||||
revisionMap[snapshotID] = make(map[int]bool)
|
||||
}
|
||||
|
||||
for _, revision := range revisionsToBeCopied {
|
||||
revisionMap[revision] = true
|
||||
revisionMap[snapshotID][revision] = true
|
||||
}
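// The bookkeeping above keys the revision map first by snapshot ID and then by revision, so the same
// structure works both when a single snapshot ID is given and when every snapshot ID in the storage is
// copied. A minimal sketch of that nested-map pattern (markRevision and needsCopy are illustrative
// names, not part of the actual code):
func markRevision(revisionMap map[string]map[int]bool, snapshotID string, revision int, needed bool) {
    if _, found := revisionMap[snapshotID]; !found {
        revisionMap[snapshotID] = make(map[int]bool)
    }
    revisionMap[snapshotID][revision] = needed
}

func needsCopy(revisionMap map[string]map[int]bool, snapshotID string, revision int) bool {
    return revisionMap[snapshotID][revision] // missing snapshot IDs or revisions read as false
}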
|
||||
|
||||
var snapshots []*Snapshot
|
||||
var snapshotIDs []string
|
||||
var err error
|
||||
|
||||
if snapshotID == "" {
|
||||
snapshotIDs, err = manager.SnapshotManager.ListSnapshotIDs()
|
||||
if err != nil {
|
||||
@@ -1368,6 +1519,10 @@ func (manager *BackupManager) CopySnapshots(otherManager *BackupManager, snapsho
|
||||
}
|
||||
|
||||
for _, id := range snapshotIDs {
|
||||
_, found := revisionMap[id]
|
||||
if !found {
|
||||
revisionMap[id] = make(map[int]bool)
|
||||
}
|
||||
revisions, err := manager.SnapshotManager.ListSnapshotRevisions(id)
|
||||
if err != nil {
|
||||
LOG_ERROR("SNAPSHOT_LIST", "Failed to list all revisions for snapshot %s: %v", id, err)
|
||||
@@ -1376,9 +1531,14 @@ func (manager *BackupManager) CopySnapshots(otherManager *BackupManager, snapsho
|
||||
|
||||
for _, revision := range revisions {
|
||||
if len(revisionsToBeCopied) > 0 {
|
||||
if _, found := revisionMap[revision]; !found {
|
||||
if _, found := revisionMap[id][revision]; found {
|
||||
revisionMap[id][revision] = true
|
||||
} else {
|
||||
revisionMap[id][revision] = false
|
||||
continue
|
||||
}
|
||||
} else {
|
||||
revisionMap[id][revision] = true
|
||||
}
|
||||
|
||||
snapshotPath := fmt.Sprintf("snapshots/%s/%d", id, revision)
|
||||
@@ -1390,32 +1550,51 @@ func (manager *BackupManager) CopySnapshots(otherManager *BackupManager, snapsho
|
||||
}
|
||||
|
||||
if exist {
|
||||
LOG_INFO("SNAPSHOT_EXIST", "Snapshot %s at revision %d already exists in the destination storage",
|
||||
LOG_INFO("SNAPSHOT_EXIST", "Snapshot %s at revision %d already exists at the destination storage",
|
||||
id, revision)
|
||||
revisionMap[id][revision] = false
|
||||
continue
|
||||
}
|
||||
|
||||
snapshot := manager.SnapshotManager.DownloadSnapshot(id, revision)
|
||||
snapshots = append(snapshots, snapshot)
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
if len(snapshots) == 0 {
|
||||
LOG_INFO("SNAPSHOT_COPY", "Nothing to copy, all snapshot revisions exist at the destination.")
|
||||
return true
|
||||
}
|
||||
|
||||
chunks := make(map[string]bool)
|
||||
otherChunks := make(map[string]bool)
|
||||
|
||||
for _, snapshot := range snapshots {
|
||||
|
||||
if revisionMap[snapshot.ID][snapshot.Revision] == false {
|
||||
continue
|
||||
}
|
||||
|
||||
LOG_TRACE("SNAPSHOT_COPY", "Copying snapshot %s at revision %d", snapshot.ID, snapshot.Revision)
|
||||
|
||||
for _, chunkHash := range snapshot.FileSequence {
|
||||
if _, found := chunks[chunkHash]; !found {
|
||||
chunks[chunkHash] = true
|
||||
}
|
||||
}
|
||||
|
||||
for _, chunkHash := range snapshot.ChunkSequence {
|
||||
if _, found := chunks[chunkHash]; !found {
|
||||
chunks[chunkHash] = true
|
||||
}
|
||||
}
|
||||
|
||||
for _, chunkHash := range snapshot.LengthSequence {
|
||||
if _, found := chunks[chunkHash]; !found {
|
||||
chunks[chunkHash] = true
|
||||
}
|
||||
}
|
||||
|
||||
description := manager.SnapshotManager.DownloadSequence(snapshot.ChunkSequence)
|
||||
err := snapshot.LoadChunks(description)
|
||||
@@ -1426,46 +1605,92 @@ func (manager *BackupManager) CopySnapshots(otherManager *BackupManager, snapsho
|
||||
}
|
||||
|
||||
for _, chunkHash := range snapshot.ChunkHashes {
|
||||
if _, found := chunks[chunkHash]; !found {
|
||||
chunks[chunkHash] = true
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
otherChunkFiles, otherChunkSizes := otherManager.SnapshotManager.ListAllFiles(otherManager.storage, "chunks/")
|
||||
|
||||
for i, otherChunkID := range otherChunkFiles {
|
||||
otherChunkID = strings.Replace(otherChunkID, "/", "", -1)
|
||||
if len(otherChunkID) != 64 {
|
||||
continue
|
||||
}
|
||||
if otherChunkSizes[i] == 0 {
|
||||
LOG_DEBUG("SNAPSHOT_COPY", "Chunk %s has length = 0", otherChunkID)
|
||||
continue
|
||||
}
|
||||
otherChunks[otherChunkID] = false
|
||||
}
|
||||
|
||||
LOG_DEBUG("SNAPSHOT_COPY", "Found %d chunks on destination storage", len(otherChunks))
|
||||
|
||||
chunksToCopy := 0
|
||||
chunksToSkip := 0
|
||||
|
||||
for chunkHash, _ := range chunks {
|
||||
otherChunkID := otherManager.config.GetChunkIDFromHash(chunkHash)
|
||||
if _, found := otherChunks[otherChunkID]; found {
|
||||
chunksToSkip++
|
||||
} else {
|
||||
chunksToCopy++
|
||||
}
|
||||
}
|
||||
|
||||
LOG_DEBUG("SNAPSHOT_COPY", "Chunks to copy = %d, to skip = %d, total = %d", chunksToCopy, chunksToSkip, chunksToCopy+chunksToSkip)
|
||||
LOG_DEBUG("SNAPSHOT_COPY", "Total chunks in source snapshot revisions = %d\n", len(chunks))
|
||||
|
||||
chunkDownloader := CreateChunkDownloader(manager.config, manager.storage, nil, false, threads)
|
||||
|
||||
chunkUploader := CreateChunkUploader(otherManager.config, otherManager.storage, nil, threads,
|
||||
func(chunk *Chunk, chunkIndex int, skipped bool, chunkSize int, uploadSize int) {
|
||||
if skipped {
|
||||
LOG_INFO("SNAPSHOT_COPY", "Chunk %s (%d/%d) exists in the destination", chunk.GetID(), chunkIndex, len(chunks))
|
||||
LOG_INFO("SNAPSHOT_COPY", "Chunk %s (%d/%d) exists at the destination", chunk.GetID(), chunkIndex, len(chunks))
|
||||
} else {
|
||||
LOG_INFO("SNAPSHOT_COPY", "Copied chunk %s (%d/%d)", chunk.GetID(), chunkIndex, len(chunks))
|
||||
LOG_INFO("SNAPSHOT_COPY", "Chunk %s (%d/%d) copied to the destination", chunk.GetID(), chunkIndex, len(chunks))
|
||||
}
|
||||
otherManager.config.PutChunk(chunk)
|
||||
})
|
||||
|
||||
chunkUploader.Start()
|
||||
|
||||
totalCopied := 0
|
||||
totalSkipped := 0
|
||||
chunkIndex := 0
|
||||
|
||||
for chunkHash, _ := range chunks {
|
||||
chunkIndex++
|
||||
chunkID := manager.config.GetChunkIDFromHash(chunkHash)
|
||||
newChunkID := otherManager.config.GetChunkIDFromHash(chunkHash)
|
||||
|
||||
if _, found := otherChunks[newChunkID]; !found {
|
||||
LOG_DEBUG("SNAPSHOT_COPY", "Copying chunk %s to %s", chunkID, newChunkID)
|
||||
|
||||
i := chunkDownloader.AddChunk(chunkHash)
|
||||
chunk := chunkDownloader.WaitForChunk(i)
|
||||
newChunk := otherManager.config.GetChunk()
|
||||
newChunk.Reset(true)
|
||||
newChunk.Write(chunk.GetBytes())
|
||||
chunkUploader.StartChunk(newChunk, chunkIndex)
|
||||
totalCopied++
|
||||
} else {
|
||||
LOG_INFO("SNAPSHOT_COPY", "Chunk %s (%d/%d) skipped at the destination", chunkID, chunkIndex, len(chunks))
|
||||
totalSkipped++
|
||||
}
|
||||
}
|
||||
|
||||
chunkDownloader.Stop()
|
||||
chunkUploader.Stop()
|
||||
|
||||
LOG_INFO("SNAPSHOT_COPY", "Copy complete, %d total chunks, %d chunks copied, %d skipped", totalCopied+totalSkipped, totalCopied, totalSkipped)
|
||||
|
||||
for _, snapshot := range snapshots {
|
||||
otherManager.storage.CreateDirectory(0, fmt.Sprintf("snapshots/%s", manager.snapshotID))
|
||||
if revisionMap[snapshot.ID][snapshot.Revision] == false {
|
||||
continue
|
||||
}
|
||||
otherManager.storage.CreateDirectory(0, fmt.Sprintf("snapshots/%s", snapshot.ID))
|
||||
description, _ := snapshot.MarshalJSON()
|
||||
path := fmt.Sprintf("snapshots/%s/%d", manager.snapshotID, snapshot.Revision)
|
||||
path := fmt.Sprintf("snapshots/%s/%d", snapshot.ID, snapshot.Revision)
|
||||
otherManager.SnapshotManager.UploadFile(path, path, description)
|
||||
LOG_INFO("SNAPSHOT_COPY", "Copied snapshot %s at revision %d", snapshot.ID, snapshot.Revision)
|
||||
}
|
||||
|
||||
@@ -1,19 +1,19 @@
|
||||
// Copyright (c) Acrosync LLC. All rights reserved.
|
||||
// Licensed under the Fair Source License 0.9 (https://fair.io/)
|
||||
// User Limitation: 5 users
|
||||
// Free for personal use and commercial trial
|
||||
// Commercial use requires per-user licenses available from https://duplicacy.com
|
||||
|
||||
package duplicacy
|
||||
|
||||
import (
|
||||
"os"
|
||||
crypto_rand "crypto/rand"
|
||||
"crypto/sha256"
|
||||
"encoding/hex"
|
||||
"io"
|
||||
"math/rand"
|
||||
"os"
|
||||
"path"
|
||||
"testing"
|
||||
"math/rand"
|
||||
"encoding/hex"
|
||||
"time"
|
||||
"crypto/sha256"
|
||||
crypto_rand "crypto/rand"
|
||||
|
||||
"runtime/debug"
|
||||
)
|
||||
@@ -104,6 +104,27 @@ func modifyFile(path string, portion float32) {
|
||||
}
|
||||
}
|
||||
|
||||
func checkExistence(t *testing.T, path string, exists bool, isDir bool) {
|
||||
stat, err := os.Stat(path)
|
||||
if exists {
|
||||
if err != nil {
|
||||
t.Errorf("%s does not exist: %v", path, err)
|
||||
} else if isDir {
|
||||
if !stat.Mode().IsDir() {
|
||||
t.Errorf("%s is not a directory", path)
|
||||
}
|
||||
} else {
|
||||
if stat.Mode().IsDir() {
|
||||
t.Errorf("%s is not a file", path)
|
||||
}
|
||||
}
|
||||
} else {
|
||||
if err == nil || !os.IsNotExist(err) {
|
||||
t.Errorf("%s may exist: %v", path, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func truncateFile(path string) {
|
||||
file, err := os.OpenFile(path, os.O_WRONLY, 0644)
|
||||
if err != nil {
|
||||
@@ -173,6 +194,9 @@ func TestBackupManager(t *testing.T) {
|
||||
|
||||
os.Mkdir(testDir+"/repository1", 0700)
|
||||
os.Mkdir(testDir+"/repository1/dir1", 0700)
|
||||
os.Mkdir(testDir+"/repository1/.duplicacy", 0700)
|
||||
os.Mkdir(testDir+"/repository2", 0700)
|
||||
os.Mkdir(testDir+"/repository2/.duplicacy", 0700)
|
||||
|
||||
maxFileSize := 1000000
|
||||
//maxFileSize := 200000
|
||||
@@ -203,26 +227,27 @@ func TestBackupManager(t *testing.T) {
|
||||
|
||||
time.Sleep(time.Duration(delay) * time.Second)
|
||||
if testFixedChunkSize {
|
||||
if !ConfigStorage(storage, 100, 64 * 1024, 64 * 1024, 64 * 1024, password, nil) {
|
||||
if !ConfigStorage(storage, 16384, 100, 64*1024, 64*1024, 64*1024, password, nil) {
|
||||
t.Errorf("Failed to initialize the storage")
|
||||
}
|
||||
} else {
|
||||
if !ConfigStorage(storage, 100, 64 * 1024, 256 * 1024, 16 * 1024, password, nil) {
|
||||
if !ConfigStorage(storage, 16384, 100, 64*1024, 256*1024, 16*1024, password, nil) {
|
||||
t.Errorf("Failed to initialize the storage")
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
time.Sleep(time.Duration(delay) * time.Second)
|
||||
|
||||
SetDuplicacyPreferencePath(testDir + "/repository1")
|
||||
SetDuplicacyPreferencePath(testDir + "/repository1/.duplicacy")
|
||||
backupManager := CreateBackupManager("host1", storage, testDir, password)
|
||||
backupManager.SetupSnapshotCache("default")
|
||||
|
||||
backupManager.Backup(testDir + "/repository1", /*quickMode=*/true, threads, "first", false, false)
|
||||
SetDuplicacyPreferencePath(testDir + "/repository1/.duplicacy")
|
||||
backupManager.Backup(testDir+"/repository1" /*quickMode=*/, true, threads, "first", false, false)
|
||||
time.Sleep(time.Duration(delay) * time.Second)
|
||||
SetDuplicacyPreferencePath(testDir + "/repository2/.duplicacy")
|
||||
backupManager.Restore(testDir+"/repository2", threads, /*inPlace=*/false, /*quickMode=*/false, threads, /*overwrite=*/true,
|
||||
/*deleteMode=*/false, /*showStatistics=*/false, /*patterns=*/nil)
|
||||
/*deleteMode=*/false, /*setowner=*/false, /*showStatistics=*/false, /*patterns=*/ nil)
|
||||
|
||||
for _, f := range []string{"file1", "file2", "dir1/file3"} {
|
||||
if _, err := os.Stat(testDir + "/repository2/" + f); os.IsNotExist(err) {
|
||||
@@ -241,10 +266,12 @@ func TestBackupManager(t *testing.T) {
|
||||
modifyFile(testDir+"/repository1/file2", 0.2)
|
||||
modifyFile(testDir+"/repository1/dir1/file3", 0.3)
|
||||
|
||||
backupManager.Backup(testDir + "/repository1", /*quickMode=*/true, threads, "second", false, false)
|
||||
SetDuplicacyPreferencePath(testDir + "/repository1/.duplicacy")
|
||||
backupManager.Backup(testDir+"/repository1" /*quickMode=*/, true, threads, "second", false, false)
|
||||
time.Sleep(time.Duration(delay) * time.Second)
|
||||
SetDuplicacyPreferencePath(testDir + "/repository2/.duplicacy")
|
||||
backupManager.Restore(testDir+"/repository2", 2, /*inPlace=*/true, /*quickMode=*/true, threads, /*overwrite=*/true,
|
||||
/*deleteMode=*/false, /*showStatistics=*/false, /*patterns=*/nil)
|
||||
/*deleteMode=*/false, /*setowner=*/false, /*showStatistics=*/false, /*patterns=*/nil)
|
||||
|
||||
for _, f := range []string{"file1", "file2", "dir1/file3"} {
|
||||
hash1 := getFileHash(testDir + "/repository1/" + f)
|
||||
@@ -254,11 +281,25 @@ func TestBackupManager(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
// Truncate file2 and add a few empty directories
|
||||
truncateFile(testDir + "/repository1/file2")
|
||||
backupManager.Backup(testDir + "/repository1", /*quickMode=*/false, threads, "third", false, false)
|
||||
os.Mkdir(testDir+"/repository1/dir2", 0700)
|
||||
os.Mkdir(testDir+"/repository1/dir2/dir3", 0700)
|
||||
os.Mkdir(testDir+"/repository1/dir4", 0700)
|
||||
SetDuplicacyPreferencePath(testDir + "/repository1/.duplicacy")
|
||||
backupManager.Backup(testDir+"/repository1" /*quickMode=*/, false, threads, "third", false, false)
|
||||
time.Sleep(time.Duration(delay) * time.Second)
|
||||
|
||||
// Create some directories and files under repository2 that will be deleted during restore
|
||||
os.Mkdir(testDir+"/repository2/dir5", 0700)
|
||||
os.Mkdir(testDir+"/repository2/dir5/dir6", 0700)
|
||||
os.Mkdir(testDir+"/repository2/dir7", 0700)
|
||||
createRandomFile(testDir+"/repository2/file4", 100)
|
||||
createRandomFile(testDir+"/repository2/dir5/file5", 100)
|
||||
|
||||
SetDuplicacyPreferencePath(testDir + "/repository2/.duplicacy")
|
||||
backupManager.Restore(testDir+"/repository2", 3, /*inPlace=*/true, /*quickMode=*/false, threads, /*overwrite=*/true,
|
||||
/*deleteMode=*/false, /*showStatistics=*/false, /*patterns=*/nil)
|
||||
/*deleteMode=*/true, /*setowner=*/false, /*showStatistics=*/false, /*patterns=*/nil)
|
||||
|
||||
for _, f := range []string{"file1", "file2", "dir1/file3"} {
|
||||
hash1 := getFileHash(testDir + "/repository1/" + f)
|
||||
@@ -268,11 +309,24 @@ func TestBackupManager(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
// These files/dirs should not exist because deleteMode == true
|
||||
checkExistence(t, testDir+"/repository2/dir5", false, false)
|
||||
checkExistence(t, testDir+"/repository2/dir5/dir6", false, false)
|
||||
checkExistence(t, testDir+"/repository2/dir7", false, false)
|
||||
checkExistence(t, testDir+"/repository2/file4", false, false)
|
||||
checkExistence(t, testDir+"/repository2/dir5/file5", false, false)
|
||||
|
||||
// These empty dirs should exist
|
||||
checkExistence(t, testDir+"/repository2/dir2", true, true)
|
||||
checkExistence(t, testDir+"/repository2/dir2/dir3", true, true)
|
||||
checkExistence(t, testDir+"/repository2/dir4", true, true)
|
||||
|
||||
// Remove file2 and dir1/file3 and restore them from revision 3
|
||||
os.Remove(testDir + "/repository1/file2")
|
||||
os.Remove(testDir + "/repository1/dir1/file3")
|
||||
SetDuplicacyPreferencePath(testDir + "/repository1/.duplicacy")
|
||||
backupManager.Restore(testDir+"/repository1", 3, /*inPlace=*/true, /*quickMode=*/false, threads, /*overwrite=*/true,
|
||||
/*deleteMode=*/false, /*showStatistics=*/false, /*patterns=*/[]string{"+file2", "+dir1/file3", "-*"})
|
||||
/*deleteMode=*/false, /*setowner=*/false, /*showStatistics=*/false, /*patterns=*/[]string{"+file2", "+dir1/file3", "-*"})
|
||||
|
||||
for _, f := range []string{"file1", "file2", "dir1/file3"} {
|
||||
hash1 := getFileHash(testDir + "/repository1/" + f)
|
||||
@@ -282,6 +336,30 @@ func TestBackupManager(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
numberOfSnapshots := backupManager.SnapshotManager.ListSnapshots( /*snapshotID*/ "host1" /*revisionsToList*/, nil /*tag*/, "" /*showFiles*/, false /*showChunks*/, false)
|
||||
if numberOfSnapshots != 3 {
|
||||
t.Errorf("Expected 3 snapshots but got %d", numberOfSnapshots)
|
||||
}
|
||||
backupManager.SnapshotManager.CheckSnapshots( /*snapshotID*/ "host1" /*revisions*/, []int{1, 2, 3} /*tag*/, "",
|
||||
/*showStatistics*/ false /*showTabular*/, false /*checkFiles*/, false /*searchFossils*/, false /*resurrect*/, false)
|
||||
backupManager.SnapshotManager.PruneSnapshots("host1", "host1" /*revisions*/, []int{1} /*tags*/, nil /*retentions*/, nil,
|
||||
/*exhaustive*/ false /*exclusive=*/, false /*ignoredIDs*/, nil /*dryRun*/, false /*deleteOnly*/, false /*collectOnly*/, false)
|
||||
numberOfSnapshots = backupManager.SnapshotManager.ListSnapshots( /*snapshotID*/ "host1" /*revisionsToList*/, nil /*tag*/, "" /*showFiles*/, false /*showChunks*/, false)
|
||||
if numberOfSnapshots != 2 {
|
||||
t.Errorf("Expected 2 snapshots but got %d", numberOfSnapshots)
|
||||
}
|
||||
backupManager.SnapshotManager.CheckSnapshots( /*snapshotID*/ "host1" /*revisions*/, []int{2, 3} /*tag*/, "",
|
||||
/*showStatistics*/ false /*showTabular*/, false /*checkFiles*/, false /*searchFossils*/, false /*resurrect*/, false)
|
||||
backupManager.Backup(testDir+"/repository1" /*quickMode=*/, false, threads, "fourth", false, false)
|
||||
backupManager.SnapshotManager.PruneSnapshots("host1", "host1" /*revisions*/, nil /*tags*/, nil /*retentions*/, nil,
|
||||
/*exhaustive*/ false /*exclusive=*/, true /*ignoredIDs*/, nil /*dryRun*/, false /*deleteOnly*/, false /*collectOnly*/, false)
|
||||
numberOfSnapshots = backupManager.SnapshotManager.ListSnapshots( /*snapshotID*/ "host1" /*revisionsToList*/, nil /*tag*/, "" /*showFiles*/, false /*showChunks*/, false)
|
||||
if numberOfSnapshots != 3 {
|
||||
t.Errorf("Expected 3 snapshots but got %d", numberOfSnapshots)
|
||||
}
|
||||
backupManager.SnapshotManager.CheckSnapshots( /*snapshotID*/ "host1" /*revisions*/, []int{2, 3, 4} /*tag*/, "",
|
||||
/*showStatistics*/ false /*showTabular*/, false /*checkFiles*/, false /*searchFossils*/, false /*resurrect*/, false)
|
||||
|
||||
/*buf := make([]byte, 1<<16)
|
||||
runtime.Stack(buf, true)
|
||||
fmt.Printf("%s", buf)*/
|
||||
|
||||
@@ -1,23 +1,22 @@
|
||||
// Copyright (c) Acrosync LLC. All rights reserved.
|
||||
// Licensed under the Fair Source License 0.9 (https://fair.io/)
|
||||
// User Limitation: 5 users
|
||||
// Free for personal use and commercial trial
|
||||
// Commercial use requires per-user licenses available from https://duplicacy.com
|
||||
|
||||
package duplicacy
|
||||
|
||||
import (
|
||||
"io"
|
||||
"fmt"
|
||||
"hash"
|
||||
"bytes"
|
||||
"runtime"
|
||||
"crypto/cipher"
|
||||
"compress/zlib"
|
||||
"crypto/aes"
|
||||
"crypto/cipher"
|
||||
"crypto/rand"
|
||||
"encoding/hex"
|
||||
"compress/zlib"
|
||||
"fmt"
|
||||
"hash"
|
||||
"io"
|
||||
"runtime"
|
||||
|
||||
"github.com/bkaradzic/go-lz4"
|
||||
|
||||
)
|
||||
|
||||
// A chunk needs to acquire a new buffer and return the old one for every encrypt/decrypt operation, therefore
|
||||
@@ -132,7 +131,7 @@ func (chunk *Chunk) Write(p []byte) (int, error){
|
||||
|
||||
// GetHash returns the chunk hash.
|
||||
func (chunk *Chunk) GetHash() string {
|
||||
if (len(chunk.hash) == 0) {
|
||||
if len(chunk.hash) == 0 {
|
||||
chunk.hash = chunk.hasher.Sum(nil)
|
||||
}
|
||||
|
||||
@@ -154,6 +153,18 @@ func (chunk *Chunk) GetID() string {
|
||||
return chunk.id
|
||||
}
|
||||
|
||||
func (chunk *Chunk) VerifyID() {
|
||||
hasher := chunk.config.NewKeyedHasher(chunk.config.HashKey)
|
||||
hasher.Write(chunk.buffer.Bytes())
|
||||
hash := hasher.Sum(nil)
|
||||
hasher = chunk.config.NewKeyedHasher(chunk.config.IDKey)
|
||||
hasher.Write([]byte(hash))
|
||||
chunkID := hex.EncodeToString(hasher.Sum(nil))
|
||||
if chunkID != chunk.GetID() {
|
||||
LOG_ERROR("CHUNK_ID", "The chunk id should be %s instead of %s, length: %d", chunkID, chunk.GetID(), len(chunk.buffer.Bytes()))
|
||||
}
|
||||
}
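// VerifyID recomputes the chunk ID from the raw chunk content: a keyed hash of the content with
// HashKey gives the chunk hash, and a keyed hash of that result with IDKey, hex-encoded, gives the
// chunk ID used as the file name. A standalone sketch of the same two-step derivation using
// HMAC-SHA256, matching the key-usage comments in the config (whether the keyed hasher is HMAC-SHA256
// depends on the configuration; it assumes crypto/hmac, crypto/sha256 and encoding/hex are imported):
func deriveChunkID(hashKey, idKey, content []byte) string {
    inner := hmac.New(sha256.New, hashKey)
    inner.Write(content)
    chunkHash := inner.Sum(nil) // HMAC(HashKey, content): the chunk hash

    outer := hmac.New(sha256.New, idKey)
    outer.Write(chunkHash)
    return hex.EncodeToString(outer.Sum(nil)) // HMAC(IDKey, chunk hash): the chunk ID / file name
}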
|
||||
|
||||
// Encrypt encrypts the plain data stored in the chunk buffer. If derivationKey is not nil, the actual
|
||||
// encryption key will be HMAC-SHA256(encryptionKey, derivationKey).
|
||||
func (chunk *Chunk) Encrypt(encryptionKey []byte, derivationKey string) (err error) {
|
||||
@@ -367,4 +378,3 @@ func (chunk *Chunk) Decrypt(encryptionKey []byte, derivationKey string) (err err
|
||||
return nil
|
||||
|
||||
}
|
||||
|
||||
|
||||
@@ -1,14 +1,14 @@
|
||||
// Copyright (c) Acrosync LLC. All rights reserved.
|
||||
// Licensed under the Fair Source License 0.9 (https://fair.io/)
|
||||
// User Limitation: 5 users
|
||||
// Free for personal use and commercial trial
|
||||
// Commercial use requires per-user licenses available from https://duplicacy.com
|
||||
|
||||
package duplicacy
|
||||
|
||||
import (
|
||||
"testing"
|
||||
"bytes"
|
||||
crypto_rand "crypto/rand"
|
||||
"math/rand"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestChunk(t *testing.T) {
|
||||
@@ -67,7 +67,6 @@ func TestChunk(t *testing.T) {
|
||||
t.Errorf("Original data:\n%x\nDecrypted data:\n%x\n", plainData, decryptedData)
|
||||
}
|
||||
|
||||
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
@@ -1,10 +1,11 @@
|
||||
// Copyright (c) Acrosync LLC. All rights reserved.
|
||||
// Licensed under the Fair Source License 0.9 (https://fair.io/)
|
||||
// User Limitation: 5 users
|
||||
// Free for personal use and commercial trial
|
||||
// Commercial use requires per-user licenses available from https://duplicacy.com
|
||||
|
||||
package duplicacy
|
||||
|
||||
import (
|
||||
"io"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
)
|
||||
@@ -45,8 +46,8 @@ type ChunkDownloader struct {
|
||||
completionChannel chan ChunkDownloadCompletion // A downloading goroutine sends back the chunk via this channel after downloading
|
||||
|
||||
startTime int64 // The time it starts downloading
|
||||
totalFileSize int64 // Total file size
|
||||
downloadedFileSize int64 // Downloaded file size
|
||||
totalChunkSize int64 // Total chunk size
|
||||
downloadedChunkSize int64 // Downloaded chunk size
|
||||
numberOfDownloadedChunks int // The number of chunks that have been downloaded
|
||||
numberOfDownloadingChunks int // The number of chunks still being downloaded
|
||||
numberOfActiveChunks int // The number of chunks that are being downloaded or have been downloaded but not reclaimed
|
||||
@@ -95,7 +96,7 @@ func (downloader *ChunkDownloader) AddFiles(snapshot *Snapshot, files [] *Entry)
|
||||
downloader.taskList = nil
|
||||
lastChunkIndex := -1
|
||||
maximumChunks := 0
|
||||
downloader.totalFileSize = 0
|
||||
downloader.totalChunkSize = 0
|
||||
for _, file := range files {
|
||||
if file.Size == 0 {
|
||||
continue
|
||||
@@ -109,6 +110,7 @@ func (downloader *ChunkDownloader) AddFiles(snapshot *Snapshot, files [] *Entry)
|
||||
needed: false,
|
||||
}
|
||||
downloader.taskList = append(downloader.taskList, task)
|
||||
downloader.totalChunkSize += int64(snapshot.ChunkLengths[i])
|
||||
} else {
|
||||
downloader.taskList[len(downloader.taskList)-1].needed = true
|
||||
}
|
||||
@@ -119,7 +121,6 @@ func (downloader *ChunkDownloader) AddFiles(snapshot *Snapshot, files [] *Entry)
|
||||
if file.EndChunk-file.StartChunk > maximumChunks {
|
||||
maximumChunks = file.EndChunk - file.StartChunk
|
||||
}
|
||||
downloader.totalFileSize += file.Size
|
||||
}
|
||||
}
|
||||
|
||||
@@ -177,12 +178,6 @@ func (downloader *ChunkDownloader) Reclaim(chunkIndex int) {
|
||||
return
|
||||
}
|
||||
|
||||
for i := downloader.lastChunkIndex; i < chunkIndex; i++ {
|
||||
if !downloader.taskList[i].isDownloading {
|
||||
atomic.AddInt64(&downloader.downloadedFileSize, int64(downloader.taskList[i].chunkLength))
|
||||
}
|
||||
}
|
||||
|
||||
for i, _ := range downloader.completedTasks {
|
||||
if i < chunkIndex && downloader.taskList[i].chunk != nil {
|
||||
downloader.config.PutChunk(downloader.taskList[i].chunk)
|
||||
@@ -320,30 +315,56 @@ func (downloader *ChunkDownloader) Download(threadIndex int, task ChunkDownloadT
|
||||
|
||||
if !exist {
|
||||
// A chunk is not found. This is a serious error and hopefully it will never happen.
|
||||
if err != nil {
|
||||
LOG_FATAL("DOWNLOAD_CHUNK", "Chunk %s can't be found: %v", chunkID, err)
|
||||
} else {
|
||||
LOG_FATAL("DOWNLOAD_CHUNK", "Chunk %s can't be found", chunkID)
|
||||
}
|
||||
return false
|
||||
}
|
||||
LOG_DEBUG("CHUNK_FOSSIL", "Chunk %s has been marked as a fossil", chunkID)
|
||||
}
|
||||
|
||||
const MaxDownloadAttempts = 3
|
||||
for downloadAttempt := 0; ; downloadAttempt++ {
|
||||
err = downloader.storage.DownloadFile(threadIndex, chunkPath, chunk)
|
||||
if err != nil {
|
||||
LOG_ERROR("UPLOAD_FATAL", "Failed to download the chunk %s: %v", chunkID, err)
|
||||
if err == io.ErrUnexpectedEOF && downloadAttempt < MaxDownloadAttempts {
|
||||
LOG_WARN("DOWNLOAD_RETRY", "Failed to download the chunk %s: %v; retrying", chunkID, err)
|
||||
chunk.Reset(false)
|
||||
continue
|
||||
} else {
|
||||
LOG_ERROR("DOWNLOAD_CHUNK", "Failed to download the chunk %s: %v", chunkID, err)
|
||||
return false
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
err = chunk.Decrypt(downloader.config.ChunkKey, task.chunkHash)
|
||||
if err != nil {
|
||||
LOG_ERROR("UPLOAD_CHUNK", "Failed to decrypt the chunk %s: %v", chunkID, err)
|
||||
if downloadAttempt < MaxDownloadAttempts {
|
||||
LOG_WARN("DOWNLOAD_RETRY", "Failed to decrypt the chunk %s: %v; retrying", chunkID, err)
|
||||
chunk.Reset(false)
|
||||
continue
|
||||
} else {
|
||||
LOG_ERROR("DOWNLOAD_DECRYPT", "Failed to decrypt the chunk %s: %v", chunkID, err)
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
actualChunkID := chunk.GetID()
|
||||
if actualChunkID != chunkID {
|
||||
LOG_FATAL("UPLOAD_CORRUPTED", "The chunk %s has a hash id of %s", chunkID, actualChunkID)
|
||||
if downloadAttempt < MaxDownloadAttempts {
|
||||
LOG_WARN("DOWNLOAD_RETRY", "The chunk %s has a hash id of %s; retrying", chunkID, actualChunkID)
|
||||
chunk.Reset(false)
|
||||
continue
|
||||
} else {
|
||||
LOG_FATAL("DOWNLOAD_CORRUPTED", "The chunk %s has a hash id of %s", chunkID, actualChunkID)
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
break
|
||||
}
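// The loop above retries transient failures (short downloads, decryption errors, chunk ID mismatches)
// up to MaxDownloadAttempts additional times, resetting the chunk buffer before each retry. The same
// pattern in isolation (withRetries, attempt and reset are illustrative names):
func withRetries(maxAttempts int, reset func(), attempt func() error) error {
    for attemptCount := 0; ; attemptCount++ {
        err := attempt()
        if err == nil {
            return nil
        }
        if attemptCount >= maxAttempts {
            return err // give up after the final attempt
        }
        reset() // discard any partial state before trying again
    }
}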
|
||||
|
||||
if len(cachedPath) > 0 {
|
||||
// Save a copy to the local snapshot cache
|
||||
@@ -353,21 +374,20 @@ func (downloader *ChunkDownloader) Download(threadIndex int, task ChunkDownloadT
|
||||
}
|
||||
}
|
||||
|
||||
if (downloader.showStatistics || IsTracing()) && downloader.totalFileSize > 0 {
|
||||
downloadedChunkSize := atomic.AddInt64(&downloader.downloadedChunkSize, int64(chunk.GetLength()))
|
||||
|
||||
atomic.AddInt64(&downloader.downloadedFileSize, int64(chunk.GetLength()))
|
||||
downloadFileSize := atomic.LoadInt64(&downloader.downloadedFileSize)
|
||||
if (downloader.showStatistics || IsTracing()) && downloader.totalChunkSize > 0 {
|
||||
|
||||
now := time.Now().Unix()
|
||||
if now <= downloader.startTime {
|
||||
now = downloader.startTime + 1
|
||||
}
|
||||
speed := downloadFileSize / (now - downloader.startTime)
|
||||
speed := downloadedChunkSize / (now - downloader.startTime)
|
||||
remainingTime := int64(0)
|
||||
if speed > 0 {
|
||||
remainingTime = (downloader.totalFileSize - downloadFileSize) / speed + 1
|
||||
remainingTime = (downloader.totalChunkSize-downloadedChunkSize)/speed + 1
|
||||
}
|
||||
percentage := float32(downloadFileSize * 1000 / downloader.totalFileSize)
|
||||
percentage := float32(downloadedChunkSize * 1000 / downloader.totalChunkSize)
|
||||
LOG_INFO("DOWNLOAD_PROGRESS", "Downloaded chunk %d size %d, %sB/s %s %.1f%%",
|
||||
task.chunkIndex+1, chunk.GetLength(),
|
||||
PrettySize(speed), PrettyTime(remainingTime), percentage/10)
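// The progress line above is plain integer arithmetic over the chunk counters: bytes downloaded so far
// divided by elapsed seconds gives the speed, and the remaining bytes divided by that speed give the
// ETA. A small sketch of the same computation (the caller is expected to ensure total > 0):
func downloadProgress(downloaded, total, startTime, now int64) (speed int64, remainingSeconds int64, percent float64) {
    if now <= startTime {
        now = startTime + 1 // avoid a zero or negative elapsed time right after the start
    }
    speed = downloaded / (now - startTime)
    if speed > 0 {
        remainingSeconds = (total-downloaded)/speed + 1
    }
    percent = float64(downloaded) * 100 / float64(total)
    return speed, remainingSeconds, percent
}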
|
||||
|
||||
@@ -1,14 +1,14 @@
|
||||
// Copyright (c) Acrosync LLC. All rights reserved.
|
||||
// Licensed under the Fair Source License 0.9 (https://fair.io/)
|
||||
// User Limitation: 5 users
|
||||
// Free for personal use and commercial trial
|
||||
// Commercial use requires per-user licenses available from https://duplicacy.com
|
||||
|
||||
package duplicacy
|
||||
|
||||
import (
|
||||
"io"
|
||||
"crypto/sha256"
|
||||
"encoding/hex"
|
||||
"encoding/binary"
|
||||
"encoding/hex"
|
||||
"io"
|
||||
)
|
||||
|
||||
// ChunkMaker breaks data into chunks using buzhash. To save memory, the chunk maker only uses a circular buffer
|
||||
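// A highly simplified sketch of the content-defined chunking idea: slide a rolling hash over the data
// and cut a chunk whenever the hash matches a boundary condition, subject to minimum and maximum chunk
// sizes. The additive rolling hash below is a stand-in for illustration only; the real ChunkMaker uses
// buzhash over a circular buffer and never keeps the whole input in memory.
func splitSimple(data []byte, minSize, maxSize int, mask uint32) [][]byte {
    var chunks [][]byte
    start := 0
    var rolling uint32
    for i, b := range data {
        rolling = rolling<<1 + uint32(b) // stand-in for the buzhash update
        size := i - start + 1
        if (size >= minSize && rolling&mask == 0) || size >= maxSize {
            chunks = append(chunks, data[start:i+1])
            start = i + 1
            rolling = 0
        }
    }
    if start < len(data) {
        chunks = append(chunks, data[start:]) // final, possibly short, chunk
    }
    return chunks
}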
@@ -178,6 +178,9 @@ func (maker *ChunkMaker) ForEachChunk(reader io.Reader, endOfChunk func(chunk *C
|
||||
fileHasher = maker.config.NewFileHasher()
|
||||
isEOF = false
|
||||
}
|
||||
} else {
|
||||
endOfChunk(chunk, false)
|
||||
startNewChunk()
|
||||
}
|
||||
}
|
||||
|
||||
@@ -226,9 +229,8 @@ func (maker *ChunkMaker) ForEachChunk(reader io.Reader, endOfChunk func(chunk *C
|
||||
return
|
||||
}
|
||||
|
||||
|
||||
// Minimum chunk size has been reached. Calculate the buzhash for the minimum size chunk.
|
||||
if (!minimumReached) {
|
||||
if !minimumReached {
|
||||
|
||||
bytes := maker.minimumChunkSize
|
||||
|
||||
|
||||
@@ -1,16 +1,16 @@
|
||||
// Copyright (c) Acrosync LLC. All rights reserved.
|
||||
// Licensed under the Fair Source License 0.9 (https://fair.io/)
|
||||
// User Limitation: 5 users
|
||||
// Free for personal use and commercial trial
|
||||
// Commercial use requires per-user licenses available from https://duplicacy.com
|
||||
|
||||
package duplicacy
|
||||
|
||||
import (
|
||||
"testing"
|
||||
"bytes"
|
||||
crypto_rand "crypto/rand"
|
||||
"math/rand"
|
||||
"io"
|
||||
"math/rand"
|
||||
"sort"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func splitIntoChunks(content []byte, n, averageChunkSize, maxChunkSize, minChunkSize,
|
||||
@@ -76,7 +76,7 @@ func splitIntoChunks(content []byte, n, averageChunkSize, maxChunkSize, minChunk
|
||||
return buffers[i], true
|
||||
})
|
||||
|
||||
if (totalFileSize != int64(totalChunkSize)) {
|
||||
if totalFileSize != int64(totalChunkSize) {
|
||||
LOG_ERROR("CHUNK_SPLIT", "total chunk size: %d, total file size: %d", totalChunkSize, totalFileSize)
|
||||
}
|
||||
return chunks, totalChunkSize
|
||||
@@ -84,7 +84,6 @@ func splitIntoChunks(content []byte, n, averageChunkSize, maxChunkSize, minChunk
|
||||
|
||||
func TestChunkMaker(t *testing.T) {
|
||||
|
||||
|
||||
//sizes := [...] int { 64 }
|
||||
sizes := [...]int{64, 256, 1024, 1024 * 10}
|
||||
|
||||
@@ -101,7 +100,7 @@ func TestChunkMaker(t *testing.T) {
|
||||
|
||||
capacities := [...]int{32, 33, 34, 61, 62, 63, 64, 65, 66, 126, 127, 128, 129, 130,
|
||||
255, 256, 257, 511, 512, 513, 1023, 1024, 1025,
|
||||
32, 48, 64, 128, 256, 512, 1024, 2048, }
|
||||
32, 48, 64, 128, 256, 512, 1024, 2048}
|
||||
|
||||
//capacities := [...]int { 32 }
|
||||
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
// Copyright (c) Acrosync LLC. All rights reserved.
|
||||
// Licensed under the Fair Source License 0.9 (https://fair.io/)
|
||||
// User Limitation: 5 users
|
||||
// Free for personal use and commercial trial
|
||||
// Commercial use requires per-user licenses available from https://duplicacy.com
|
||||
|
||||
package duplicacy
|
||||
|
||||
@@ -92,6 +92,11 @@ func (uploader *ChunkUploader) Upload(threadIndex int, task ChunkUploadTask) boo
|
||||
chunkSize := chunk.GetLength()
|
||||
chunkID := chunk.GetID()
|
||||
|
||||
// For a snapshot chunk, verify that its chunk id is correct
|
||||
if uploader.snapshotCache != nil {
|
||||
chunk.VerifyID()
|
||||
}
|
||||
|
||||
if uploader.snapshotCache != nil && uploader.storage.IsCacheNeeded() {
|
||||
// Save a copy to the local snapshot.
|
||||
chunkPath, exist, _, err := uploader.snapshotCache.FindChunk(threadIndex, chunkID, false)
|
||||
@@ -117,7 +122,7 @@ func (uploader *ChunkUploader) Upload(threadIndex int, task ChunkUploadTask) boo
|
||||
// Chunk deduplication by name in effect here.
|
||||
LOG_DEBUG("CHUNK_DUPLICATE", "Chunk %s already exists", chunkID)
|
||||
|
||||
uploader.completionFunc(chunk, task.chunkIndex, false, chunkSize, 0)
|
||||
uploader.completionFunc(chunk, task.chunkIndex, true, chunkSize, 0)
|
||||
atomic.AddInt32(&uploader.numberOfUploadingTasks, -1)
|
||||
return false
|
||||
}
|
||||
@@ -129,13 +134,17 @@ func (uploader *ChunkUploader) Upload(threadIndex int, task ChunkUploadTask) boo
|
||||
return false
|
||||
}
|
||||
|
||||
if !uploader.config.dryRun {
|
||||
err = uploader.storage.UploadFile(threadIndex, chunkPath, chunk.GetBytes())
|
||||
if err != nil {
|
||||
LOG_ERROR("UPLOAD_CHUNK", "Failed to upload the chunk %s: %v", chunkID, err)
|
||||
return false
|
||||
}
|
||||
|
||||
LOG_DEBUG("CHUNK_UPLOAD", "Chunk %s has been uploaded", chunkID)
|
||||
} else {
|
||||
LOG_DEBUG("CHUNK_UPLOAD", "Uploading was skipped for chunk %s", chunkID)
|
||||
}
|
||||
|
||||
uploader.completionFunc(chunk, task.chunkIndex, false, chunkSize, chunk.GetLength())
|
||||
atomic.AddInt32(&uploader.numberOfUploadingTasks, -1)
|
||||
return true
|
||||
|
||||
@@ -1,15 +1,15 @@
|
||||
// Copyright (c) Acrosync LLC. All rights reserved.
|
||||
// Licensed under the Fair Source License 0.9 (https://fair.io/)
|
||||
// User Limitation: 5 users
|
||||
// Free for personal use and commercial trial
|
||||
// Commercial use requires per-user licenses available from https://duplicacy.com
|
||||
|
||||
package duplicacy
|
||||
|
||||
import (
|
||||
"os"
|
||||
"time"
|
||||
"path"
|
||||
"testing"
|
||||
"runtime/debug"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
crypto_rand "crypto/rand"
|
||||
"math/rand"
|
||||
@@ -56,7 +56,6 @@ func TestUploaderAndDownloader(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
numberOfChunks := 100
|
||||
maxChunkSize := 64 * 1024
|
||||
|
||||
@@ -102,9 +101,8 @@ func TestUploaderAndDownloader(t *testing.T) {
|
||||
|
||||
chunkUploader.Stop()
|
||||
|
||||
|
||||
chunkDownloader := CreateChunkDownloader(config, storage, nil, true, testThreads)
|
||||
chunkDownloader.totalFileSize = int64(totalFileSize)
|
||||
chunkDownloader.totalChunkSize = int64(totalFileSize)
|
||||
|
||||
for _, chunk := range chunks {
|
||||
chunkDownloader.AddChunk(chunk.GetHash())
|
||||
|
||||
@@ -1,22 +1,23 @@
|
||||
// Copyright (c) Acrosync LLC. All rights reserved.
|
||||
// Licensed under the Fair Source License 0.9 (https://fair.io/)
|
||||
// User Limitation: 5 users
|
||||
// Free for personal use and commercial trial
|
||||
// Commercial use requires per-user licenses available from https://duplicacy.com
|
||||
|
||||
package duplicacy
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"bytes"
|
||||
"os"
|
||||
"crypto/hmac"
|
||||
"crypto/rand"
|
||||
"crypto/sha256"
|
||||
"encoding/binary"
|
||||
"encoding/hex"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"hash"
|
||||
"os"
|
||||
"runtime"
|
||||
"runtime/debug"
|
||||
"sync/atomic"
|
||||
"crypto/rand"
|
||||
"crypto/hmac"
|
||||
"crypto/sha256"
|
||||
"encoding/hex"
|
||||
|
||||
blake2 "github.com/minio/blake2b-simd"
|
||||
)
|
||||
@@ -28,6 +29,15 @@ var DEFAULT_KEY = []byte("duplicacy")
|
||||
// standard zlib levels of -1 to 9.
|
||||
var DEFAULT_COMPRESSION_LEVEL = 100
|
||||
|
||||
// The new header of the config file (to differentiate from the old format where the salt and iterations are fixed)
|
||||
var CONFIG_HEADER = "duplicacy\001"
|
||||
|
||||
// The length of the salt used in the new format
|
||||
var CONFIG_SALT_LENGTH = 32
|
||||
|
||||
// The default iterations for key derivation
|
||||
var CONFIG_DEFAULT_ITERATIONS = 16384
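// With the constants above, an encrypted config file in the new format is laid out as
//   CONFIG_HEADER | CONFIG_SALT_LENGTH bytes of random salt | uint32 little-endian iteration count | encrypted payload
// A minimal sketch of assembling that layout (it assumes bytes and encoding/binary are imported, and
// that the payload has already been encrypted and stripped of its ENCRYPTION_HEADER, as UploadConfig
// does further below):
func buildConfigFile(salt []byte, iterations uint32, encryptedPayload []byte) []byte {
    var out bytes.Buffer
    out.Write([]byte(CONFIG_HEADER))
    out.Write(salt) // must be CONFIG_SALT_LENGTH bytes long
    binary.Write(&out, binary.LittleEndian, iterations)
    out.Write(encryptedPayload)
    return out.Bytes()
}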
|
||||
|
||||
type Config struct {
|
||||
CompressionLevel int `json:"compression-level"`
|
||||
AverageChunkSize int `json:"average-chunk-size"`
|
||||
@@ -36,6 +46,8 @@ type Config struct {
|
||||
|
||||
ChunkSeed []byte `json:"chunk-seed"`
|
||||
|
||||
FixedNesting bool `json:"fixed-nesting"`
|
||||
|
||||
// Use HMAC-SHA256(hashKey, plaintext) as the chunk hash.
|
||||
// Use HMAC-SHA256(idKey, chunk hash) as the file name of the chunk
|
||||
// For chunks, use HMAC-SHA256(chunkKey, chunk hash) as the encryption key
|
||||
@@ -53,8 +65,9 @@ type Config struct {
|
||||
// for encrypting a non-chunk file
|
||||
FileKey []byte `json:"-"`
|
||||
|
||||
chunkPool chan *Chunk `json:"-"`
|
||||
chunkPool chan *Chunk
|
||||
numberOfChunks int32
|
||||
dryRun bool
|
||||
}
|
||||
|
||||
// Create an alias to avoid recursive calls on Config.MarshalJSON
|
||||
@@ -130,13 +143,14 @@ func (config *Config) Print() {
|
||||
}
|
||||
|
||||
func CreateConfigFromParameters(compressionLevel int, averageChunkSize int, maximumChunkSize int, mininumChunkSize int,
|
||||
isEncrypted bool, copyFrom *Config) (config *Config) {
|
||||
isEncrypted bool, copyFrom *Config, bitCopy bool) (config *Config) {
|
||||
|
||||
config = &Config{
|
||||
CompressionLevel: compressionLevel,
|
||||
AverageChunkSize: averageChunkSize,
|
||||
MaximumChunkSize: maximumChunkSize,
|
||||
MinimumChunkSize: mininumChunkSize,
|
||||
FixedNesting: true,
|
||||
}
|
||||
|
||||
if isEncrypted {
|
||||
@@ -168,6 +182,12 @@ func CreateConfigFromParameters(compressionLevel int, averageChunkSize int, maxi
|
||||
|
||||
config.ChunkSeed = copyFrom.ChunkSeed
|
||||
config.HashKey = copyFrom.HashKey
|
||||
|
||||
if bitCopy {
|
||||
config.IDKey = copyFrom.IDKey
|
||||
config.ChunkKey = copyFrom.ChunkKey
|
||||
config.FileKey = copyFrom.FileKey
|
||||
}
|
||||
}
|
||||
|
||||
config.chunkPool = make(chan *Chunk, runtime.NumCPU()*16)
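// The chunk pool above is a buffered channel used as a free list: taking a chunk prefers the pool and
// falls back to allocating, while returning a chunk drops it if the pool is already full. A generic
// sketch of that pattern (chunkPool, Get and Put are illustrative names; the actual GetChunk/PutChunk
// methods may differ in detail):
type chunkPool struct {
    free chan *Chunk
}

func (p *chunkPool) Get(newChunk func() *Chunk) *Chunk {
    select {
    case chunk := <-p.free:
        return chunk
    default:
        return newChunk() // pool empty: allocate a fresh chunk
    }
}

func (p *chunkPool) Put(chunk *Chunk) {
    select {
    case p.free <- chunk:
    default:
        // pool full: let the chunk be garbage collected
    }
}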
|
||||
@@ -315,10 +335,45 @@ func DownloadConfig(storage Storage, password string) (config *Config, isEncrypt
|
||||
return nil, false, err
|
||||
}
|
||||
|
||||
if len(configFile.GetBytes()) < len(ENCRYPTION_HEADER) {
|
||||
return nil, false, fmt.Errorf("The storage has an invalid config file")
|
||||
}
|
||||
|
||||
if string(configFile.GetBytes()[:len(ENCRYPTION_HEADER)-1]) == ENCRYPTION_HEADER[:len(ENCRYPTION_HEADER)-1] && len(password) == 0 {
|
||||
return nil, true, fmt.Errorf("The storage is likely to have been initialized with a password before")
|
||||
}
|
||||
|
||||
var masterKey []byte
|
||||
|
||||
if len(password) > 0 {
|
||||
masterKey = GenerateKeyFromPassword(password)
|
||||
|
||||
if string(configFile.GetBytes()[:len(ENCRYPTION_HEADER)]) == ENCRYPTION_HEADER {
|
||||
// This is the old config format with a static salt and a fixed number of iterations
|
||||
masterKey = GenerateKeyFromPassword(password, DEFAULT_KEY, CONFIG_DEFAULT_ITERATIONS)
|
||||
LOG_TRACE("CONFIG_FORMAT", "Using a static salt and %d iterations for key derivation", CONFIG_DEFAULT_ITERATIONS)
|
||||
} else if string(configFile.GetBytes()[:len(CONFIG_HEADER)]) == CONFIG_HEADER {
|
||||
// This is the new config format with a random salt and a configurable number of iterations
|
||||
encryptedLength := len(configFile.GetBytes()) - CONFIG_SALT_LENGTH - 4
|
||||
|
||||
// Extract the salt and the number of iterations
|
||||
saltStart := configFile.GetBytes()[len(CONFIG_HEADER):]
|
||||
iterations := binary.LittleEndian.Uint32(saltStart[CONFIG_SALT_LENGTH : CONFIG_SALT_LENGTH+4])
|
||||
LOG_TRACE("CONFIG_ITERATIONS", "Using %d iterations for key derivation", iterations)
|
||||
masterKey = GenerateKeyFromPassword(password, saltStart[:CONFIG_SALT_LENGTH], int(iterations))
|
||||
|
||||
// Copy to a temporary buffer to replace the header and remove the salt and the number of iterations
|
||||
var encrypted bytes.Buffer
|
||||
encrypted.Write([]byte(ENCRYPTION_HEADER))
|
||||
encrypted.Write(saltStart[CONFIG_SALT_LENGTH+4:])
|
||||
|
||||
configFile.Reset(false)
|
||||
configFile.Write(encrypted.Bytes())
|
||||
if len(configFile.GetBytes()) != encryptedLength {
|
||||
LOG_ERROR("CONFIG_DOWNLOAD", "Encrypted config has %d bytes instead of expected %d bytes", len(configFile.GetBytes()), encryptedLength)
|
||||
}
|
||||
} else {
|
||||
return nil, true, fmt.Errorf("The config file has an invalid header")
|
||||
}
|
||||
|
||||
// Decrypt the config file. masterKey == nil means no encryption.
|
||||
err = configFile.Decrypt(masterKey, "")
|
||||
@@ -330,23 +385,21 @@ func DownloadConfig(storage Storage, password string) (config *Config, isEncrypt
|
||||
config = CreateConfig()
|
||||
|
||||
err = json.Unmarshal(configFile.GetBytes(), config)
|
||||
|
||||
if err != nil {
|
||||
if bytes.Equal(configFile.GetBytes()[:9], []byte("duplicacy")) {
|
||||
return nil, true, fmt.Errorf("The storage is likely to have been initialized with a password before")
|
||||
} else {
|
||||
return nil, false, fmt.Errorf("Failed to parse the config file: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
storage.SetNestingLevels(config)
|
||||
|
||||
return config, false, nil
|
||||
|
||||
}
|
||||
|
||||
func UploadConfig(storage Storage, config *Config, password string) (bool) {
|
||||
func UploadConfig(storage Storage, config *Config, password string, iterations int) bool {
|
||||
|
||||
// This is the key to encrypt the config file.
|
||||
var masterKey []byte
|
||||
salt := make([]byte, CONFIG_SALT_LENGTH)
|
||||
|
||||
if len(password) > 0 {
|
||||
|
||||
@@ -355,7 +408,13 @@ func UploadConfig(storage Storage, config *Config, password string) (bool) {
|
||||
return false
|
||||
}
|
||||
|
||||
masterKey = GenerateKeyFromPassword(password)
|
||||
_, err := rand.Read(salt)
|
||||
if err != nil {
|
||||
LOG_ERROR("CONFIG_KEY", "Failed to generate random salt: %v", err)
|
||||
return false
|
||||
}
|
||||
|
||||
masterKey = GenerateKeyFromPassword(password, salt, iterations)
|
||||
}
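// GenerateKeyFromPassword derives the master key from the password, the freshly generated salt, and
// the configurable iteration count. A hedged sketch of such a derivation using PBKDF2-SHA256 (the
// exact KDF and its parameters are an assumption here, not taken from this diff; it assumes
// golang.org/x/crypto/pbkdf2 and crypto/sha256 are imported):
func deriveMasterKey(password string, salt []byte, iterations int) []byte {
    return pbkdf2.Key([]byte(password), salt, iterations, 32, sha256.New) // 32-byte key
}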
|
||||
|
||||
description, err := json.MarshalIndent(config, "", " ")
|
||||
@@ -372,11 +431,26 @@ func UploadConfig(storage Storage, config *Config, password string) (bool) {
|
||||
if len(password) > 0 {
|
||||
// Encrypt the config file with masterKey. If masterKey is nil then no encryption is performed.
|
||||
err = chunk.Encrypt(masterKey, "")
|
||||
|
||||
if err != nil {
|
||||
LOG_ERROR("CONFIG_CREATE", "Failed to create the config file: %v", err)
|
||||
return false
|
||||
}
|
||||
|
||||
// The new encrypted format for config is CONFIG_HEADER + salt + #iterations + encrypted content
|
||||
encryptedLength := len(chunk.GetBytes()) + CONFIG_SALT_LENGTH + 4
|
||||
|
||||
// Copy to a temporary buffer to replace the header and add the salt and the number of iterations
|
||||
var encrypted bytes.Buffer
|
||||
encrypted.Write([]byte(CONFIG_HEADER))
|
||||
encrypted.Write(salt)
|
||||
binary.Write(&encrypted, binary.LittleEndian, uint32(iterations))
|
||||
encrypted.Write(chunk.GetBytes()[len(ENCRYPTION_HEADER):])
|
||||
|
||||
chunk.Reset(false)
|
||||
chunk.Write(encrypted.Bytes())
|
||||
if len(chunk.GetBytes()) != encryptedLength {
|
||||
LOG_ERROR("CONFIG_CREATE", "Encrypted config has %d bytes instead of expected %d bytes", len(chunk.GetBytes()), encryptedLength)
|
||||
}
|
||||
}
|
||||
|
||||
err = storage.UploadFile(0, "config", chunk.GetBytes())
|
||||
@@ -402,8 +476,8 @@ func UploadConfig(storage Storage, config *Config, password string) (bool) {
|
||||
// ConfigStorage makes the general storage space available for storing duplicacy format snapshots. In essence,
|
||||
// it simply creates a file named 'config' that stores various parameters as well as a set of keys if encryption
|
||||
// is enabled.
|
||||
func ConfigStorage(storage Storage, compressionLevel int, averageChunkSize int, maximumChunkSize int,
|
||||
minimumChunkSize int, password string, copyFrom *Config) bool {
|
||||
func ConfigStorage(storage Storage, iterations int, compressionLevel int, averageChunkSize int, maximumChunkSize int,
|
||||
minimumChunkSize int, password string, copyFrom *Config, bitCopy bool) bool {
|
||||
|
||||
exist, _, _, err := storage.GetFileInfo(0, "config")
|
||||
if err != nil {
|
||||
@@ -416,12 +490,11 @@ func ConfigStorage(storage Storage, compressionLevel int, averageChunkSize int,
|
||||
return false
|
||||
}
|
||||
|
||||
|
||||
config := CreateConfigFromParameters(compressionLevel, averageChunkSize, maximumChunkSize, minimumChunkSize, len(password) > 0,
|
||||
copyFrom)
|
||||
copyFrom, bitCopy)
|
||||
if config == nil {
|
||||
return false
|
||||
}
|
||||
|
||||
return UploadConfig(storage, config, password)
|
||||
return UploadConfig(storage, config, password, iterations)
|
||||
}
|
||||
|
||||
@@ -1,25 +1,26 @@
|
||||
// Copyright (c) Acrosync LLC. All rights reserved.
|
||||
// Licensed under the Fair Source License 0.9 (https://fair.io/)
|
||||
// User Limitation: 5 users
|
||||
// Free for personal use and commercial trial
|
||||
// Commercial use requires per-user licenses available from https://duplicacy.com
|
||||
|
||||
package duplicacy
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"path"
|
||||
"strings"
|
||||
|
||||
"github.com/gilbertchen/go-dropbox"
|
||||
)
|
||||
|
||||
type DropboxStorage struct {
|
||||
RateLimitedStorage
|
||||
StorageBase
|
||||
|
||||
clients []*dropbox.Files
|
||||
minimumNesting int // The minimum level of directories to dive into before searching for the chunk file.
|
||||
storageDir string
|
||||
}
|
||||
|
||||
// CreateDropboxStorage creates a dropbox storage object.
|
||||
func CreateDropboxStorage(accessToken string, storageDir string, threads int) (storage *DropboxStorage, err error) {
|
||||
func CreateDropboxStorage(accessToken string, storageDir string, minimumNesting int, threads int) (storage *DropboxStorage, err error) {
|
||||
|
||||
var clients []*dropbox.Files
|
||||
for i := 0; i < threads; i++ {
|
||||
@@ -38,6 +39,7 @@ func CreateDropboxStorage(accessToken string, storageDir string, threads int) (s
|
||||
storage = &DropboxStorage{
|
||||
clients: clients,
|
||||
storageDir: storageDir,
|
||||
minimumNesting: minimumNesting,
|
||||
}
|
||||
|
||||
err = storage.CreateDirectory(0, "")
|
||||
@@ -45,6 +47,8 @@ func CreateDropboxStorage(accessToken string, storageDir string, threads int) (s
|
||||
return nil, fmt.Errorf("Can't create storage directory: %v", err)
|
||||
}
|
||||
|
||||
storage.DerivedStorage = storage
|
||||
storage.SetDefaultNestingLevels([]int{1}, 1)
|
||||
return storage, nil
|
||||
}
|
||||
|
||||
@@ -85,7 +89,7 @@ func (storage *DropboxStorage) ListFiles(threadIndex int, dir string) (files []s
|
||||
|
||||
if output.HasMore {
|
||||
output, err = storage.clients[threadIndex].ListFolderContinue(
|
||||
&dropbox.ListFolderContinueInput { Cursor: output.Cursor, })
|
||||
&dropbox.ListFolderContinueInput{Cursor: output.Cursor})
|
||||
|
||||
} else {
|
||||
break
|
||||
@@ -178,66 +182,6 @@ func (storage *DropboxStorage) GetFileInfo(threadIndex int, filePath string) (ex
|
||||
return true, output.Tag == "folder", int64(output.Size), nil
|
||||
}
|
||||
|
||||
// FindChunk finds the chunk with the specified id. If 'isFossil' is true, it will search for chunk files with
|
||||
// the suffix '.fsl'.
|
||||
func (storage *DropboxStorage) FindChunk(threadIndex int, chunkID string, isFossil bool) (filePath string, exist bool, size int64, err error) {
|
||||
dir := "/chunks"
|
||||
|
||||
suffix := ""
|
||||
if isFossil {
|
||||
suffix = ".fsl"
|
||||
}
|
||||
|
||||
// The minimum level of directories to dive into before searching for the chunk file.
|
||||
minimumLevel := 1
|
||||
|
||||
for level := 0; level * 2 < len(chunkID); level ++ {
|
||||
if level >= minimumLevel {
|
||||
filePath = path.Join(dir, chunkID[2 * level:]) + suffix
|
||||
var size int64
|
||||
exist, _, size, err = storage.GetFileInfo(threadIndex, filePath)
|
||||
if err != nil {
|
||||
return "", false, 0, err
|
||||
}
|
||||
if exist {
|
||||
return filePath, exist, size, nil
|
||||
}
|
||||
}
|
||||
|
||||
// Find the subdirectory the chunk file may reside.
|
||||
subDir := path.Join(dir, chunkID[2 * level: 2 * level + 2])
|
||||
exist, _, _, err = storage.GetFileInfo(threadIndex, subDir)
|
||||
if err != nil {
|
||||
return "", false, 0, err
|
||||
}
|
||||
|
||||
if exist {
|
||||
dir = subDir
|
||||
continue
|
||||
}
|
||||
|
||||
if level < minimumLevel {
|
||||
// Create the subdirectory if it doesn't exist.
|
||||
err = storage.CreateDirectory(threadIndex, subDir)
|
||||
if err != nil {
|
||||
return "", false, 0, err
|
||||
}
|
||||
|
||||
dir = subDir
|
||||
continue
|
||||
}
|
||||
|
||||
// The chunk must be under this subdirectory but it doesn't exist.
|
||||
return path.Join(dir, chunkID[2 * level:])[1:] + suffix, false, 0, nil
|
||||
|
||||
}
|
||||
|
||||
LOG_FATAL("CHUNK_FIND", "Chunk %s is still not found after having searched a maximum level of directories",
|
||||
chunkID)
|
||||
return "", false, 0, nil
|
||||
|
||||
}
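// The per-backend FindChunk implementations being removed here (and the StorageBase nesting logic that
// replaces them) map a chunk ID onto a nested path by peeling off two hex characters per directory
// level: with one level of nesting, chunk "abcd..." is stored as "chunks/ab/cd...". A small sketch of
// that mapping for a fixed nesting level (it assumes the "path" package is imported; the real code
// also probes existing directories and creates missing ones up to the minimum level):
func nestedChunkPath(chunkID string, nestingLevel int, isFossil bool) string {
    suffix := ""
    if isFossil {
        suffix = ".fsl" // fossils are chunks renamed during pruning
    }
    dir := "chunks"
    level := 0
    for ; level < nestingLevel && 2*(level+1) < len(chunkID); level++ {
        dir = path.Join(dir, chunkID[2*level:2*level+2])
    }
    return path.Join(dir, chunkID[2*level:]) + suffix
}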
|
||||
|
||||
// DownloadFile reads the file at 'filePath' into the chunk.
|
||||
func (storage *DropboxStorage) DownloadFile(threadIndex int, filePath string, chunk *Chunk) (err error) {
|
||||
|
||||
@@ -281,16 +225,16 @@ func (storage *DropboxStorage) UploadFile(threadIndex int, filePath string, cont
|
||||
|
||||
// If a local snapshot cache is needed for the storage to avoid downloading/uploading chunks too often when
|
||||
// managing snapshots.
|
||||
func (storage *DropboxStorage) IsCacheNeeded() (bool) { return true }
|
||||
func (storage *DropboxStorage) IsCacheNeeded() bool { return true }
|
||||
|
||||
// If the 'MoveFile' method is implemented.
|
||||
func (storage *DropboxStorage) IsMoveFileImplemented() (bool) { return true }
|
||||
func (storage *DropboxStorage) IsMoveFileImplemented() bool { return true }
|
||||
|
||||
// If the storage can guarantee strong consistency.
|
||||
func (storage *DropboxStorage) IsStrongConsistent() (bool) { return false }
|
||||
func (storage *DropboxStorage) IsStrongConsistent() bool { return false }
|
||||
|
||||
// If the storage supports fast listing of files names.
|
||||
func (storage *DropboxStorage) IsFastListing() (bool) { return false }
|
||||
func (storage *DropboxStorage) IsFastListing() bool { return false }
|
||||
|
||||
// Enable the test mode.
|
||||
func (storage *DropboxStorage) EnableTestMode() {}
|
||||
|
||||
@@ -1,25 +1,23 @@
|
||||
// Copyright (c) Acrosync LLC. All rights reserved.
|
||||
// Licensed under the Fair Source License 0.9 (https://fair.io/)
|
||||
// User Limitation: 5 users
|
||||
|
||||
// Free for personal use and commercial trial
|
||||
// Commercial use requires per-user licenses available from https://duplicacy.com
|
||||
package duplicacy
|
||||
|
||||
import (
|
||||
"os"
|
||||
"fmt"
|
||||
"path/filepath"
|
||||
"io/ioutil"
|
||||
"sort"
|
||||
"regexp"
|
||||
"strconv"
|
||||
"time"
|
||||
"encoding/json"
|
||||
"encoding/base64"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"regexp"
|
||||
"runtime"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"time"
|
||||
)
|
||||
|
||||
|
||||
// This is the hidden directory in the repository for storing various files.
|
||||
var DUPLICACY_DIRECTORY = ".duplicacy"
|
||||
var DUPLICACY_FILE = ".duplicacy"
|
||||
@@ -288,7 +286,7 @@ func (entry *Entry) String(maxSizeDigits int) string {
|
||||
return fmt.Sprintf("%*d %s %64s %s", maxSizeDigits, entry.Size, modifiedTime, entry.Hash, entry.Path)
|
||||
}
|
||||
|
||||
func (entry *Entry) RestoreMetadata(fullPath string, fileInfo *os.FileInfo) bool {
|
||||
func (entry *Entry) RestoreMetadata(fullPath string, fileInfo *os.FileInfo, setOwner bool) bool {
|
||||
|
||||
if fileInfo == nil {
|
||||
stat, err := os.Stat(fullPath)
|
||||
@@ -320,9 +318,12 @@ func (entry *Entry) RestoreMetadata(fullPath string, fileInfo *os.FileInfo) bool
|
||||
entry.SetAttributesToFile(fullPath)
|
||||
}
|
||||
|
||||
if setOwner {
|
||||
return SetOwner(fullPath, entry, fileInfo)
|
||||
} else {
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
// Return -1 if 'left' should appear before 'right', 1 if opposite, and 0 if they are the same.
|
||||
// Files are always arranged before subdirectories under the same parent directory.
|
||||
@@ -489,7 +490,14 @@ func ListEntries(top string, path string, fileList *[]*Entry, patterns [] string
|
||||
skippedFiles = append(skippedFiles, entry.Path)
|
||||
continue
|
||||
}
|
||||
entry = CreateEntryFromFileInfo(stat, "")
|
||||
|
||||
newEntry := CreateEntryFromFileInfo(stat, "")
|
||||
if runtime.GOOS == "windows" {
|
||||
// On Windows, stat.Name() is the last component of the target, so we need to construct the correct
|
||||
// path from f.Name(); note that a "/" is appended assuming a symbolic link is always a directory
|
||||
newEntry.Path = filepath.Join(normalizedPath, f.Name()) + "/"
|
||||
}
|
||||
entry = newEntry
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -1,16 +1,16 @@
|
||||
// Copyright (c) Acrosync LLC. All rights reserved.
|
||||
// Licensed under the Fair Source License 0.9 (https://fair.io/)
|
||||
// User Limitation: 5 users
|
||||
// Free for personal use and commercial trial
|
||||
// Commercial use requires per-user licenses available from https://duplicacy.com
|
||||
|
||||
package duplicacy
|
||||
|
||||
import (
|
||||
"testing"
|
||||
"io/ioutil"
|
||||
"math/rand"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"math/rand"
|
||||
"sort"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestEntrySort(t *testing.T) {
|
||||
@@ -107,7 +107,6 @@ func TestEntryList(t *testing.T) {
|
||||
"ab3/c",
|
||||
}
|
||||
|
||||
|
||||
var entry1, entry2 *Entry
|
||||
|
||||
for i, p1 := range DATA {
|
||||
@@ -217,4 +216,3 @@ func TestEntryList(t *testing.T) {
|
||||
}
|
||||
|
||||
}
|
@@ -1,6 +1,6 @@
|
||||
// Copyright (c) Acrosync LLC. All rights reserved.
|
||||
// Licensed under the Fair Source License 0.9 (https://fair.io/)
|
||||
// User Limitation: 5 users
|
||||
// Free for personal use and commercial trial
|
||||
// Commercial use requires per-user licenses available from https://duplicacy.com
|
||||
|
||||
package duplicacy
|
||||
|
||||
@@ -21,7 +21,7 @@ type FileReader struct {
|
||||
}
|
||||
|
||||
// CreateFileReader creates a file reader.
|
||||
func CreateFileReader(top string, files[] *Entry) (*FileReader) {
|
||||
func CreateFileReader(top string, files []*Entry) *FileReader {
|
||||
|
||||
reader := &FileReader{
|
||||
top: top,
|
||||
@@ -68,7 +68,3 @@ func (reader *FileReader) NextFile() bool{
|
||||
reader.CurrentFile = nil
|
||||
return false
|
||||
}
@@ -1,38 +1,44 @@
|
||||
// Copyright (c) Acrosync LLC. All rights reserved.
|
||||
// Licensed under the Fair Source License 0.9 (https://fair.io/)
|
||||
// User Limitation: 5 users
|
||||
// Free for personal use and commercial trial
|
||||
// Commercial use requires per-user licenses available from https://duplicacy.com
|
||||
|
||||
package duplicacy
|
||||
|
||||
import (
|
||||
"os"
|
||||
"fmt"
|
||||
"path"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"time"
|
||||
"math/rand"
|
||||
"os"
|
||||
"path"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
// FileStorage is a local on-disk file storage implementing the Storage interface.
|
||||
type FileStorage struct {
|
||||
RateLimitedStorage
|
||||
StorageBase
|
||||
|
||||
isCacheNeeded bool // Network storages require caching
|
||||
storageDir string
|
||||
numberOfThreads int
|
||||
}
|
||||
|
||||
// CreateFileStorage creates a file storage.
|
||||
func CreateFileStorage(storageDir string, threads int) (storage *FileStorage, err error) {
|
||||
func CreateFileStorage(storageDir string, isCacheNeeded bool, threads int) (storage *FileStorage, err error) {
|
||||
|
||||
var stat os.FileInfo
|
||||
|
||||
stat, err = os.Stat(storageDir)
|
||||
if err != nil {
|
||||
if os.IsNotExist(err) {
|
||||
err = os.MkdirAll(storageDir, 0744)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
} else {
|
||||
return nil, err
|
||||
}
|
||||
} else {
|
||||
if !stat.IsDir() {
|
||||
return nil, fmt.Errorf("The storage path %s is a file", storageDir)
|
||||
@@ -45,12 +51,15 @@ func CreateFileStorage(storageDir string, threads int) (storage *FileStorage, er
|
||||
|
||||
storage = &FileStorage{
|
||||
storageDir: storageDir,
|
||||
isCacheNeeded: isCacheNeeded,
|
||||
numberOfThreads: threads,
|
||||
}
|
||||
|
||||
// Random number for generating the temporary chunk file suffix.
|
||||
rand.Seed(time.Now().UnixNano())
|
||||
|
||||
storage.DerivedStorage = storage
|
||||
storage.SetDefaultNestingLevels([]int{2, 3}, 2)
|
||||
return storage, nil
|
||||
}
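The call to SetDefaultNestingLevels([]int{2, 3}, 2) above decides how chunk files are laid out under chunks/. As a rough illustration only (chunkPath is a hypothetical helper, and the exact split is an assumption based on the FindChunk logic removed further below, where a nesting level of n turns the first n pairs of hex digits of the chunk ID into subdirectory names):

package main

import (
	"fmt"
	"path"
)

// chunkPath sketches the layout implied by a nesting level of n: the first n
// pairs of hex digits of the chunk ID become subdirectories under "chunks/",
// and the remainder of the ID is the file name.
func chunkPath(chunkID string, level int, isFossil bool) string {
	dir := "chunks"
	i := 0
	for ; i < level && (i+1)*2 < len(chunkID); i++ {
		dir = path.Join(dir, chunkID[i*2:i*2+2])
	}
	name := chunkID[i*2:]
	if isFossil {
		name += ".fsl"
	}
	return path.Join(dir, name)
}

func main() {
	id := "9f86d081884c7d659a2feaa0c55ad015"
	fmt.Println(chunkPath(id, 2, false)) // chunks/9f/86/d081884c7d659a2feaa0c55ad015
	fmt.Println(chunkPath(id, 2, true))  // chunks/9f/86/d081884c7d659a2feaa0c55ad015.fsl
}

With a default of {2, 3}, the same chunk can be looked up at either two or three directory levels, which is why the removed FindChunk below walks down one 2-character subdirectory at a time.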
|
||||
|
||||
@@ -118,68 +127,6 @@ func (storage *FileStorage) GetFileInfo(threadIndex int, filePath string) (exist
|
||||
return true, stat.IsDir(), stat.Size(), nil
|
||||
}
|
||||
|
||||
// FindChunk finds the chunk with the specified id. If 'isFossil' is true, it will search for chunk files with the
|
||||
// suffix '.fsl'.
|
||||
func (storage *FileStorage) FindChunk(threadIndex int, chunkID string, isFossil bool) (filePath string, exist bool, size int64, err error) {
|
||||
dir := path.Join(storage.storageDir, "chunks")
|
||||
|
||||
suffix := ""
|
||||
if isFossil {
|
||||
suffix = ".fsl"
|
||||
}
|
||||
|
||||
// The minimum level of directories to dive into before searching for the chunk file.
|
||||
minimumLevel := 2
|
||||
|
||||
for level := 0; level * 2 < len(chunkID); level ++ {
|
||||
if level >= minimumLevel {
|
||||
filePath = path.Join(dir, chunkID[2 * level:]) + suffix
|
||||
if stat, err := os.Stat(filePath); err == nil && !stat.IsDir() {
|
||||
return filePath[len(storage.storageDir) + 1:], true, stat.Size(), nil
|
||||
} else if err == nil && stat.IsDir() {
|
||||
return filePath[len(storage.storageDir) + 1:], true, 0, fmt.Errorf("The path %s is a directory", filePath)
|
||||
}
|
||||
}
|
||||
|
||||
// Find the subdirectory the chunk file may reside.
|
||||
subDir := path.Join(dir, chunkID[2 * level: 2 * level + 2])
|
||||
stat, err := os.Stat(subDir)
|
||||
if err == nil && stat.IsDir() {
|
||||
dir = subDir
|
||||
continue
|
||||
}
|
||||
|
||||
if level < minimumLevel {
|
||||
// Create the subdirectory if it doesn't exist.
|
||||
|
||||
if err == nil && !stat.IsDir() {
|
||||
return "", false, 0, fmt.Errorf("The path %s is not a directory", subDir)
|
||||
}
|
||||
|
||||
err = os.Mkdir(subDir, 0744)
|
||||
if err != nil {
|
||||
// The directory may have been created by other threads so check it again.
|
||||
stat, _ := os.Stat(subDir)
|
||||
if stat == nil || !stat.IsDir() {
|
||||
return "", false, 0, err
|
||||
}
|
||||
}
|
||||
|
||||
dir = subDir
|
||||
continue
|
||||
}
|
||||
|
||||
// The chunk must be under this subdirectory but it doesn't exist.
|
||||
return path.Join(dir, chunkID[2 * level:])[len(storage.storageDir) + 1:] + suffix, false, 0, nil
|
||||
|
||||
}
|
||||
|
||||
LOG_FATAL("CHUNK_FIND", "Chunk %s is still not found after having searched a maximum level of directories",
|
||||
chunkID)
|
||||
return "", false, 0, nil
|
||||
|
||||
}
|
||||
|
||||
// DownloadFile reads the file at 'filePath' into the chunk.
|
||||
func (storage *FileStorage) DownloadFile(threadIndex int, filePath string, chunk *Chunk) (err error) {
|
||||
|
||||
@@ -203,6 +150,26 @@ func (storage *FileStorage) UploadFile(threadIndex int, filePath string, content
|
||||
|
||||
fullPath := path.Join(storage.storageDir, filePath)
|
||||
|
||||
if len(strings.Split(filePath, "/")) > 2 {
|
||||
dir := path.Dir(fullPath)
|
||||
// Use Lstat() instead of Stat() since 1) Stat() doesn't work for deduplicated disks on Windows and 2) there isn't
|
||||
// really a need to follow the link if filePath is a link.
|
||||
stat, err := os.Lstat(dir)
|
||||
if err != nil {
|
||||
if !os.IsNotExist(err) {
|
||||
return err
|
||||
}
|
||||
err = os.MkdirAll(dir, 0744)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
if !stat.IsDir() {
|
||||
fmt.Errorf("The path %s is not a directory", dir)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
letters := "abcdefghijklmnopqrstuvwxyz"
|
||||
suffix := make([]byte, 8)
|
||||
for i := range suffix {
|
||||
@@ -241,16 +208,16 @@ func (storage *FileStorage) UploadFile(threadIndex int, filePath string, content
|
||||
|
||||
// If a local snapshot cache is needed for the storage to avoid downloading/uploading chunks too often when
|
||||
// managing snapshots.
|
||||
func (storage *FileStorage) IsCacheNeeded () (bool) { return false }
|
||||
func (storage *FileStorage) IsCacheNeeded() bool { return storage.isCacheNeeded }
|
||||
|
||||
// If the 'MoveFile' method is implemented.
|
||||
func (storage *FileStorage) IsMoveFileImplemented() (bool) { return true }
|
||||
func (storage *FileStorage) IsMoveFileImplemented() bool { return true }
|
||||
|
||||
// If the storage can guarantee strong consistency.
|
||||
func (storage *FileStorage) IsStrongConsistent() (bool) { return true }
|
||||
func (storage *FileStorage) IsStrongConsistent() bool { return true }
|
||||
|
||||
// If the storage supports fast listing of files names.
|
||||
func (storage *FileStorage) IsFastListing() (bool) { return false }
|
||||
func (storage *FileStorage) IsFastListing() bool { return false }
|
||||
|
||||
// Enable the test mode.
|
||||
func (storage *FileStorage) EnableTestMode() {}
|
||||
|
||||
@@ -1,22 +1,22 @@
|
||||
// Copyright (c) Acrosync LLC. All rights reserved.
|
||||
// Licensed under the Fair Source License 0.9 (https://fair.io/)
|
||||
// User Limitation: 5 users
|
||||
// Free for personal use and commercial trial
|
||||
// Commercial use requires per-user licenses available from https://duplicacy.com
|
||||
|
||||
package duplicacy
|
||||
|
||||
import (
|
||||
"io"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"net"
|
||||
"path"
|
||||
"time"
|
||||
"sync"
|
||||
"strings"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"math/rand"
|
||||
"encoding/json"
|
||||
"net"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"path"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"golang.org/x/net/context"
|
||||
"golang.org/x/oauth2"
|
||||
@@ -25,16 +25,18 @@ import (
|
||||
)
|
||||
|
||||
type GCDStorage struct {
|
||||
RateLimitedStorage
|
||||
StorageBase
|
||||
|
||||
service *drive.Service
|
||||
idCache map[string]string
|
||||
idCacheLock *sync.Mutex
|
||||
backoff int
|
||||
idCacheLock sync.Mutex
|
||||
backoffs []int // desired backoff time in seconds for each thread
|
||||
attempts []int // number of failed attempts since last success for each thread
|
||||
|
||||
createDirectoryLock sync.Mutex
|
||||
isConnected bool
|
||||
numberOfThreads int
|
||||
TestMode bool
|
||||
|
||||
}
|
||||
|
||||
type GCDConfig struct {
|
||||
@@ -44,12 +46,19 @@ type GCDConfig struct {
|
||||
Token oauth2.Token `json:"token"`
|
||||
}
|
||||
|
||||
func (storage *GCDStorage) shouldRetry(err error) (bool, error) {
|
||||
func (storage *GCDStorage) shouldRetry(threadIndex int, err error) (bool, error) {
|
||||
|
||||
const MAX_ATTEMPTS = 15
|
||||
|
||||
maximumBackoff := 64
|
||||
if maximumBackoff < storage.numberOfThreads {
|
||||
maximumBackoff = storage.numberOfThreads
|
||||
}
|
||||
retry := false
|
||||
message := ""
|
||||
if err == nil {
|
||||
storage.backoff = 1
|
||||
storage.backoffs[threadIndex] = 1
|
||||
storage.attempts[threadIndex] = 0
|
||||
return false, nil
|
||||
} else if e, ok := err.(*googleapi.Error); ok {
|
||||
if 500 <= e.Code && e.Code < 600 {
|
||||
@@ -62,8 +71,14 @@ func (storage *GCDStorage) shouldRetry(err error) (bool, error) {
|
||||
retry = true
|
||||
} else if e.Code == 403 {
|
||||
// User Rate Limit Exceeded
|
||||
message = "User Rate Limit Exceeded"
|
||||
message = e.Message
|
||||
retry = true
|
||||
} else if e.Code == 401 {
|
||||
// Only retry on authorization error when storage has been connected before
|
||||
if storage.isConnected {
|
||||
message = "Authorization Error"
|
||||
retry = true
|
||||
}
|
||||
}
|
||||
} else if e, ok := err.(*url.Error); ok {
|
||||
message = e.Error()
|
||||
@@ -77,19 +92,37 @@ func (storage *GCDStorage) shouldRetry(err error) (bool, error) {
|
||||
retry = err.Temporary()
|
||||
}
|
||||
|
||||
if !retry || storage.backoff >= 256{
|
||||
storage.backoff = 1
|
||||
if !retry {
|
||||
return false, err
|
||||
}
|
||||
|
||||
delay := float32(storage.backoff) * rand.Float32()
|
||||
LOG_DEBUG("GCD_RETRY", "%s; retrying after %.2f seconds", message, delay)
|
||||
time.Sleep(time.Duration(float32(storage.backoff) * float32(time.Second)))
|
||||
storage.backoff *= 2
|
||||
if storage.attempts[threadIndex] >= MAX_ATTEMPTS {
|
||||
LOG_INFO("GCD_RETRY", "[%d] Maximum number of retries reached (backoff: %d, attempts: %d)",
|
||||
threadIndex, storage.backoffs[threadIndex], storage.attempts[threadIndex])
|
||||
storage.backoffs[threadIndex] = 1
|
||||
storage.attempts[threadIndex] = 0
|
||||
return false, err
|
||||
}
|
||||
|
||||
if storage.backoffs[threadIndex] < maximumBackoff {
|
||||
storage.backoffs[threadIndex] *= 2
|
||||
}
|
||||
if storage.backoffs[threadIndex] > maximumBackoff {
|
||||
storage.backoffs[threadIndex] = maximumBackoff
|
||||
}
|
||||
storage.attempts[threadIndex] += 1
|
||||
delay := float64(storage.backoffs[threadIndex]) * rand.Float64() * 2
|
||||
LOG_DEBUG("GCD_RETRY", "[%d] %s; retrying after %.2f seconds (backoff: %d, attempts: %d)",
|
||||
threadIndex, message, delay, storage.backoffs[threadIndex], storage.attempts[threadIndex])
|
||||
time.Sleep(time.Duration(delay * float64(time.Second)))
|
||||
|
||||
return true, nil
|
||||
}
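The per-thread backoff above doubles up to a cap, sleeps for a randomized fraction of the backoff (jitter), and gives up after a fixed number of failed attempts. A minimal standalone sketch of the same pattern; the function name, parameters, and limits here are illustrative rather than duplicacy's API, and duplicacy keeps this state per upload thread instead of per call:

package main

import (
	"fmt"
	"math/rand"
	"time"
)

// retryWithBackoff retries op with capped exponential backoff and random
// jitter, giving up after maxAttempts consecutive failures.
func retryWithBackoff(op func() error, maxAttempts int, maxBackoffSeconds int) error {
	backoff := 1
	for attempt := 1; attempt <= maxAttempts; attempt++ {
		err := op()
		if err == nil {
			return nil
		}
		if attempt == maxAttempts {
			return fmt.Errorf("giving up after %d attempts: %v", maxAttempts, err)
		}
		// Sleep for a random duration up to twice the current backoff.
		delay := float64(backoff) * rand.Float64() * 2
		fmt.Printf("attempt %d failed (%v); retrying after %.2f seconds\n", attempt, err, delay)
		time.Sleep(time.Duration(delay * float64(time.Second)))
		if backoff < maxBackoffSeconds {
			backoff *= 2
		}
	}
	return nil
}

func main() {
	calls := 0
	err := retryWithBackoff(func() error {
		calls++
		if calls < 3 {
			return fmt.Errorf("transient error %d", calls)
		}
		return nil
	}, 15, 64)
	fmt.Println("result:", err)
}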
|
||||
|
||||
func (storage *GCDStorage) convertFilePath(filePath string) (string) {
|
||||
// convertFilePath converts the path for a fossil in the form of 'chunks/id.fsl' to 'fossils/id'. This is because
|
||||
// GCD doesn't support file renaming. Instead, it only allows one file to be moved from one directory to another.
|
||||
// By adding a layer of path conversion we're pretending that we can rename between 'chunks/id' and 'chunks/id.fsl'
|
||||
func (storage *GCDStorage) convertFilePath(filePath string) string {
|
||||
if strings.HasPrefix(filePath, "chunks/") && strings.HasSuffix(filePath, ".fsl") {
|
||||
return "fossils/" + filePath[len("chunks/"):len(filePath)-len(".fsl")]
|
||||
}
|
||||
@@ -122,7 +155,7 @@ func (storage *GCDStorage) deletePathID(path string) {
|
||||
storage.idCacheLock.Unlock()
|
||||
}
|
||||
|
||||
func (storage *GCDStorage) listFiles(parentID string, listFiles bool) ([]*drive.File, error) {
|
||||
func (storage *GCDStorage) listFiles(threadIndex int, parentID string, listFiles bool, listDirectories bool) ([]*drive.File, error) {
|
||||
|
||||
if parentID == "" {
|
||||
return nil, fmt.Errorf("No parent ID provided")
|
||||
@@ -132,11 +165,11 @@ func (storage *GCDStorage) listFiles(parentID string, listFiles bool) ([]*drive.
|
||||
|
||||
startToken := ""
|
||||
|
||||
query := "'" + parentID + "' in parents and "
|
||||
if listFiles {
|
||||
query += "mimeType != 'application/vnd.google-apps.folder'"
|
||||
} else {
|
||||
query += "mimeType = 'application/vnd.google-apps.folder'"
|
||||
query := "'" + parentID + "' in parents "
|
||||
if listFiles && !listDirectories {
|
||||
query += "and mimeType != 'application/vnd.google-apps.folder'"
|
||||
} else if !listFiles && !listDirectories {
|
||||
query += "and mimeType = 'application/vnd.google-apps.folder'"
|
||||
}
|
||||
|
||||
maxCount := int64(1000)
|
||||
@@ -150,7 +183,7 @@ func (storage *GCDStorage) listFiles(parentID string, listFiles bool) ([]*drive.
|
||||
|
||||
for {
|
||||
fileList, err = storage.service.Files.List().Q(query).Fields("nextPageToken", "files(name, mimeType, id, size)").PageToken(startToken).PageSize(maxCount).Do()
|
||||
if retry, e := storage.shouldRetry(err); e == nil && !retry {
|
||||
if retry, e := storage.shouldRetry(threadIndex, err); e == nil && !retry {
|
||||
break
|
||||
} else if retry {
|
||||
continue
|
||||
@@ -167,11 +200,10 @@ func (storage *GCDStorage) listFiles(parentID string, listFiles bool) ([]*drive.
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
return files, nil
|
||||
}
|
||||
|
||||
func (storage *GCDStorage) listByName(parentID string, name string) (string, bool, int64, error) {
|
||||
func (storage *GCDStorage) listByName(threadIndex int, parentID string, name string) (string, bool, int64, error) {
|
||||
|
||||
var fileList *drive.FileList
|
||||
var err error
|
||||
@@ -180,7 +212,7 @@ func (storage *GCDStorage) listByName(parentID string, name string) (string, boo
|
||||
query := "name = '" + name + "' and '" + parentID + "' in parents"
|
||||
fileList, err = storage.service.Files.List().Q(query).Fields("files(name, mimeType, id, size)").Do()
|
||||
|
||||
if retry, e := storage.shouldRetry(err); e == nil && !retry {
|
||||
if retry, e := storage.shouldRetry(threadIndex, err); e == nil && !retry {
|
||||
break
|
||||
} else if retry {
|
||||
continue
|
||||
@@ -198,7 +230,14 @@ func (storage *GCDStorage) listByName(parentID string, name string) (string, boo
|
||||
return file.Id, file.MimeType == "application/vnd.google-apps.folder", file.Size, nil
|
||||
}
|
||||
|
||||
func (storage *GCDStorage) getIDFromPath(path string) (string, error) {
|
||||
// getIDFromPath returns the id of the given path. If 'createDirectories' is true, create the given path and all its
|
||||
// parent directories if they don't exist. Note that if 'createDirectories' is false, it may return an empty 'fileID'
|
||||
// if the file doesn't exist.
|
||||
func (storage *GCDStorage) getIDFromPath(threadIndex int, filePath string, createDirectories bool) (string, error) {
|
||||
|
||||
if fileID, ok := storage.findPathID(filePath); ok {
|
||||
return fileID, nil
|
||||
}
|
||||
|
||||
fileID := "root"
|
||||
|
||||
@@ -206,33 +245,49 @@ func (storage *GCDStorage) getIDFromPath(path string) (string, error) {
|
||||
fileID = rootID
|
||||
}
|
||||
|
||||
names := strings.Split(path, "/")
|
||||
names := strings.Split(filePath, "/")
|
||||
current := ""
|
||||
for i, name := range names {
|
||||
|
||||
if len(current) == 0 {
|
||||
current = name
|
||||
} else {
|
||||
current = current + "/" + name
|
||||
}
|
||||
|
||||
// Find the intermediate directory in the cache first.
|
||||
current = path.Join(current, name)
|
||||
currentID, ok := storage.findPathID(current)
|
||||
if ok {
|
||||
fileID = currentID
|
||||
continue
|
||||
}
|
||||
|
||||
// Check if the directory exists.
|
||||
var err error
|
||||
var isDir bool
|
||||
fileID, isDir, _, err = storage.listByName(fileID, name)
|
||||
fileID, isDir, _, err = storage.listByName(threadIndex, fileID, name)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
if fileID == "" {
|
||||
return "", fmt.Errorf("Path %s doesn't exist", path)
|
||||
if !createDirectories {
|
||||
return "", nil
|
||||
}
|
||||
|
||||
// Only one thread can create the directory at a time -- GCD allows multiple directories
|
||||
// to have the same name but different ids.
|
||||
storage.createDirectoryLock.Lock()
|
||||
err = storage.CreateDirectory(threadIndex, current)
|
||||
storage.createDirectoryLock.Unlock()
|
||||
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("Failed to create directory '%s': %v", current, err)
|
||||
}
|
||||
currentID, ok = storage.findPathID(current)
|
||||
if !ok {
|
||||
return "", fmt.Errorf("Directory '%s' created by id not found", current)
|
||||
}
|
||||
fileID = currentID
|
||||
continue
|
||||
} else {
|
||||
storage.savePathID(current, fileID)
|
||||
}
|
||||
if i != len(names)-1 && !isDir {
|
||||
return "", fmt.Errorf("Invalid path %s", path)
|
||||
return "", fmt.Errorf("Path '%s' is not a directory", current)
|
||||
}
|
||||
}
|
||||
return fileID, nil
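getIDFromPath leans on the idCache/idCacheLock pair (via findPathID and savePathID) because Google Drive addresses files by opaque ids rather than paths. A stripped-down sketch of such a cache, with hypothetical names, assuming only that lookups and inserts can come from several threads at once:

package main

import (
	"fmt"
	"sync"
)

// pathIDCache is an illustrative stand-in for GCDStorage's idCache and
// idCacheLock: resolved path-to-id mappings are remembered so that threads
// do not repeat the same listByName lookups against the Drive API.
type pathIDCache struct {
	lock sync.Mutex
	ids  map[string]string
}

func (c *pathIDCache) find(p string) (string, bool) {
	c.lock.Lock()
	defer c.lock.Unlock()
	id, ok := c.ids[p]
	return id, ok
}

func (c *pathIDCache) save(p, id string) {
	c.lock.Lock()
	defer c.lock.Unlock()
	c.ids[p] = id
}

func main() {
	cache := &pathIDCache{ids: map[string]string{}}
	cache.save("chunks", "1aBcD-hypothetical-id") // a made-up Drive file id
	if id, ok := cache.find("chunks"); ok {
		fmt.Println("cached id for 'chunks':", id)
	}
}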
|
||||
@@ -251,13 +306,13 @@ func CreateGCDStorage(tokenFile string, storagePath string, threads int) (storag
|
||||
return nil, err
|
||||
}
|
||||
|
||||
config := oauth2.Config{
|
||||
oauth2Config := oauth2.Config{
|
||||
ClientID: gcdConfig.ClientID,
|
||||
ClientSecret: gcdConfig.ClientSecret,
|
||||
Endpoint: gcdConfig.Endpoint,
|
||||
}
|
||||
|
||||
authClient := config.Client(context.Background(), &gcdConfig.Token)
|
||||
authClient := oauth2Config.Client(context.Background(), &gcdConfig.Token)
|
||||
|
||||
service, err := drive.New(authClient)
|
||||
if err != nil {
|
||||
@@ -268,10 +323,16 @@ func CreateGCDStorage(tokenFile string, storagePath string, threads int) (storag
|
||||
service: service,
|
||||
numberOfThreads: threads,
|
||||
idCache: make(map[string]string),
|
||||
idCacheLock: &sync.Mutex{},
|
||||
backoffs: make([]int, threads),
|
||||
attempts: make([]int, threads),
|
||||
}
|
||||
|
||||
storagePathID, err := storage.getIDFromPath(storagePath)
|
||||
for i := range storage.backoffs {
|
||||
storage.backoffs[i] = 1
|
||||
storage.attempts[i] = 0
|
||||
}
|
||||
|
||||
storagePathID, err := storage.getIDFromPath(0, storagePath, false)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -279,7 +340,7 @@ func CreateGCDStorage(tokenFile string, storagePath string, threads int) (storag
|
||||
storage.idCache[""] = storagePathID
|
||||
|
||||
for _, dir := range []string{"chunks", "snapshots", "fossils"} {
|
||||
dirID, isDir, _, err := storage.listByName(storagePathID, dir)
|
||||
dirID, isDir, _, err := storage.listByName(0, storagePathID, dir)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -289,14 +350,17 @@ func CreateGCDStorage(tokenFile string, storagePath string, threads int) (storag
|
||||
return nil, err
|
||||
}
|
||||
} else if !isDir {
|
||||
return nil, fmt.Errorf("%s/%s is not a directory", storagePath + "/" + dir)
|
||||
return nil, fmt.Errorf("%s/%s is not a directory", storagePath, dir)
|
||||
} else {
|
||||
storage.idCache[dir] = dirID
|
||||
}
|
||||
}
|
||||
|
||||
return storage, nil
|
||||
storage.isConnected = true
|
||||
|
||||
storage.DerivedStorage = storage
|
||||
storage.SetDefaultNestingLevels([]int{0}, 0)
|
||||
return storage, nil
|
||||
}
|
||||
|
||||
// ListFiles returns the list of files and subdirectories under 'dir' (non-recursively)
|
||||
@@ -307,7 +371,7 @@ func (storage *GCDStorage) ListFiles(threadIndex int, dir string) ([]string, []i
|
||||
|
||||
if dir == "snapshots" {
|
||||
|
||||
files, err := storage.listFiles(storage.getPathID(dir), false)
|
||||
files, err := storage.listFiles(threadIndex, storage.getPathID(dir), false, true)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
@@ -320,12 +384,15 @@ func (storage *GCDStorage) ListFiles(threadIndex int, dir string) ([]string, []i
|
||||
}
|
||||
return subDirs, nil, nil
|
||||
} else if strings.HasPrefix(dir, "snapshots/") {
|
||||
pathID, err := storage.getIDFromPath(dir)
|
||||
pathID, err := storage.getIDFromPath(threadIndex, dir, false)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
if pathID == "" {
|
||||
return nil, nil, fmt.Errorf("Path '%s' does not exist", dir)
|
||||
}
|
||||
|
||||
entries, err := storage.listFiles(pathID, true)
|
||||
entries, err := storage.listFiles(threadIndex, pathID, true, false)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
@@ -341,20 +408,33 @@ func (storage *GCDStorage) ListFiles(threadIndex int, dir string) ([]string, []i
|
||||
files := []string{}
|
||||
sizes := []int64{}
|
||||
|
||||
for _, parent := range []string { "chunks", "fossils" } {
|
||||
entries, err := storage.listFiles(storage.getPathID(parent), true)
|
||||
parents := []string{"chunks", "fossils"}
|
||||
for i := 0; i < len(parents); i++ {
|
||||
parent := parents[i]
|
||||
pathID, ok := storage.findPathID(parent)
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
entries, err := storage.listFiles(threadIndex, pathID, true, true)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
for _, entry := range entries {
|
||||
if entry.MimeType != "application/vnd.google-apps.folder" {
|
||||
name := entry.Name
|
||||
if parent == "fossils" {
|
||||
name += ".fsl"
|
||||
if strings.HasPrefix(parent, "fossils") {
|
||||
name = parent + "/" + name + ".fsl"
|
||||
name = name[len("fossils/"):]
|
||||
} else {
|
||||
name = parent + "/" + name
|
||||
name = name[len("chunks/"):]
|
||||
}
|
||||
storage.savePathID(parent + "/" + entry.Name, entry.Id)
|
||||
files = append(files, name)
|
||||
sizes = append(sizes, entry.Size)
|
||||
} else {
|
||||
parents = append(parents, parent+"/"+entry.Name)
|
||||
}
|
||||
storage.savePathID(parent+"/"+entry.Name, entry.Id)
|
||||
}
|
||||
}
|
||||
return files, sizes, nil
|
||||
@@ -365,18 +445,15 @@ func (storage *GCDStorage) ListFiles(threadIndex int, dir string) ([]string, []i
|
||||
// DeleteFile deletes the file or directory at 'filePath'.
|
||||
func (storage *GCDStorage) DeleteFile(threadIndex int, filePath string) (err error) {
|
||||
filePath = storage.convertFilePath(filePath)
|
||||
fileID, ok := storage.findPathID(filePath)
|
||||
if !ok {
|
||||
fileID, err = storage.getIDFromPath(filePath)
|
||||
fileID, err := storage.getIDFromPath(threadIndex, filePath, false)
|
||||
if err != nil {
|
||||
LOG_TRACE("GCD_STORAGE", "Ignored file deletion error: %v", err)
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
for {
|
||||
err = storage.service.Files.Delete(fileID).Fields("id").Do()
|
||||
if retry, err := storage.shouldRetry(err); err == nil && !retry {
|
||||
if retry, err := storage.shouldRetry(threadIndex, err); err == nil && !retry {
|
||||
storage.deletePathID(filePath)
|
||||
return nil
|
||||
} else if retry {
|
||||
@@ -399,19 +476,27 @@ func (storage *GCDStorage) MoveFile(threadIndex int, from string, to string) (er
|
||||
|
||||
fileID, ok := storage.findPathID(from)
|
||||
if !ok {
|
||||
return fmt.Errorf("Attempting to rename file %s with unknown id", to)
|
||||
return fmt.Errorf("Attempting to rename file %s with unknown id", from)
|
||||
}
|
||||
|
||||
fromParentID := storage.getPathID("chunks")
|
||||
toParentID := storage.getPathID("fossils")
|
||||
fromParent := path.Dir(from)
|
||||
fromParentID, err := storage.getIDFromPath(threadIndex, fromParent, false)
|
||||
if err != nil {
|
||||
return fmt.Errorf("Failed to retrieve the id of the parent directory '%s': %v", fromParent, err)
|
||||
}
|
||||
if fromParentID == "" {
|
||||
return fmt.Errorf("The parent directory '%s' does not exist", fromParent)
|
||||
}
|
||||
|
||||
if strings.HasPrefix(from, "fossils") {
|
||||
fromParentID, toParentID = toParentID, fromParentID
|
||||
toParent := path.Dir(to)
|
||||
toParentID, err := storage.getIDFromPath(threadIndex, toParent, true)
|
||||
if err != nil {
|
||||
return fmt.Errorf("Failed to retrieve the id of the parent directory '%s': %v", toParent, err)
|
||||
}
|
||||
|
||||
for {
|
||||
_, err = storage.service.Files.Update(fileID, nil).AddParents(toParentID).RemoveParents(fromParentID).Do()
|
||||
if retry, err := storage.shouldRetry(err); err == nil && !retry {
|
||||
if retry, err := storage.shouldRetry(threadIndex, err); err == nil && !retry {
|
||||
break
|
||||
} else if retry {
|
||||
continue
|
||||
@@ -425,7 +510,7 @@ func (storage *GCDStorage) MoveFile(threadIndex int, from string, to string) (er
|
||||
return nil
|
||||
}
|
||||
|
||||
// CreateDirectory creates a new directory.
|
||||
// createDirectory creates a new directory.
|
||||
func (storage *GCDStorage) CreateDirectory(threadIndex int, dir string) (err error) {
|
||||
|
||||
for len(dir) > 0 && dir[len(dir)-1] == '/' {
|
||||
@@ -444,13 +529,15 @@ func (storage *GCDStorage) CreateDirectory(threadIndex int, dir string) (err err
|
||||
return nil
|
||||
}
|
||||
|
||||
parentID := storage.getPathID("")
|
||||
name := dir
|
||||
|
||||
if strings.HasPrefix(dir, "snapshots/") {
|
||||
parentID = storage.getPathID("snapshots")
|
||||
name = dir[len("snapshots/"):]
|
||||
parentDir := path.Dir(dir)
|
||||
if parentDir == "." {
|
||||
parentDir = ""
|
||||
}
|
||||
parentID := storage.getPathID(parentDir)
|
||||
if parentID == "" {
|
||||
return fmt.Errorf("Parent directory '%s' does not exist", parentDir)
|
||||
}
|
||||
name := path.Base(dir)
|
||||
|
||||
file := &drive.File{
|
||||
Name: name,
|
||||
@@ -460,14 +547,23 @@ func (storage *GCDStorage) CreateDirectory(threadIndex int, dir string) (err err
|
||||
|
||||
for {
|
||||
file, err = storage.service.Files.Create(file).Fields("id").Do()
|
||||
if retry, err := storage.shouldRetry(err); err == nil && !retry {
|
||||
if retry, err := storage.shouldRetry(threadIndex, err); err == nil && !retry {
|
||||
break
|
||||
} else if retry {
|
||||
} else {
|
||||
|
||||
// Check if the directory has already been created by another thread
|
||||
exist, _, _, newErr := storage.GetFileInfo(threadIndex, dir)
|
||||
if newErr == nil && exist {
|
||||
return nil
|
||||
}
|
||||
|
||||
if retry {
|
||||
continue
|
||||
} else {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
storage.savePathID(dir, file.Id)
|
||||
return nil
|
||||
@@ -478,20 +574,23 @@ func (storage *GCDStorage) GetFileInfo(threadIndex int, filePath string) (exist
|
||||
for len(filePath) > 0 && filePath[len(filePath)-1] == '/' {
|
||||
filePath = filePath[:len(filePath)-1]
|
||||
}
|
||||
filePath = storage.convertFilePath(filePath)
|
||||
|
||||
// GetFileInfo is never called on a fossil
|
||||
fileID, ok := storage.findPathID(filePath)
|
||||
if !ok {
|
||||
dir := path.Dir(filePath)
|
||||
if dir == "." {
|
||||
dir = ""
|
||||
}
|
||||
dirID, err := storage.getIDFromPath(dir)
|
||||
dirID, err := storage.getIDFromPath(threadIndex, dir, false)
|
||||
if err != nil {
|
||||
return false, false, 0, err
|
||||
}
|
||||
if dirID == "" {
|
||||
return false, false, 0, nil
|
||||
}
|
||||
|
||||
fileID, isDir, size, err = storage.listByName(dirID, path.Base(filePath))
|
||||
fileID, isDir, size, err = storage.listByName(threadIndex, dirID, path.Base(filePath))
|
||||
if fileID != "" {
|
||||
storage.savePathID(filePath, fileID)
|
||||
}
|
||||
@@ -500,7 +599,7 @@ func (storage *GCDStorage) GetFileInfo(threadIndex int, filePath string) (exist
|
||||
|
||||
for {
|
||||
file, err := storage.service.Files.Get(fileID).Fields("id, mimeType").Do()
|
||||
if retry, err := storage.shouldRetry(err); err == nil && !retry {
|
||||
if retry, err := storage.shouldRetry(threadIndex, err); err == nil && !retry {
|
||||
return true, file.MimeType == "application/vnd.google-apps.folder", file.Size, nil
|
||||
} else if retry {
|
||||
continue
|
||||
@@ -510,44 +609,22 @@ func (storage *GCDStorage) GetFileInfo(threadIndex int, filePath string) (exist
|
||||
}
|
||||
}
|
||||
|
||||
// FindChunk finds the chunk with the specified id. If 'isFossil' is true, it will search for chunk files with
|
||||
// the suffix '.fsl'.
|
||||
func (storage *GCDStorage) FindChunk(threadIndex int, chunkID string, isFossil bool) (filePath string, exist bool, size int64, err error) {
|
||||
parentID := ""
|
||||
filePath = "chunks/" + chunkID
|
||||
realPath := storage.convertFilePath(filePath)
|
||||
if isFossil {
|
||||
parentID = storage.getPathID("fossils")
|
||||
filePath += ".fsl"
|
||||
} else {
|
||||
parentID = storage.getPathID("chunks")
|
||||
}
|
||||
|
||||
fileID := ""
|
||||
fileID, _, size, err = storage.listByName(parentID, chunkID)
|
||||
if fileID != "" {
|
||||
storage.savePathID(realPath, fileID)
|
||||
}
|
||||
return filePath, fileID != "", size, err
|
||||
}
|
||||
|
||||
// DownloadFile reads the file at 'filePath' into the chunk.
|
||||
func (storage *GCDStorage) DownloadFile(threadIndex int, filePath string, chunk *Chunk) (err error) {
|
||||
// We never download the fossil so there is no need to convert the path
|
||||
fileID, ok := storage.findPathID(filePath)
|
||||
if !ok {
|
||||
fileID, err = storage.getIDFromPath(filePath)
|
||||
fileID, err := storage.getIDFromPath(threadIndex, filePath, false)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
storage.savePathID(filePath, fileID)
|
||||
if fileID == "" {
|
||||
return fmt.Errorf("%s does not exist", filePath)
|
||||
}
|
||||
|
||||
var response *http.Response
|
||||
|
||||
for {
|
||||
response, err = storage.service.Files.Get(fileID).Download()
|
||||
if retry, err := storage.shouldRetry(err); err == nil && !retry {
|
||||
if retry, err := storage.shouldRetry(threadIndex, err); err == nil && !retry {
|
||||
break
|
||||
} else if retry {
|
||||
continue
|
||||
@@ -572,14 +649,10 @@ func (storage *GCDStorage) UploadFile(threadIndex int, filePath string, content
|
||||
parent = ""
|
||||
}
|
||||
|
||||
parentID, ok := storage.findPathID(parent)
|
||||
if !ok {
|
||||
parentID, err = storage.getIDFromPath(parent)
|
||||
parentID, err := storage.getIDFromPath(threadIndex, parent, true)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
storage.savePathID(parent, parentID)
|
||||
}
|
||||
|
||||
file := &drive.File{
|
||||
Name: path.Base(filePath),
|
||||
@@ -590,7 +663,7 @@ func (storage *GCDStorage) UploadFile(threadIndex int, filePath string, content
|
||||
for {
|
||||
reader := CreateRateLimitedReader(content, storage.UploadRateLimit/storage.numberOfThreads)
|
||||
_, err = storage.service.Files.Create(file).Media(reader).Fields("id").Do()
|
||||
if retry, err := storage.shouldRetry(err); err == nil && !retry {
|
||||
if retry, err := storage.shouldRetry(threadIndex, err); err == nil && !retry {
|
||||
break
|
||||
} else if retry {
|
||||
continue
|
||||
@@ -604,16 +677,16 @@ func (storage *GCDStorage) UploadFile(threadIndex int, filePath string, content
|
||||
|
||||
// If a local snapshot cache is needed for the storage to avoid downloading/uploading chunks too often when
|
||||
// managing snapshots.
|
||||
func (storage *GCDStorage) IsCacheNeeded() (bool) { return true }
|
||||
func (storage *GCDStorage) IsCacheNeeded() bool { return true }
|
||||
|
||||
// If the 'MoveFile' method is implemented.
|
||||
func (storage *GCDStorage) IsMoveFileImplemented() (bool) { return true }
|
||||
func (storage *GCDStorage) IsMoveFileImplemented() bool { return true }
|
||||
|
||||
// If the storage can guarantee strong consistency.
|
||||
func (storage *GCDStorage) IsStrongConsistent() (bool) { return false }
|
||||
func (storage *GCDStorage) IsStrongConsistent() bool { return false }
|
||||
|
||||
// If the storage supports fast listing of files names.
|
||||
func (storage *GCDStorage) IsFastListing() (bool) { return true }
|
||||
func (storage *GCDStorage) IsFastListing() bool { return true }
|
||||
|
||||
// Enable the test mode.
|
||||
func (storage *GCDStorage) EnableTestMode() { storage.TestMode = true }
|
||||
|
||||
@@ -1,37 +1,36 @@
|
||||
// Copyright (c) Acrosync LLC. All rights reserved.
|
||||
// Licensed under the Fair Source License 0.9 (https://fair.io/)
|
||||
// User Limitation: 5 users
|
||||
// Free for personal use and commercial trial
|
||||
// Commercial use requires per-user licenses available from https://duplicacy.com
|
||||
|
||||
package duplicacy
|
||||
|
||||
import (
|
||||
"io"
|
||||
"fmt"
|
||||
"net"
|
||||
"time"
|
||||
"net/url"
|
||||
"math/rand"
|
||||
"io/ioutil"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"math/rand"
|
||||
"net"
|
||||
"net/url"
|
||||
"time"
|
||||
|
||||
gcs "cloud.google.com/go/storage"
|
||||
"golang.org/x/net/context"
|
||||
"golang.org/x/oauth2"
|
||||
"golang.org/x/oauth2/google"
|
||||
gcs "cloud.google.com/go/storage"
|
||||
"google.golang.org/api/googleapi"
|
||||
"google.golang.org/api/iterator"
|
||||
"google.golang.org/api/option"
|
||||
"google.golang.org/api/googleapi"
|
||||
)
|
||||
|
||||
type GCSStorage struct {
|
||||
RateLimitedStorage
|
||||
StorageBase
|
||||
|
||||
bucket *gcs.BucketHandle
|
||||
storageDir string
|
||||
|
||||
numberOfThreads int
|
||||
TestMode bool
|
||||
|
||||
}
|
||||
|
||||
type GCSConfig struct {
|
||||
@@ -102,8 +101,9 @@ func CreateGCSStorage(tokenFile string, bucketName string, storageDir string, th
|
||||
numberOfThreads: threads,
|
||||
}
|
||||
|
||||
storage.DerivedStorage = storage
|
||||
storage.SetDefaultNestingLevels([]int{0}, 0)
|
||||
return storage, nil
|
||||
|
||||
}
|
||||
|
||||
func (storage *GCSStorage) shouldRetry(backoff *int, err error) (bool, error) {
|
||||
@@ -149,7 +149,6 @@ func (storage *GCSStorage) shouldRetry(backoff *int, err error) (bool, error) {
|
||||
return true, nil
|
||||
}
|
||||
|
||||
|
||||
// ListFiles returns the list of files and subdirectories under 'dir' (non-recursively)
|
||||
func (storage *GCSStorage) ListFiles(threadIndex int, dir string) ([]string, []int64, error) {
|
||||
for len(dir) > 0 && dir[len(dir)-1] == '/' {
|
||||
@@ -240,19 +239,6 @@ func (storage *GCSStorage) GetFileInfo(threadIndex int, filePath string) (exist
|
||||
return true, false, attributes.Size, nil
|
||||
}
|
||||
|
||||
// FindChunk finds the chunk with the specified id. If 'isFossil' is true, it will search for chunk files with
|
||||
// the suffix '.fsl'.
|
||||
func (storage *GCSStorage) FindChunk(threadIndex int, chunkID string, isFossil bool) (filePath string, exist bool, size int64, err error) {
|
||||
filePath = "chunks/" + chunkID
|
||||
if isFossil {
|
||||
filePath += ".fsl"
|
||||
}
|
||||
|
||||
exist, _, size, err = storage.GetFileInfo(threadIndex, filePath)
|
||||
|
||||
return filePath, exist, size, err
|
||||
}
|
||||
|
||||
// DownloadFile reads the file at 'filePath' into the chunk.
|
||||
func (storage *GCSStorage) DownloadFile(threadIndex int, filePath string, chunk *Chunk) (err error) {
|
||||
readCloser, err := storage.bucket.Object(storage.storageDir + filePath).NewReader(context.Background())
|
||||
@@ -288,16 +274,16 @@ func (storage *GCSStorage) UploadFile(threadIndex int, filePath string, content
|
||||
|
||||
// If a local snapshot cache is needed for the storage to avoid downloading/uploading chunks too often when
|
||||
// managing snapshots.
|
||||
func (storage *GCSStorage) IsCacheNeeded() (bool) { return true }
|
||||
func (storage *GCSStorage) IsCacheNeeded() bool { return true }
|
||||
|
||||
// If the 'MoveFile' method is implemented.
|
||||
func (storage *GCSStorage) IsMoveFileImplemented() (bool) { return true }
|
||||
func (storage *GCSStorage) IsMoveFileImplemented() bool { return true }
|
||||
|
||||
// If the storage can guarantee strong consistency.
|
||||
func (storage *GCSStorage) IsStrongConsistent() (bool) { return true }
|
||||
func (storage *GCSStorage) IsStrongConsistent() bool { return true }
|
||||
|
||||
// If the storage supports fast listing of files names.
|
||||
func (storage *GCSStorage) IsFastListing() (bool) { return true }
|
||||
func (storage *GCSStorage) IsFastListing() bool { return true }
|
||||
|
||||
// Enable the test mode.
|
||||
func (storage *GCSStorage) EnableTestMode() { storage.TestMode = true }
|
||||
|
||||
@@ -1,21 +1,22 @@
|
||||
// Copyright (c) Acrosync LLC. All rights reserved.
|
||||
// Licensed under the Fair Source License 0.9 (https://fair.io/)
|
||||
// User Limitation: 5 users
|
||||
// Free for personal use and commercial trial
|
||||
// Commercial use requires per-user licenses available from https://duplicacy.com
|
||||
|
||||
package duplicacy
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"time"
|
||||
"sync"
|
||||
"bytes"
|
||||
"strings"
|
||||
"io/ioutil"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"math/rand"
|
||||
"net"
|
||||
"net/http"
|
||||
net_url "net/url"
|
||||
"math/rand"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"golang.org/x/oauth2"
|
||||
)
|
||||
@@ -64,7 +65,17 @@ func NewHubicClient(tokenFile string) (*HubicClient, error) {
|
||||
}
|
||||
|
||||
client := &HubicClient{
|
||||
HTTPClient: http.DefaultClient,
|
||||
HTTPClient: &http.Client{
|
||||
Transport: &http.Transport{
|
||||
Dial: (&net.Dialer{
|
||||
Timeout: 30 * time.Second,
|
||||
KeepAlive: 30 * time.Second,
|
||||
}).Dial,
|
||||
TLSHandshakeTimeout: 60 * time.Second,
|
||||
ResponseHeaderTimeout: 30 * time.Second,
|
||||
ExpectContinueTimeout: 10 * time.Second,
|
||||
},
|
||||
},
|
||||
TokenFile: tokenFile,
|
||||
Token: token,
|
||||
TokenLock: &sync.Mutex{},
|
||||
@@ -425,7 +436,7 @@ func (client *HubicClient) MoveFile(from string, to string) error {
|
||||
return client.DeleteFile(from)
|
||||
}
|
||||
|
||||
func (client *HubicClient) CreateDirectory(path string) (error) {
|
||||
func (client *HubicClient) CreateDirectory(path string) error {
|
||||
|
||||
for len(path) > 0 && path[len(path)-1] == '/' {
|
||||
path = path[:len(path)-1]
|
||||
|
||||
@@ -1,15 +1,15 @@
|
||||
// Copyright (c) Acrosync LLC. All rights reserved.
|
||||
// Licensed under the Fair Source License 0.9 (https://fair.io/)
|
||||
// User Limitation: 5 users
|
||||
// Free for personal use and commercial trial
|
||||
// Commercial use requires per-user licenses available from https://duplicacy.com
|
||||
|
||||
package duplicacy
|
||||
|
||||
import (
|
||||
"io"
|
||||
"fmt"
|
||||
"testing"
|
||||
"crypto/sha256"
|
||||
"encoding/hex"
|
||||
"fmt"
|
||||
"io"
|
||||
"testing"
|
||||
|
||||
crypto_rand "crypto/rand"
|
||||
"math/rand"
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
// Copyright (c) Acrosync LLC. All rights reserved.
|
||||
// Licensed under the Fair Source License 0.9 (https://fair.io/)
|
||||
// User Limitation: 5 users
|
||||
// Free for personal use and commercial trial
|
||||
// Commercial use requires per-user licenses available from https://duplicacy.com
|
||||
|
||||
package duplicacy
|
||||
|
||||
@@ -10,7 +10,7 @@ import (
|
||||
)
|
||||
|
||||
type HubicStorage struct {
|
||||
RateLimitedStorage
|
||||
StorageBase
|
||||
|
||||
client *HubicClient
|
||||
storageDir string
|
||||
@@ -64,8 +64,9 @@ func CreateHubicStorage(tokenFile string, storagePath string, threads int) (stor
|
||||
}
|
||||
}
|
||||
|
||||
storage.DerivedStorage = storage
|
||||
storage.SetDefaultNestingLevels([]int{0}, 0)
|
||||
return storage, nil
|
||||
|
||||
}
|
||||
|
||||
// ListFiles returns the list of files and subdirectories under 'dir' (non-recursively)
|
||||
@@ -158,18 +159,6 @@ func (storage *HubicStorage) GetFileInfo(threadIndex int, filePath string) (exis
|
||||
return storage.client.GetFileInfo(storage.storageDir + "/" + filePath)
|
||||
}
|
||||
|
||||
// FindChunk finds the chunk with the specified id. If 'isFossil' is true, it will search for chunk files with
|
||||
// the suffix '.fsl'.
|
||||
func (storage *HubicStorage) FindChunk(threadIndex int, chunkID string, isFossil bool) (filePath string, exist bool, size int64, err error) {
|
||||
filePath = "chunks/" + chunkID
|
||||
if isFossil {
|
||||
filePath += ".fsl"
|
||||
}
|
||||
|
||||
exist, _, size, err = storage.client.GetFileInfo(storage.storageDir + "/" + filePath)
|
||||
return filePath, exist, size, err
|
||||
}
|
||||
|
||||
// DownloadFile reads the file at 'filePath' into the chunk.
|
||||
func (storage *HubicStorage) DownloadFile(threadIndex int, filePath string, chunk *Chunk) (err error) {
|
||||
readCloser, _, err := storage.client.DownloadFile(storage.storageDir + "/" + filePath)
|
||||
@@ -190,16 +179,16 @@ func (storage *HubicStorage) UploadFile(threadIndex int, filePath string, conten
|
||||
|
||||
// If a local snapshot cache is needed for the storage to avoid downloading/uploading chunks too often when
|
||||
// managing snapshots.
|
||||
func (storage *HubicStorage) IsCacheNeeded() (bool) { return true }
|
||||
func (storage *HubicStorage) IsCacheNeeded() bool { return true }
|
||||
|
||||
// If the 'MoveFile' method is implemented.
|
||||
func (storage *HubicStorage) IsMoveFileImplemented() (bool) { return true }
|
||||
func (storage *HubicStorage) IsMoveFileImplemented() bool { return true }
|
||||
|
||||
// If the storage can guarantee strong consistency.
|
||||
func (storage *HubicStorage) IsStrongConsistent() (bool) { return false }
|
||||
func (storage *HubicStorage) IsStrongConsistent() bool { return false }
|
||||
|
||||
// If the storage supports fast listing of files names.
|
||||
func (storage *HubicStorage) IsFastListing() (bool) { return true }
|
||||
func (storage *HubicStorage) IsFastListing() bool { return true }
|
||||
|
||||
// Enable the test mode.
|
||||
func (storage *HubicStorage) EnableTestMode() {
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
// Copyright (c) Acrosync LLC. All rights reserved.
|
||||
// Licensed under the Fair Source License 0.9 (https://fair.io/)
|
||||
// User Limitation: 5 users
|
||||
// Free for personal use and commercial trial
|
||||
// Commercial use requires per-user licenses available from https://duplicacy.com
|
||||
|
||||
// +build !windows
|
||||
|
||||
|
||||
@@ -1,14 +1,14 @@
|
||||
// Copyright (c) Acrosync LLC. All rights reserved.
|
||||
// Licensed under the Fair Source License 0.9 (https://fair.io/)
|
||||
// User Limitation: 5 users
|
||||
// Free for personal use and commercial trial
|
||||
// Commercial use requires per-user licenses available from https://duplicacy.com
|
||||
|
||||
package duplicacy
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"io/ioutil"
|
||||
"syscall"
|
||||
"unsafe"
|
||||
"io/ioutil"
|
||||
"encoding/json"
|
||||
)
|
||||
|
||||
var keyringFile string
|
||||
|
||||
@@ -1,16 +1,16 @@
|
||||
// Copyright (c) Acrosync LLC. All rights reserved.
|
||||
// Licensed under the Fair Source License 0.9 (https://fair.io/)
|
||||
// User Limitation: 5 users
|
||||
// Free for personal use and commercial trial
|
||||
// Commercial use requires per-user licenses available from https://duplicacy.com
|
||||
|
||||
package duplicacy
|
||||
|
||||
import (
|
||||
"os"
|
||||
"fmt"
|
||||
"time"
|
||||
"os"
|
||||
"runtime/debug"
|
||||
"sync"
|
||||
"testing"
|
||||
"runtime/debug"
|
||||
"time"
|
||||
)
|
||||
|
||||
const (
|
||||
@@ -23,6 +23,7 @@ const (
|
||||
ASSERT = 4
|
||||
)
|
||||
|
||||
var LogFunction func(level int, logID string, message string)
|
||||
|
||||
var printLogHeader = false
|
||||
|
||||
@@ -117,6 +118,11 @@ func logf(level int, logID string, format string, v ...interface{}) {
|
||||
|
||||
message := fmt.Sprintf(format, v...)
|
||||
|
||||
if LogFunction != nil {
|
||||
LogFunction(level, logID, message)
|
||||
return
|
||||
}
|
||||
|
||||
now := time.Now()
|
||||
|
||||
// Uncomment this line to enable unbuffered logging for tests
|
||||
|
||||
@@ -1,19 +1,20 @@
|
||||
// Copyright (c) Acrosync LLC. All rights reserved.
|
||||
// Licensed under the Fair Source License 0.9 (https://fair.io/)
|
||||
// User Limitation: 5 users
|
||||
// Free for personal use and commercial trial
|
||||
// Commercial use requires per-user licenses available from https://duplicacy.com
|
||||
|
||||
package duplicacy
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"time"
|
||||
"sync"
|
||||
"bytes"
|
||||
"io/ioutil"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"io/ioutil"
|
||||
"math/rand"
|
||||
"net/http"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"golang.org/x/oauth2"
|
||||
)
|
||||
@@ -41,6 +42,7 @@ type OneDriveClient struct {
|
||||
Token *oauth2.Token
|
||||
TokenLock *sync.Mutex
|
||||
|
||||
IsConnected bool
|
||||
TestMode bool
|
||||
}
|
||||
|
||||
@@ -87,7 +89,7 @@ func (client *OneDriveClient) call(url string, method string, input interface{},
|
||||
case []byte:
|
||||
inputReader = bytes.NewReader(input.([]byte))
|
||||
case int:
|
||||
inputReader = bytes.NewReader([]byte(""))
|
||||
inputReader = nil
|
||||
case *bytes.Buffer:
|
||||
inputReader = bytes.NewReader(input.(*bytes.Buffer).Bytes())
|
||||
case *RateLimitedReader:
|
||||
@@ -115,9 +117,27 @@ func (client *OneDriveClient) call(url string, method string, input interface{},
|
||||
|
||||
response, err = client.HTTPClient.Do(request)
|
||||
if err != nil {
|
||||
if client.IsConnected {
|
||||
if strings.Contains(err.Error(), "TLS handshake timeout") {
|
||||
// Give a long timeout regardless of backoff when a TLS timeout happens, hoping that
|
||||
// idle connections will not be reused on reconnect.
|
||||
retryAfter := time.Duration(rand.Float32()*60000 + 180000)
|
||||
LOG_INFO("ONEDRIVE_RETRY", "TLS handshake timeout; retry after %d milliseconds", retryAfter)
|
||||
time.Sleep(retryAfter * time.Millisecond)
|
||||
} else {
|
||||
// For all other errors just blindly retry until the maximum is reached
|
||||
retryAfter := time.Duration(rand.Float32() * 1000.0 * float32(backoff))
|
||||
LOG_INFO("ONEDRIVE_RETRY", "%v; retry after %d milliseconds", err, retryAfter)
|
||||
time.Sleep(retryAfter * time.Millisecond)
|
||||
}
|
||||
backoff *= 2
|
||||
continue
|
||||
}
|
||||
return nil, 0, err
|
||||
}
|
||||
|
||||
client.IsConnected = true
|
||||
|
||||
if response.StatusCode < 400 {
|
||||
return response.Body, response.ContentLength, nil
|
||||
}
|
||||
@@ -139,19 +159,18 @@ func (client *OneDriveClient) call(url string, method string, input interface{},
|
||||
return nil, 0, err
|
||||
}
|
||||
continue
|
||||
} else if response.StatusCode == 500 || response.StatusCode == 503 || response.StatusCode == 509 {
|
||||
} else if response.StatusCode > 401 && response.StatusCode != 404 {
|
||||
retryAfter := time.Duration(rand.Float32() * 1000.0 * float32(backoff))
|
||||
LOG_INFO("ONEDRIVE_RETRY", "Response status: %d; retry after %d milliseconds", response.StatusCode, retryAfter)
|
||||
LOG_INFO("ONEDRIVE_RETRY", "Response code: %d; retry after %d milliseconds", response.StatusCode, retryAfter)
|
||||
time.Sleep(retryAfter * time.Millisecond)
|
||||
backoff *= 2
|
||||
continue
|
||||
} else {
|
||||
if err := json.NewDecoder(response.Body).Decode(errorResponse); err != nil {
|
||||
return nil, 0, OneDriveError { Status: response.StatusCode, Message: fmt.Sprintf("Unexpected response"), }
|
||||
return nil, 0, OneDriveError{Status: response.StatusCode, Message: fmt.Sprintf("Unexpected response")}
|
||||
}
|
||||
|
||||
errorResponse.Error.Status = response.StatusCode
|
||||
|
||||
return nil, 0, errorResponse.Error
|
||||
}
|
||||
}
|
||||
@@ -169,7 +188,7 @@ func (client *OneDriveClient) RefreshToken() (err error) {
|
||||
|
||||
readCloser, _, err := client.call(OneDriveRefreshTokenURL, "POST", client.Token, "")
|
||||
if err != nil {
|
||||
return err
|
||||
return fmt.Errorf("failed to refresh the access token: %v", err)
|
||||
}
|
||||
|
||||
defer readCloser.Close()
|
||||
@@ -321,7 +340,7 @@ func (client *OneDriveClient) MoveFile(path string, parent string) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (client *OneDriveClient) CreateDirectory(path string, name string) (error) {
|
||||
func (client *OneDriveClient) CreateDirectory(path string, name string) error {
|
||||
|
||||
url := OneDriveAPIURL + "/root/children"
|
||||
|
||||
|
||||
@@ -1,15 +1,15 @@
|
||||
// Copyright (c) Acrosync LLC. All rights reserved.
|
||||
// Licensed under the Fair Source License 0.9 (https://fair.io/)
|
||||
// User Limitation: 5 users
|
||||
// Free for personal use and commercial trial
|
||||
// Commercial use requires per-user licenses available from https://duplicacy.com
|
||||
|
||||
package duplicacy
|
||||
|
||||
import (
|
||||
"io"
|
||||
"fmt"
|
||||
"testing"
|
||||
"crypto/sha256"
|
||||
"encoding/hex"
|
||||
"fmt"
|
||||
"io"
|
||||
"testing"
|
||||
|
||||
crypto_rand "crypto/rand"
|
||||
"math/rand"
|
||||
@@ -30,7 +30,6 @@ func TestOneDriveClient(t *testing.T) {
|
||||
fmt.Printf("name: %s, isDir: %t\n", file.Name, len(file.Folder) != 0)
|
||||
}
|
||||
|
||||
|
||||
testID, _, _, err := oneDriveClient.GetFileInfo("test")
|
||||
if err != nil {
|
||||
t.Errorf("Failed to list the test directory: %v", err)
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
// Copyright (c) Acrosync LLC. All rights reserved.
|
||||
// Licensed under the Fair Source License 0.9 (https://fair.io/)
|
||||
// User Limitation: 5 users
|
||||
// Free for personal use and commercial trial
|
||||
// Commercial use requires per-user licenses available from https://duplicacy.com
|
||||
|
||||
package duplicacy
|
||||
|
||||
@@ -11,7 +11,7 @@ import (
|
||||
)
|
||||
|
||||
type OneDriveStorage struct {
|
||||
RateLimitedStorage
|
||||
StorageBase
|
||||
|
||||
client *OneDriveClient
|
||||
storageDir string
|
||||
@@ -65,10 +65,19 @@ func CreateOneDriveStorage(tokenFile string, storagePath string, threads int) (s
|
||||
}
|
||||
}
|
||||
|
||||
storage.DerivedStorage = storage
|
||||
storage.SetDefaultNestingLevels([]int{0}, 0)
|
||||
return storage, nil
|
||||
|
||||
}
|
||||
|
||||
func (storage *OneDriveStorage) convertFilePath(filePath string) string {
|
||||
if strings.HasPrefix(filePath, "chunks/") && strings.HasSuffix(filePath, ".fsl") {
|
||||
return "fossils/" + filePath[len("chunks/"):len(filePath)-len(".fsl")]
|
||||
}
|
||||
return filePath
|
||||
}
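This is the same fossil-path trick used by the GCD backend: the backend cannot rename a file in place, so a chunk path carrying the ".fsl" suffix is redirected into a separate "fossils/" directory instead. A self-contained sketch of the conversion (toFossilPath is an illustrative name; the string handling mirrors convertFilePath above):

package main

import (
	"fmt"
	"strings"
)

// toFossilPath maps "chunks/<id>.fsl" to "fossils/<id>"; any other path is
// returned unchanged.
func toFossilPath(filePath string) string {
	if strings.HasPrefix(filePath, "chunks/") && strings.HasSuffix(filePath, ".fsl") {
		return "fossils/" + filePath[len("chunks/"):len(filePath)-len(".fsl")]
	}
	return filePath
}

func main() {
	fmt.Println(toFossilPath("chunks/9f86d0.fsl")) // fossils/9f86d0
	fmt.Println(toFossilPath("chunks/9f86d0"))     // unchanged
	fmt.Println(toFossilPath("snapshots/docs/1"))  // unchanged
}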
|
||||
|
||||
// ListFiles returns the list of files and subdirectories under 'dir' (non-recursively)
|
||||
func (storage *OneDriveStorage) ListFiles(threadIndex int, dir string) ([]string, []int64, error) {
|
||||
for len(dir) > 0 && dir[len(dir)-1] == '/' {
|
||||
@@ -105,19 +114,29 @@ func (storage *OneDriveStorage) ListFiles(threadIndex int, dir string) ([]string
|
||||
} else {
|
||||
files := []string{}
|
||||
sizes := []int64{}
|
||||
for _, parent := range []string {"chunks", "fossils" } {
|
||||
parents := []string{"chunks", "fossils"}
|
||||
for i := 0; i < len(parents); i++ {
|
||||
parent := parents[i]
|
||||
entries, err := storage.client.ListEntries(storage.storageDir + "/" + parent)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
for _, entry := range entries {
|
||||
if len(entry.Folder) == 0 {
|
||||
name := entry.Name
|
||||
if parent == "fossils" {
|
||||
name += ".fsl"
|
||||
if strings.HasPrefix(parent, "fossils") {
|
||||
name = parent + "/" + name + ".fsl"
|
||||
name = name[len("fossils/"):]
|
||||
} else {
|
||||
name = parent + "/" + name
|
||||
name = name[len("chunks/"):]
|
||||
}
|
||||
files = append(files, name)
|
||||
sizes = append(sizes, entry.Size)
|
||||
} else {
|
||||
parents = append(parents, parent+"/"+entry.Name)
|
||||
}
|
||||
}
|
||||
}
|
||||
return files, sizes, nil
|
||||
@@ -127,9 +146,7 @@ func (storage *OneDriveStorage) ListFiles(threadIndex int, dir string) ([]string
|
||||
|
||||
// DeleteFile deletes the file or directory at 'filePath'.
|
||||
func (storage *OneDriveStorage) DeleteFile(threadIndex int, filePath string) (err error) {
|
||||
if strings.HasSuffix(filePath, ".fsl") && strings.HasPrefix(filePath, "chunks/") {
|
||||
filePath = "fossils/" + filePath[len("chunks/"):len(filePath) - len(".fsl")]
|
||||
}
|
||||
filePath = storage.convertFilePath(filePath)
|
||||
|
||||
err = storage.client.DeleteFile(storage.storageDir + "/" + filePath)
|
||||
if e, ok := err.(OneDriveError); ok && e.Status == 404 {
|
||||
@@ -141,14 +158,11 @@ func (storage *OneDriveStorage) DeleteFile(threadIndex int, filePath string) (er
|
||||
|
||||
// MoveFile renames the file.
|
||||
func (storage *OneDriveStorage) MoveFile(threadIndex int, from string, to string) (err error) {
|
||||
fromPath := storage.storageDir + "/" + from
|
||||
toParent := storage.storageDir + "/fossils"
|
||||
if strings.HasSuffix(from, ".fsl") {
|
||||
fromPath = storage.storageDir + "/fossils/" + from[len("chunks/"):len(from) - len(".fsl")]
|
||||
toParent = storage.storageDir + "/chunks"
|
||||
}
|
||||
|
||||
err = storage.client.MoveFile(fromPath, toParent)
|
||||
fromPath := storage.storageDir + "/" + storage.convertFilePath(from)
|
||||
toPath := storage.storageDir + "/" + storage.convertFilePath(to)
|
||||
|
||||
err = storage.client.MoveFile(fromPath, path.Dir(toPath))
|
||||
if err != nil {
|
||||
if e, ok := err.(OneDriveError); ok && e.Status == 409 {
|
||||
LOG_DEBUG("ONEDRIVE_MOVE", "Ignore 409 conflict error")
|
||||
@@ -180,24 +194,13 @@ func (storage *OneDriveStorage) GetFileInfo(threadIndex int, filePath string) (e
|
||||
for len(filePath) > 0 && filePath[len(filePath)-1] == '/' {
|
||||
filePath = filePath[:len(filePath)-1]
|
||||
}
|
||||
|
||||
filePath = storage.convertFilePath(filePath)
|
||||
|
||||
fileID, isDir, size, err := storage.client.GetFileInfo(storage.storageDir + "/" + filePath)
|
||||
return fileID != "", isDir, size, err
|
||||
}
|
||||
|
||||
// FindChunk finds the chunk with the specified id. If 'isFossil' is true, it will search for chunk files with
|
||||
// the suffix '.fsl'.
|
||||
func (storage *OneDriveStorage) FindChunk(threadIndex int, chunkID string, isFossil bool) (filePath string, exist bool, size int64, err error) {
|
||||
filePath = "chunks/" + chunkID
|
||||
realPath := storage.storageDir + "/" + filePath
|
||||
if isFossil {
|
||||
filePath += ".fsl"
|
||||
realPath = storage.storageDir + "/fossils/" + chunkID
|
||||
}
|
||||
|
||||
fileID, _, size, err := storage.client.GetFileInfo(realPath)
|
||||
return filePath, fileID != "", size, err
|
||||
}
|
||||
|
||||
// DownloadFile reads the file at 'filePath' into the chunk.
|
||||
func (storage *OneDriveStorage) DownloadFile(threadIndex int, filePath string, chunk *Chunk) (err error) {
|
||||
readCloser, _, err := storage.client.DownloadFile(storage.storageDir + "/" + filePath)
|
||||
@@ -225,16 +228,16 @@ func (storage *OneDriveStorage) UploadFile(threadIndex int, filePath string, con
|
||||
|
||||
// If a local snapshot cache is needed for the storage to avoid downloading/uploading chunks too often when
|
||||
// managing snapshots.
|
||||
func (storage *OneDriveStorage) IsCacheNeeded() (bool) { return true }
|
||||
func (storage *OneDriveStorage) IsCacheNeeded() bool { return true }
|
||||
|
||||
// If the 'MoveFile' method is implemented.
|
||||
func (storage *OneDriveStorage) IsMoveFileImplemented() (bool) { return true }
|
||||
func (storage *OneDriveStorage) IsMoveFileImplemented() bool { return true }
|
||||
|
||||
// If the storage can guarantee strong consistency.
|
||||
func (storage *OneDriveStorage) IsStrongConsistent() (bool) { return false }
|
||||
func (storage *OneDriveStorage) IsStrongConsistent() bool { return false }
|
||||
|
||||
// If the storage supports fast listing of files names.
|
||||
func (storage *OneDriveStorage) IsFastListing() (bool) { return true }
|
||||
func (storage *OneDriveStorage) IsFastListing() bool { return true }
|
||||
|
||||
// Enable the test mode.
|
||||
func (storage *OneDriveStorage) EnableTestMode() {
|
||||
|
||||
@@ -1,15 +1,16 @@
|
||||
// Copyright (c) Acrosync LLC. All rights reserved.
|
||||
// Licensed under the Fair Source License 0.9 (https://fair.io/)
|
||||
// User Limitation: 5 users
|
||||
// Free for personal use and commercial trial
|
||||
// Commercial use requires per-user licenses available from https://duplicacy.com
|
||||
|
||||
package duplicacy
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"path"
|
||||
"io/ioutil"
|
||||
"reflect"
|
||||
"os"
|
||||
"path"
|
||||
"reflect"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// Preference stores options for each storage.
|
||||
@@ -43,7 +44,7 @@ func LoadPreferences(repository string) bool {
|
||||
LOG_ERROR("DOT_DUPLICACY_PATH", "Failed to locate the preference path: %v", err)
|
||||
return false
|
||||
}
|
||||
realPreferencePath := string(content)
|
||||
realPreferencePath := strings.TrimSpace(string(content))
|
||||
stat, err := os.Stat(realPreferencePath)
|
||||
if err != nil {
|
||||
LOG_ERROR("PREFERENCE_PATH", "Failed to retrieve the information about the directory %s: %v", content, err)
|
||||
@@ -73,6 +74,13 @@ func LoadPreferences(repository string) bool {
|
||||
return false
|
||||
}
|
||||
|
||||
for _, preference := range Preferences {
|
||||
if strings.ToLower(preference.Name) == "ssh" {
|
||||
LOG_ERROR("PREFERENCE_INVALID", "'%s' is an invalid storage name", preference.Name)
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
@@ -90,7 +98,7 @@ func SetDuplicacyPreferencePath(p string) {
|
||||
preferencePath = p
|
||||
}
|
||||
|
||||
func SavePreferences() (bool) {
|
||||
func SavePreferences() bool {
|
||||
description, err := json.MarshalIndent(Preferences, "", " ")
|
||||
if err != nil {
|
||||
LOG_ERROR("PREFERENCE_MARSHAL", "Failed to marshal the repository preferences: %v", err)
|
||||
@@ -98,7 +106,7 @@ func SavePreferences() (bool) {
|
||||
}
|
||||
preferenceFile := path.Join(GetDuplicacyPreferencePath(), "preferences")
|
||||
|
||||
err = ioutil.WriteFile(preferenceFile, description, 0644)
|
||||
err = ioutil.WriteFile(preferenceFile, description, 0600)
|
||||
if err != nil {
|
||||
LOG_ERROR("PREFERENCE_WRITE", "Failed to save the preference file %s: %v", preferenceFile, err)
|
||||
return false
|
||||
@@ -107,10 +115,10 @@ func SavePreferences() (bool) {
|
||||
return true
|
||||
}
|
||||
|
||||
func FindPreference(name string) (*Preference) {
|
||||
for _, preference := range Preferences {
|
||||
func FindPreference(name string) *Preference {
|
||||
for i, preference := range Preferences {
|
||||
if preference.Name == name || preference.StorageURL == name {
|
||||
return &preference
|
||||
return &Preferences[i]
|
||||
}
|
||||
}
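The change from &preference to &Preferences[i] matters because ranging over a slice copies each element into the loop variable, so a pointer to that variable never aliases the slice itself. A small standalone example of the difference (the Preference type is reduced to a single field here for brevity):

package main

import "fmt"

// Preference is trimmed down to one field for this example.
type Preference struct {
	Name string
}

func main() {
	prefs := []Preference{{Name: "default"}, {Name: "offsite"}}

	// Pointer taken from the range variable: it refers to a copy of the
	// element, so it never observes later changes to the slice.
	var fromRangeVar *Preference
	for _, preference := range prefs {
		if preference.Name == "default" {
			fromRangeVar = &preference
		}
	}

	// Pointer taken by indexing, as the fixed FindPreference does: it
	// aliases the slice element itself.
	var fromIndex *Preference
	for i := range prefs {
		if prefs[i].Name == "default" {
			fromIndex = &prefs[i]
		}
	}

	prefs[0].Name = "renamed"
	fmt.Println(fromIndex.Name)                     // "renamed"
	fmt.Println(fromRangeVar.Name == prefs[0].Name) // false: stale copy
}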
|
||||
|
||||
|
||||
src/duplicacy_s3cstorage.go (new file, 196 lines)
@@ -0,0 +1,196 @@
|
||||
// Copyright (c) Acrosync LLC. All rights reserved.
|
||||
// Free for personal use and commercial trial
|
||||
// Commercial use requires per-user licenses available from https://duplicacy.com
|
||||
|
||||
package duplicacy
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"github.com/gilbertchen/goamz/aws"
|
||||
"github.com/gilbertchen/goamz/s3"
|
||||
)
|
||||
|
||||
// S3CStorage is a storage backend for s3 compatible storages that require V2 Signing.
|
||||
type S3CStorage struct {
|
||||
StorageBase
|
||||
|
||||
buckets []*s3.Bucket
|
||||
storageDir string
|
||||
}
|
||||
|
||||
// CreateS3CStorage creates an S3-compatible storage object that uses V2 signing.
|
||||
func CreateS3CStorage(regionName string, endpoint string, bucketName string, storageDir string,
|
||||
accessKey string, secretKey string, threads int) (storage *S3CStorage, err error) {
|
||||
|
||||
var region aws.Region
|
||||
|
||||
if endpoint == "" {
|
||||
if regionName == "" {
|
||||
regionName = "us-east-1"
|
||||
}
|
||||
region = aws.Regions[regionName]
|
||||
} else {
|
||||
region = aws.Region{Name: regionName, S3Endpoint: "https://" + endpoint}
|
||||
}
|
||||
|
||||
auth := aws.Auth{AccessKey: accessKey, SecretKey: secretKey}
|
||||
|
||||
var buckets []*s3.Bucket
|
||||
for i := 0; i < threads; i++ {
|
||||
s3Client := s3.New(auth, region)
|
||||
s3Client.AttemptStrategy = aws.AttemptStrategy{
|
||||
Min: 8,
|
||||
Total: 300 * time.Second,
|
||||
Delay: 1000 * time.Millisecond,
|
||||
}
|
||||
|
||||
bucket := s3Client.Bucket(bucketName)
|
||||
buckets = append(buckets, bucket)
|
||||
}
|
||||
|
||||
if len(storageDir) > 0 && storageDir[len(storageDir)-1] != '/' {
|
||||
storageDir += "/"
|
||||
}
|
||||
|
||||
storage = &S3CStorage{
|
||||
buckets: buckets,
|
||||
storageDir: storageDir,
|
||||
}
|
||||
|
||||
storage.DerivedStorage = storage
|
||||
storage.SetDefaultNestingLevels([]int{0}, 0)
|
||||
return storage, nil
|
||||
}
|
||||
|
||||
// ListFiles returns the list of files and subdirectories under 'dir' (non-recursively)
|
||||
func (storage *S3CStorage) ListFiles(threadIndex int, dir string) (files []string, sizes []int64, err error) {
|
||||
if len(dir) > 0 && dir[len(dir)-1] != '/' {
|
||||
dir += "/"
|
||||
}
|
||||
|
||||
dirLength := len(storage.storageDir + dir)
|
||||
if dir == "snapshots/" {
|
||||
results, err := storage.buckets[threadIndex].List(storage.storageDir+dir, "/", "", 100)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
for _, subDir := range results.CommonPrefixes {
|
||||
files = append(files, subDir[dirLength:])
|
||||
}
|
||||
return files, nil, nil
|
||||
} else if dir == "chunks/" {
|
||||
marker := ""
|
||||
for {
|
||||
results, err := storage.buckets[threadIndex].List(storage.storageDir+dir, "", marker, 1000)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
for _, object := range results.Contents {
|
||||
files = append(files, object.Key[dirLength:])
|
||||
sizes = append(sizes, object.Size)
|
||||
}
|
||||
|
||||
if !results.IsTruncated {
|
||||
break
|
||||
}
|
||||
|
||||
marker = results.Contents[len(results.Contents)-1].Key
|
||||
}
|
||||
return files, sizes, nil
|
||||
|
||||
} else {
|
||||
|
||||
results, err := storage.buckets[threadIndex].List(storage.storageDir+dir, "", "", 1000)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
for _, object := range results.Contents {
|
||||
files = append(files, object.Key[dirLength:])
|
||||
}
|
||||
return files, nil, nil
|
||||
}
|
||||
}
|
||||
|
||||
// DeleteFile deletes the file or directory at 'filePath'.
|
||||
func (storage *S3CStorage) DeleteFile(threadIndex int, filePath string) (err error) {
|
||||
return storage.buckets[threadIndex].Del(storage.storageDir + filePath)
|
||||
}
|
||||
|
||||
// MoveFile renames the file.
|
||||
func (storage *S3CStorage) MoveFile(threadIndex int, from string, to string) (err error) {
|
||||
|
||||
options := s3.CopyOptions{ContentType: "application/duplicacy"}
|
||||
_, err = storage.buckets[threadIndex].PutCopy(storage.storageDir+to, s3.Private, options, storage.buckets[threadIndex].Name+"/"+storage.storageDir+from)
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
return storage.DeleteFile(threadIndex, from)
|
||||
}
|
||||
|
||||
// CreateDirectory creates a new directory.
|
||||
func (storage *S3CStorage) CreateDirectory(threadIndex int, dir string) (err error) {
|
||||
return nil
|
||||
}
|
||||
|
||||
// GetFileInfo returns the information about the file or directory at 'filePath'.
|
||||
func (storage *S3CStorage) GetFileInfo(threadIndex int, filePath string) (exist bool, isDir bool, size int64, err error) {
|
||||
|
||||
response, err := storage.buckets[threadIndex].Head(storage.storageDir+filePath, nil)
|
||||
if err != nil {
|
||||
if e, ok := err.(*s3.Error); ok && (e.StatusCode == 403 || e.StatusCode == 404) {
|
||||
return false, false, 0, nil
|
||||
} else {
|
||||
return false, false, 0, err
|
||||
}
|
||||
}
|
||||
|
||||
if response.StatusCode == 403 || response.StatusCode == 404 {
|
||||
return false, false, 0, nil
|
||||
} else {
|
||||
return true, false, response.ContentLength, nil
|
||||
}
|
||||
}
|
||||
|
||||
// DownloadFile reads the file at 'filePath' into the chunk.
|
||||
func (storage *S3CStorage) DownloadFile(threadIndex int, filePath string, chunk *Chunk) (err error) {
|
||||
|
||||
readCloser, err := storage.buckets[threadIndex].GetReader(storage.storageDir + filePath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
defer readCloser.Close()
|
||||
|
||||
_, err = RateLimitedCopy(chunk, readCloser, storage.DownloadRateLimit/len(storage.buckets))
|
||||
return err
|
||||
|
||||
}
|
||||
|
||||
// UploadFile writes 'content' to the file at 'filePath'.
|
||||
func (storage *S3CStorage) UploadFile(threadIndex int, filePath string, content []byte) (err error) {
|
||||
|
||||
options := s3.Options{}
|
||||
reader := CreateRateLimitedReader(content, storage.UploadRateLimit/len(storage.buckets))
|
||||
return storage.buckets[threadIndex].PutReader(storage.storageDir+filePath, reader, int64(len(content)), "application/duplicacy", s3.Private, options)
|
||||
}
|
||||
|
||||
// If a local snapshot cache is needed for the storage to avoid downloading/uploading chunks too often when
|
||||
// managing snapshots.
|
||||
func (storage *S3CStorage) IsCacheNeeded() bool { return true }
|
||||
|
||||
// If the 'MoveFile' method is implemented.
|
||||
func (storage *S3CStorage) IsMoveFileImplemented() bool { return true }
|
||||
|
||||
// If the storage can guarantee strong consistency.
|
||||
func (storage *S3CStorage) IsStrongConsistent() bool { return false }
|
||||
|
||||
// If the storage supports fast listing of file names.
|
||||
func (storage *S3CStorage) IsFastListing() bool { return true }
|
||||
|
||||
// Enable the test mode.
|
||||
func (storage *S3CStorage) EnableTestMode() {}
|
||||
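Once constructed, the backend above is driven only through the generic Storage interface. A hedged sketch of how a caller might create it and list snapshot IDs; the region, endpoint, bucket and credentials are placeholders rather than values from the diff, and the usual fmt import is assumed:

// Sketch only: create the V2-signing S3-compatible storage and list "snapshots/".
func listS3CSnapshotsExample() error {
    storage, err := CreateS3CStorage("us-east-1", "s3.example.com", "my-bucket", "backups",
        "ACCESS_KEY", "SECRET_KEY", 4)
    if err != nil {
        return err
    }
    ids, _, err := storage.ListFiles(0, "snapshots/")
    if err != nil {
        return err
    }
    for _, id := range ids {
        fmt.Println(id) // each entry is a snapshot ID with a trailing '/'
    }
    return nil
}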
@@ -1,10 +1,13 @@
|
||||
// Copyright (c) Acrosync LLC. All rights reserved.
|
||||
// Licensed under the Fair Source License 0.9 (https://fair.io/)
|
||||
// User Limitation: 5 users
|
||||
// Free for personal use and commercial trial
|
||||
// Commercial use requires per-user licenses available from https://duplicacy.com
|
||||
|
||||
package duplicacy
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"strings"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws"
|
||||
"github.com/aws/aws-sdk-go/aws/awserr"
|
||||
"github.com/aws/aws-sdk-go/aws/credentials"
|
||||
@@ -13,7 +16,7 @@ import (
|
||||
)
|
||||
|
||||
type S3Storage struct {
|
||||
RateLimitedStorage
|
||||
StorageBase
|
||||
|
||||
client *s3.S3
|
||||
bucket string
|
||||
@@ -23,7 +26,8 @@ type S3Storage struct {
|
||||
|
||||
// CreateS3Storage creates an Amazon S3 storage object.
|
||||
func CreateS3Storage(regionName string, endpoint string, bucketName string, storageDir string,
|
||||
accessKey string, secretKey string, threads int) (storage *S3Storage, err error) {
|
||||
accessKey string, secretKey string, threads int,
|
||||
isSSLSupported bool, isMinioCompatible bool) (storage *S3Storage, err error) {
|
||||
|
||||
token := ""
|
||||
|
||||
@@ -49,10 +53,12 @@ func CreateS3Storage(regionName string, endpoint string, bucketName string, stor
|
||||
}
|
||||
}
|
||||
|
||||
config := &aws.Config {
|
||||
s3Config := &aws.Config{
|
||||
Region: aws.String(regionName),
|
||||
Credentials: auth,
|
||||
Endpoint: aws.String(endpoint),
|
||||
S3ForcePathStyle: aws.Bool(isMinioCompatible),
|
||||
DisableSSL: aws.Bool(!isSSLSupported),
|
||||
}
|
||||
|
||||
if len(storageDir) > 0 && storageDir[len(storageDir)-1] != '/' {
|
||||
@@ -60,12 +66,14 @@ func CreateS3Storage(regionName string, endpoint string, bucketName string, stor
|
||||
}
|
||||
|
||||
storage = &S3Storage{
|
||||
client: s3.New(session.New(config)),
|
||||
client: s3.New(session.New(s3Config)),
|
||||
bucket: bucketName,
|
||||
storageDir: storageDir,
|
||||
numberOfThreads: threads,
|
||||
}
|
||||
|
||||
storage.DerivedStorage = storage
|
||||
storage.SetDefaultNestingLevels([]int{0}, 0)
|
||||
return storage, nil
|
||||
}
|
||||
|
||||
@@ -182,25 +190,6 @@ func (storage *S3Storage) GetFileInfo(threadIndex int, filePath string) (exist b
|
||||
}
|
||||
}
|
||||
|
||||
// FindChunk finds the chunk with the specified id. If 'isFossil' is true, it will search for chunk files with
|
||||
// the suffix '.fsl'.
|
||||
func (storage *S3Storage) FindChunk(threadIndex int, chunkID string, isFossil bool) (filePath string, exist bool, size int64, err error) {
|
||||
|
||||
filePath = "chunks/" + chunkID
|
||||
if isFossil {
|
||||
filePath += ".fsl"
|
||||
}
|
||||
|
||||
exist, _, size, err = storage.GetFileInfo(threadIndex, filePath)
|
||||
|
||||
if err != nil {
|
||||
return "", false, 0, err
|
||||
} else {
|
||||
return filePath, exist, size, err
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
// DownloadFile reads the file at 'filePath' into the chunk.
|
||||
func (storage *S3Storage) DownloadFile(threadIndex int, filePath string, chunk *Chunk) (err error) {
|
||||
|
||||
@@ -224,6 +213,9 @@ func (storage *S3Storage) DownloadFile(threadIndex int, filePath string, chunk *
|
||||
// UploadFile writes 'content' to the file at 'filePath'.
|
||||
func (storage *S3Storage) UploadFile(threadIndex int, filePath string, content []byte) (err error) {
|
||||
|
||||
attempts := 0
|
||||
|
||||
for {
|
||||
input := &s3.PutObjectInput{
|
||||
Bucket: aws.String(storage.bucket),
|
||||
Key: aws.String(storage.storageDir + filePath),
|
||||
@@ -233,21 +225,29 @@ func (storage *S3Storage) UploadFile(threadIndex int, filePath string, content [
|
||||
}
|
||||
|
||||
_, err = storage.client.PutObject(input)
|
||||
if err == nil || attempts >= 3 || !strings.Contains(err.Error(), "XAmzContentSHA256Mismatch") {
|
||||
return err
|
||||
}
|
||||
|
||||
LOG_INFO("S3_RETRY", "Retrying on %s: %v", reflect.TypeOf(err), err)
|
||||
attempts += 1
|
||||
}
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
// If a local snapshot cache is needed for the storage to avoid downloading/uploading chunks too often when
|
||||
// managing snapshots.
|
||||
func (storage *S3Storage) IsCacheNeeded () (bool) { return true }
|
||||
func (storage *S3Storage) IsCacheNeeded() bool { return true }
|
||||
|
||||
// If the 'MoveFile' method is implemented.
|
||||
func (storage *S3Storage) IsMoveFileImplemented() (bool) { return true }
|
||||
func (storage *S3Storage) IsMoveFileImplemented() bool { return true }
|
||||
|
||||
// If the storage can guarantee strong consistency.
|
||||
func (storage *S3Storage) IsStrongConsistent() (bool) { return false }
|
||||
func (storage *S3Storage) IsStrongConsistent() bool { return false }
|
||||
|
||||
// If the storage supports fast listing of file names.
|
||||
func (storage *S3Storage) IsFastListing() (bool) { return true }
|
||||
func (storage *S3Storage) IsFastListing() bool { return true }
|
||||
|
||||
// Enable the test mode.
|
||||
func (storage *S3Storage) EnableTestMode() {}
|
||||
|
||||
@@ -1,62 +1,63 @@
|
||||
// Copyright (c) Acrosync LLC. All rights reserved.
|
||||
// Licensed under the Fair Source License 0.9 (https://fair.io/)
|
||||
// User Limitation: 5 users
|
||||
// Free for personal use and commercial trial
|
||||
// Commercial use requires per-user licenses available from https://duplicacy.com
|
||||
|
||||
package duplicacy
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"net"
|
||||
"path"
|
||||
"time"
|
||||
"runtime"
|
||||
"math/rand"
|
||||
"net"
|
||||
"os"
|
||||
"path"
|
||||
"runtime"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"golang.org/x/crypto/ssh"
|
||||
"github.com/pkg/sftp"
|
||||
"golang.org/x/crypto/ssh"
|
||||
)
|
||||
|
||||
type SFTPStorage struct {
|
||||
RateLimitedStorage
|
||||
StorageBase
|
||||
|
||||
client *sftp.Client
|
||||
minimumNesting int // The minimum level of directories to dive into before searching for the chunk file.
|
||||
storageDir string
|
||||
numberOfThreads int
|
||||
}
|
||||
|
||||
func CreateSFTPStorageWithPassword(server string, port int, username string, storageDir string,
|
||||
password string, threads int) (storage *SFTPStorage, err error) {
|
||||
minimumNesting int, password string, threads int) (storage *SFTPStorage, err error) {
|
||||
|
||||
authMethods := []ssh.AuthMethod{ssh.Password(password)}
|
||||
|
||||
|
||||
hostKeyCallback := func(hostname string, remote net.Addr,
|
||||
key ssh.PublicKey) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
return CreateSFTPStorage(server, port, username, storageDir, authMethods, hostKeyCallback, threads)
|
||||
return CreateSFTPStorage(server, port, username, storageDir, minimumNesting, authMethods, hostKeyCallback, threads)
|
||||
}
|
||||
|
||||
func CreateSFTPStorage(server string, port int, username string, storageDir string,
|
||||
func CreateSFTPStorage(server string, port int, username string, storageDir string, minimumNesting int,
|
||||
authMethods []ssh.AuthMethod,
|
||||
hostKeyCallback func(hostname string, remote net.Addr,
|
||||
key ssh.PublicKey) error, threads int) (storage *SFTPStorage, err error) {
|
||||
|
||||
config := &ssh.ClientConfig{
|
||||
sftpConfig := &ssh.ClientConfig{
|
||||
User: username,
|
||||
Auth: authMethods,
|
||||
HostKeyCallback: hostKeyCallback,
|
||||
}
|
||||
|
||||
if server == "sftp.hidrive.strato.com" {
|
||||
config.Ciphers = []string {"aes128-cbc", "aes128-ctr", "aes256-ctr"}
|
||||
sftpConfig.Ciphers = []string{"aes128-cbc", "aes128-ctr", "aes256-ctr"}
|
||||
}
|
||||
|
||||
serverAddress := fmt.Sprintf("%s:%d", server, port)
|
||||
connection, err := ssh.Dial("tcp", serverAddress, config)
|
||||
connection, err := ssh.Dial("tcp", serverAddress, sftpConfig)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -83,6 +84,7 @@ func CreateSFTPStorage(server string, port int, username string, storageDir stri
|
||||
storage = &SFTPStorage{
|
||||
client: client,
|
||||
storageDir: storageDir,
|
||||
minimumNesting: minimumNesting,
|
||||
numberOfThreads: threads,
|
||||
}
|
||||
|
||||
@@ -91,6 +93,8 @@ func CreateSFTPStorage(server string, port int, username string, storageDir stri
|
||||
|
||||
runtime.SetFinalizer(storage, CloseSFTPStorage)
|
||||
|
||||
storage.DerivedStorage = storage
|
||||
storage.SetDefaultNestingLevels([]int{2, 3}, 2)
|
||||
return storage, nil
|
||||
}
|
||||
|
||||
@@ -175,67 +179,6 @@ func (storage *SFTPStorage) GetFileInfo(threadIndex int, filePath string) (exist
|
||||
return true, fileInfo.IsDir(), fileInfo.Size(), nil
|
||||
}
|
||||
|
||||
// FindChunk finds the chunk with the specified id. If 'isFossil' is true, it will search for chunk files with
|
||||
// the suffix '.fsl'.
|
||||
func (storage *SFTPStorage) FindChunk(threadIndex int, chunkID string, isFossil bool) (filePath string, exist bool, size int64, err error) {
|
||||
dir := path.Join(storage.storageDir, "chunks")
|
||||
|
||||
suffix := ""
|
||||
if isFossil {
|
||||
suffix = ".fsl"
|
||||
}
|
||||
|
||||
// The minimum level of directories to dive into before searching for the chunk file.
|
||||
minimumLevel := 2
|
||||
|
||||
for level := 0; level * 2 < len(chunkID); level ++ {
|
||||
if level >= minimumLevel {
|
||||
filePath = path.Join(dir, chunkID[2 * level:]) + suffix
|
||||
if stat, err := storage.client.Stat(filePath); err == nil && !stat.IsDir() {
|
||||
return filePath[len(storage.storageDir) + 1:], true, stat.Size(), nil
|
||||
} else if err == nil && stat.IsDir() {
|
||||
return filePath[len(storage.storageDir) + 1:], true, 0, fmt.Errorf("The path %s is a directory", filePath)
|
||||
}
|
||||
}
|
||||
|
||||
// Find the subdirectory the chunk file may reside.
|
||||
subDir := path.Join(dir, chunkID[2 * level: 2 * level + 2])
|
||||
stat, err := storage.client.Stat(subDir)
|
||||
if err == nil && stat.IsDir() {
|
||||
dir = subDir
|
||||
continue
|
||||
}
|
||||
|
||||
if level < minimumLevel {
|
||||
// Create the subdirectory if it doesn't exist.
|
||||
|
||||
if err == nil && !stat.IsDir() {
|
||||
return "", false, 0, fmt.Errorf("The path %s is not a directory", subDir)
|
||||
}
|
||||
|
||||
err = storage.client.Mkdir(subDir)
|
||||
if err != nil {
|
||||
// The directory may have been created by other threads so check it again.
|
||||
stat, _ := storage.client.Stat(subDir)
|
||||
if stat == nil || !stat.IsDir() {
|
||||
return "", false, 0, fmt.Errorf("Failed to create the directory %s: %v", subDir, err)
|
||||
}
|
||||
}
|
||||
|
||||
dir = subDir
|
||||
continue
|
||||
}
|
||||
|
||||
// The chunk must be under this subdirectory but it doesn't exist.
|
||||
return path.Join(dir, chunkID[2 * level:])[len(storage.storageDir) + 1:] + suffix, false, 0, nil
|
||||
|
||||
}
|
||||
|
||||
LOG_FATAL("CHUNK_FIND", "Chunk %s is still not found after having searched a maximum level of directories",
|
||||
chunkID)
|
||||
return "", false, 0, nil
|
||||
}
|
||||
|
||||
// DownloadFile reads the file at 'filePath' into the chunk.
|
||||
func (storage *SFTPStorage) DownloadFile(threadIndex int, filePath string, chunk *Chunk) (err error) {
|
||||
file, err := storage.client.Open(path.Join(storage.storageDir, filePath))
|
||||
@@ -257,6 +200,30 @@ func (storage *SFTPStorage) UploadFile(threadIndex int, filePath string, content
|
||||
|
||||
fullPath := path.Join(storage.storageDir, filePath)
|
||||
|
||||
dirs := strings.Split(filePath, "/")
|
||||
if len(dirs) > 1 {
|
||||
fullDir := path.Dir(fullPath)
|
||||
_, err := storage.client.Stat(fullDir)
|
||||
if err != nil {
|
||||
// The error may be caused by a non-existent fullDir, or a broken connection. In either case,
|
||||
// we just assume it is the former because there isn't a way to tell which is the case.
|
||||
for i, _ := range dirs[1 : len(dirs)-1] {
|
||||
subDir := path.Join(storage.storageDir, path.Join(dirs[0:i+2]...))
|
||||
// We don't check the error; just keep going blindly but always store the last err
|
||||
err = storage.client.Mkdir(subDir)
|
||||
}
|
||||
|
||||
// If there is an error creating the dirs, we check fullDir one more time, because another thread
|
||||
// may happen to create the same fullDir ahead of this thread
|
||||
if err != nil {
|
||||
_, err := storage.client.Stat(fullDir)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
letters := "abcdefghijklmnopqrstuvwxyz"
|
||||
suffix := make([]byte, 8)
|
||||
for i := range suffix {
|
||||
@@ -285,7 +252,7 @@ func (storage *SFTPStorage) UploadFile(threadIndex int, filePath string, content
|
||||
storage.client.Remove(temporaryFile)
|
||||
return nil
|
||||
} else {
|
||||
return fmt.Errorf("Uploaded file but failed to store it at %s", fullPath)
|
||||
return fmt.Errorf("Uploaded file but failed to store it at %s: %v", fullPath, err)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -294,16 +261,23 @@ func (storage *SFTPStorage) UploadFile(threadIndex int, filePath string, content
|
||||
|
||||
// If a local snapshot cache is needed for the storage to avoid downloading/uploading chunks too often when
|
||||
// managing snapshots.
|
||||
func (storage *SFTPStorage) IsCacheNeeded () (bool) { return true }
|
||||
func (storage *SFTPStorage) IsCacheNeeded() bool { return true }
|
||||
|
||||
// If the 'MoveFile' method is implemented.
|
||||
func (storage *SFTPStorage) IsMoveFileImplemented() (bool) { return true }
|
||||
func (storage *SFTPStorage) IsMoveFileImplemented() bool { return true }
|
||||
|
||||
// If the storage can guarantee strong consistency.
|
||||
func (storage *SFTPStorage) IsStrongConsistent() (bool) { return true }
|
||||
func (storage *SFTPStorage) IsStrongConsistent() bool { return true }
|
||||
|
||||
// If the storage supports fast listing of file names.
|
||||
func (storage *SFTPStorage) IsFastListing() (bool) { return false }
|
||||
func (storage *SFTPStorage) IsFastListing() bool {
|
||||
for _, level := range storage.readLevels {
|
||||
if level > 1 {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// Enable the test mode.
|
||||
func (storage *SFTPStorage) EnableTestMode() {}
|
||||
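The parent-directory handling added to UploadFile above is deliberately race-tolerant: it creates each intermediate directory without checking individual errors and only re-checks the final directory if the last Mkdir failed, since another thread may have created it first. The same pattern in isolation (a sketch; 'client' is assumed to be a *sftp.Client and the path and sftp imports from the diff are assumed):

// Sketch of the race-tolerant directory creation used by SFTPStorage.UploadFile.
func ensureParentDirs(client *sftp.Client, base string, dirs []string) error {
    fullDir := path.Join(base, path.Join(dirs...))
    if _, err := client.Stat(fullDir); err == nil {
        return nil // already present
    }
    var err error
    for i := range dirs {
        // Errors are ignored on purpose; another thread may win the race.
        err = client.Mkdir(path.Join(base, path.Join(dirs[:i+1]...)))
    }
    if err != nil {
        // Re-check once before failing: the directory may exist by now.
        if _, statErr := client.Stat(fullDir); statErr != nil {
            return err
        }
    }
    return nil
}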
|
||||
@@ -1,6 +1,6 @@
|
||||
// Copyright (c) Acrosync LLC. All rights reserved.
|
||||
// Licensed under the Fair Source License 0.9 (https://fair.io/)
|
||||
// User Limitation: 5 users
|
||||
// Free for personal use and commercial trial
|
||||
// Commercial use requires per-user licenses available from https://duplicacy.com
|
||||
|
||||
// +build !windows
|
||||
|
||||
|
||||
@@ -1,15 +1,15 @@
|
||||
// Copyright (c) Acrosync LLC. All rights reserved.
|
||||
// Licensed under the Fair Source License 0.9 (https://fair.io/)
|
||||
// User Limitation: 5 users
|
||||
// Free for personal use and commercial trial
|
||||
// Commercial use requires per-user licenses available from https://duplicacy.com
|
||||
|
||||
package duplicacy
|
||||
|
||||
import (
|
||||
"syscall"
|
||||
"unsafe"
|
||||
"time"
|
||||
"os"
|
||||
"runtime"
|
||||
"syscall"
|
||||
"time"
|
||||
"unsafe"
|
||||
|
||||
ole "github.com/gilbertchen/go-ole"
|
||||
)
|
||||
@@ -77,7 +77,6 @@ func getIVSSAsync(unknown *ole.IUnknown, iid *ole.GUID) (async *IVSSAsync) {
|
||||
return
|
||||
}
|
||||
|
||||
|
||||
//665c1d5f-c218-414d-a05d-7fef5f9d5c86
|
||||
var IID_IVSS = &ole.GUID{0x665c1d5f, 0xc218, 0x414d, [8]byte{0xa0, 0x5d, 0x7f, 0xef, 0x5f, 0x9d, 0x5c, 0x86}}
|
||||
|
||||
@@ -238,7 +237,7 @@ type SnapshotProperties struct {
|
||||
Status int
|
||||
}
|
||||
|
||||
func (vss *IVSS) GetSnapshotProperties(snapshotSetID ole.GUID, properties *SnapshotProperties) (int) {
|
||||
func (vss *IVSS) GetSnapshotProperties(snapshotSetID ole.GUID, properties *SnapshotProperties) int {
|
||||
var ret uintptr
|
||||
if runtime.GOARCH == "386" {
|
||||
address := uint(uintptr(unsafe.Pointer(&snapshotSetID)))
|
||||
@@ -292,8 +291,7 @@ func (vss *IVSS) DeleteSnapshots(snapshotID ole.GUID) (int, int, ole.GUID) {
|
||||
return int(ret), int(deleted), deletedGUID
|
||||
}
|
||||
|
||||
|
||||
func uint16ArrayToString(p *uint16) (string) {
|
||||
func uint16ArrayToString(p *uint16) string {
|
||||
if p == nil {
|
||||
return ""
|
||||
}
|
||||
@@ -418,7 +416,7 @@ func CreateShadowCopy(top string, shadowCopy bool) (shadowTop string) {
|
||||
return top
|
||||
}
|
||||
|
||||
if !async.Wait(20) {
|
||||
if !async.Wait(60) {
|
||||
LOG_ERROR("VSS_GATHER", "Shadow copy creation failed: GatherWriterMetadata didn't finish properly")
|
||||
return top
|
||||
}
|
||||
@@ -458,7 +456,7 @@ func CreateShadowCopy(top string, shadowCopy bool) (shadowTop string) {
|
||||
return top
|
||||
}
|
||||
|
||||
if !async.Wait(20) {
|
||||
if !async.Wait(60) {
|
||||
LOG_ERROR("VSS_PREPARE", "Shadow copy creation failed: PrepareForBackup didn't finish properly")
|
||||
return top
|
||||
}
|
||||
@@ -475,15 +473,13 @@ func CreateShadowCopy(top string, shadowCopy bool) (shadowTop string) {
|
||||
return top
|
||||
}
|
||||
|
||||
if !async.Wait(60) {
|
||||
if !async.Wait(180) {
|
||||
LOG_ERROR("VSS_SNAPSHOT", "Shadow copy creation failed: DoSnapshotSet didn't finish properly")
|
||||
return top
|
||||
}
|
||||
async.Release()
|
||||
|
||||
|
||||
properties := SnapshotProperties {
|
||||
}
|
||||
properties := SnapshotProperties{}
|
||||
|
||||
ret = vssBackupComponent.GetSnapshotProperties(snapshotID, &properties)
|
||||
if ret != 0 {
|
||||
@@ -521,5 +517,3 @@ func CreateShadowCopy(top string, shadowCopy bool) (shadowTop string) {
|
||||
return shadowLink + "\\" + top[2:]
|
||||
|
||||
}
@@ -1,19 +1,19 @@
|
||||
// Copyright (c) Acrosync LLC. All rights reserved.
|
||||
// Licensed under the Fair Source License 0.9 (https://fair.io/)
|
||||
// User Limitation: 5 users
|
||||
// Free for personal use and commercial trial
|
||||
// Commercial use requires per-user licenses available from https://duplicacy.com
|
||||
|
||||
package duplicacy
|
||||
|
||||
import (
|
||||
"os"
|
||||
"fmt"
|
||||
"time"
|
||||
"path"
|
||||
"strings"
|
||||
"strconv"
|
||||
"io/ioutil"
|
||||
"encoding/json"
|
||||
"encoding/hex"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
// Snapshot represents a backup of the repository.
|
||||
@@ -76,17 +76,30 @@ func CreateSnapshotFromDirectory(id string, top string) (snapshot *Snapshot, ski
|
||||
continue
|
||||
}
|
||||
|
||||
if pattern[0] != '+' && pattern[0] != '-' {
|
||||
if pattern[0] == '#' {
|
||||
continue
|
||||
}
|
||||
|
||||
if IsUnspecifiedFilter(pattern) {
|
||||
pattern = "+" + pattern
|
||||
}
|
||||
|
||||
if pattern == "+" || pattern == "-" {
|
||||
if IsEmptyFilter(pattern) {
|
||||
continue
|
||||
}
|
||||
|
||||
if strings.HasPrefix(pattern, "i:") || strings.HasPrefix(pattern, "e:") {
|
||||
valid, err := IsValidRegex(pattern[2:])
|
||||
if !valid || err != nil {
|
||||
LOG_ERROR("SNAPSHOT_FILTER", "Invalid regular expression encountered for filter: \"%s\", error: %v", pattern, err)
|
||||
}
|
||||
}
|
||||
|
||||
patterns = append(patterns, pattern)
|
||||
}
|
||||
|
||||
LOG_DEBUG("REGEX_DEBUG", "There are %d compiled regular expressions stored", len(RegexMap))
|
||||
|
||||
LOG_INFO("SNAPSHOT_FILTER", "Loaded %d include/exclude pattern(s)", len(patterns))
|
||||
|
||||
if IsTracing() {
|
||||
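The filter parsing above treats '#' lines as comments, prepends '+' to patterns with no explicit include/exclude prefix, drops empty '+'/'-' entries, and validates 'i:'/'e:' patterns as regular expressions. Purely as an illustration (the paths are made up), a filters file consistent with these rules could look like:

# keep source and documents, drop build output and temp files
+src/
-src/build/
documents/
i:\.(go|md)$
e:\.tmp$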
@@ -149,6 +162,7 @@ func LoadIncompleteSnapshot() (snapshot *Snapshot) {
|
||||
snapshotFile := path.Join(GetDuplicacyPreferencePath(), "incomplete")
|
||||
description, err := ioutil.ReadFile(snapshotFile)
|
||||
if err != nil {
|
||||
LOG_DEBUG("INCOMPLETE_LOCATE", "Failed to locate incomplete snapshot: %v", err)
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -156,6 +170,7 @@ func LoadIncompleteSnapshot() (snapshot *Snapshot) {
|
||||
|
||||
err = json.Unmarshal(description, &incompleteSnapshot)
|
||||
if err != nil {
|
||||
LOG_DEBUG("INCOMPLETE_PARSE", "Failed to parse incomplete snapshot: %v", err)
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -163,6 +178,7 @@ func LoadIncompleteSnapshot() (snapshot *Snapshot) {
|
||||
for _, chunkHash := range incompleteSnapshot.ChunkHashes {
|
||||
hash, err := hex.DecodeString(chunkHash)
|
||||
if err != nil {
|
||||
LOG_DEBUG("INCOMPLETE_DECODE", "Failed to decode incomplete snapshot: %v", err)
|
||||
return nil
|
||||
}
|
||||
chunkHashes = append(chunkHashes, string(hash))
|
||||
@@ -173,7 +189,7 @@ func LoadIncompleteSnapshot() (snapshot *Snapshot) {
|
||||
ChunkHashes: chunkHashes,
|
||||
ChunkLengths: incompleteSnapshot.ChunkLengths,
|
||||
}
|
||||
LOG_INFO("INCOMPLETE_LOAD", "Incomplete snpashot loaded from %s", snapshotFile)
|
||||
LOG_INFO("INCOMPLETE_LOAD", "Incomplete snapshot loaded from %s", snapshotFile)
|
||||
return snapshot
|
||||
}
|
||||
|
||||
@@ -181,7 +197,8 @@ func LoadIncompleteSnapshot() (snapshot *Snapshot) {
|
||||
func SaveIncompleteSnapshot(snapshot *Snapshot) {
|
||||
var files []*Entry
|
||||
for _, file := range snapshot.Files {
|
||||
if file.EndChunk >= 0 {
|
||||
// All unprocessed files will have a size of -1
|
||||
if file.Size >= 0 {
|
||||
file.Attributes = nil
|
||||
files = append(files, file)
|
||||
} else {
|
||||
@@ -199,7 +216,7 @@ func SaveIncompleteSnapshot(snapshot *Snapshot) {
|
||||
ChunkLengths: snapshot.ChunkLengths,
|
||||
}
|
||||
|
||||
description, err := json.Marshal(incompleteSnapshot)
|
||||
description, err := json.MarshalIndent(incompleteSnapshot, "", " ")
|
||||
if err != nil {
|
||||
LOG_WARN("INCOMPLETE_ENCODE", "Failed to encode the incomplete snapshot: %v", err)
|
||||
return
|
||||
@@ -392,7 +409,7 @@ func (snapshot *Snapshot) SetSequence(sequenceType string, sequence [] string) {
|
||||
}
|
||||
|
||||
// encodeSequence turns a sequence of binary hashes into a sequence of hex hashes.
|
||||
func encodeSequence(sequence[] string) ([] string) {
|
||||
func encodeSequence(sequence []string) []string {
|
||||
|
||||
sequenceInHex := make([]string, len(sequence))
|
||||
|
||||
@@ -402,5 +419,3 @@ func encodeSequence(sequence[] string) ([] string) {
|
||||
|
||||
return sequenceInHex
|
||||
}
@@ -1,23 +1,25 @@
|
||||
// Copyright (c) Acrosync LLC. All rights reserved.
|
||||
// Licensed under the Fair Source License 0.9 (https://fair.io/)
|
||||
// User Limitation: 5 users
|
||||
// Free for personal use and commercial trial
|
||||
// Commercial use requires per-user licenses available from https://duplicacy.com
|
||||
|
||||
package duplicacy
|
||||
|
||||
import (
|
||||
"io"
|
||||
"os"
|
||||
"fmt"
|
||||
"sort"
|
||||
"bytes"
|
||||
"encoding/hex"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"math"
|
||||
"os"
|
||||
"path"
|
||||
"regexp"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
"text/tabwriter"
|
||||
"time"
|
||||
"path"
|
||||
"io/ioutil"
|
||||
"encoding/json"
|
||||
"encoding/hex"
|
||||
|
||||
"github.com/aryann/difflib"
|
||||
)
|
||||
@@ -172,7 +174,6 @@ type SnapshotManager struct {
|
||||
snapshotCache *FileStorage
|
||||
|
||||
chunkDownloader *ChunkDownloader
|
||||
|
||||
}
|
||||
|
||||
// CreateSnapshotManager creates a snapshot manager
|
||||
@@ -232,7 +233,7 @@ type sequenceReader struct {
|
||||
sequence []string
|
||||
buffer *bytes.Buffer
|
||||
index int
|
||||
refillFunc func(hash string) ([]byte)
|
||||
refillFunc func(hash string) []byte
|
||||
}
|
||||
|
||||
// Read reads a new chunk using the refill function when there is no more data in the buffer
|
||||
@@ -275,7 +276,7 @@ func (manager *SnapshotManager) DownloadSnapshotFileSequence(snapshot *Snapshot,
|
||||
reader := sequenceReader{
|
||||
sequence: snapshot.FileSequence,
|
||||
buffer: new(bytes.Buffer),
|
||||
refillFunc: func (chunkHash string) ([]byte) {
|
||||
refillFunc: func(chunkHash string) []byte {
|
||||
i := manager.chunkDownloader.AddChunk(chunkHash)
|
||||
chunk := manager.chunkDownloader.WaitForChunk(i)
|
||||
return chunk.GetBytes()
|
||||
@@ -303,12 +304,8 @@ func (manager *SnapshotManager) DownloadSnapshotFileSequence(snapshot *Snapshot,
|
||||
return false
|
||||
}
|
||||
|
||||
if patterns == nil {
|
||||
if len(patterns) != 0 && !MatchPath(entry.Path, patterns) {
|
||||
entry.Attributes = nil
|
||||
} else if len(patterns) != 0 {
|
||||
if !MatchPath(entry.Path, patterns) {
|
||||
entry.Attributes = nil
|
||||
}
|
||||
}
|
||||
|
||||
files = append(files, &entry)
|
||||
@@ -317,7 +314,6 @@ func (manager *SnapshotManager) DownloadSnapshotFileSequence(snapshot *Snapshot,
|
||||
return true
|
||||
}
|
||||
|
||||
|
||||
// DownloadSnapshotSequence downloads the content represented by a sequence of chunks, and then unmarshal the content
|
||||
// using the specified 'loadFunction'. Its purpose is to decode the chunk sequences representing chunk hashes or chunk lengths
|
||||
// in a snapshot.
|
||||
@@ -333,7 +329,6 @@ func (manager *SnapshotManager) DownloadSnapshotSequence(snapshot *Snapshot, seq
|
||||
|
||||
content := manager.DownloadSequence(sequence)
|
||||
|
||||
|
||||
if len(content) == 0 {
|
||||
LOG_ERROR("SNAPSHOT_PARSE", "Failed to load %s specified in the snapshot %s at revision %d",
|
||||
sequenceType, snapshot.ID, snapshot.Revision)
|
||||
@@ -597,22 +592,6 @@ func (manager *SnapshotManager) ListAllFiles(storage Storage, top string) (allFi
|
||||
allSizes = append(allSizes, sizes[i])
|
||||
}
|
||||
}
|
||||
|
||||
if top == "chunks/" {
|
||||
// We're listing all chunks so this is the perfect place to detect if a directory contains too many
|
||||
// chunks. Create sub-directories if necessary
|
||||
if len(files) > 1024 && !storage.IsFastListing() {
|
||||
for i := 0; i < 256; i++ {
|
||||
subdir := dir + fmt.Sprintf("%02x\n", i)
|
||||
manager.storage.CreateDirectory(0, subdir)
|
||||
}
|
||||
}
|
||||
} else {
|
||||
// Remove chunk sub-directories that are empty
|
||||
if len(files) == 0 && strings.HasPrefix(dir, "chunks/") && dir != "chunks/" {
|
||||
storage.DeleteFile(0, dir)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return allFiles, allSizes
|
||||
@@ -664,7 +643,7 @@ func (manager *SnapshotManager) ListSnapshots(snapshotID string, revisionsToList
|
||||
if snapshotID == "" {
|
||||
snapshotIDs, err = manager.ListSnapshotIDs()
|
||||
if err != nil {
|
||||
LOG_ERROR("SNAPSHOT_LIST", "Failed to list all snpashots: %v", err)
|
||||
LOG_ERROR("SNAPSHOT_LIST", "Failed to list all snapshots: %v", err)
|
||||
return 0
|
||||
}
|
||||
} else {
|
||||
@@ -750,7 +729,7 @@ func (manager *SnapshotManager) ListSnapshots(snapshotID string, revisionsToList
|
||||
}
|
||||
|
||||
// CheckSnapshots checks the integrity of the specified snapshots.
|
||||
func (manager *SnapshotManager) CheckSnapshots(snapshotID string, revisionsToCheck []int, tag string, showStatistics bool,
|
||||
func (manager *SnapshotManager) CheckSnapshots(snapshotID string, revisionsToCheck []int, tag string, showStatistics bool, showTabular bool,
|
||||
checkFiles bool, searchFossils bool, resurrect bool) bool {
|
||||
|
||||
LOG_DEBUG("LIST_PARAMETERS", "id: %s, revisions: %v, tag: %s, showStatistics: %t, checkFiles: %t, searchFossils: %t, resurrect: %t",
|
||||
@@ -787,7 +766,7 @@ func (manager *SnapshotManager) CheckSnapshots(snapshotID string, revisionsToChe
|
||||
if snapshotID == "" || showStatistics {
|
||||
snapshotIDs, err := manager.ListSnapshotIDs()
|
||||
if err != nil {
|
||||
LOG_ERROR("SNAPSHOT_LIST", "Failed to list all snpashots: %v", err)
|
||||
LOG_ERROR("SNAPSHOT_LIST", "Failed to list all snapshots: %v", err)
|
||||
return false
|
||||
}
|
||||
|
||||
@@ -799,7 +778,6 @@ func (manager *SnapshotManager) CheckSnapshots(snapshotID string, revisionsToChe
|
||||
snapshotMap[snapshotID] = nil
|
||||
}
|
||||
|
||||
|
||||
snapshotIDIndex := 0
|
||||
for snapshotID, _ = range snapshotMap {
|
||||
|
||||
@@ -898,8 +876,18 @@ func (manager *SnapshotManager) CheckSnapshots(snapshotID string, revisionsToChe
|
||||
snapshotIDIndex += 1
|
||||
}
|
||||
|
||||
if showTabular {
|
||||
manager.ShowStatisticsTabular(snapshotMap, chunkSizeMap, chunkUniqueMap, chunkSnapshotMap)
|
||||
} else if showStatistics {
|
||||
manager.ShowStatistics(snapshotMap, chunkSizeMap, chunkUniqueMap, chunkSnapshotMap)
|
||||
}
|
||||
|
||||
if showStatistics {
|
||||
return true
|
||||
}
|
||||
|
||||
// Print snapshot and revision statistics
|
||||
func (manager *SnapshotManager) ShowStatistics(snapshotMap map[string][]*Snapshot, chunkSizeMap map[string]int64, chunkUniqueMap map[string]bool,
|
||||
chunkSnapshotMap map[string]int) {
|
||||
for snapshotID, snapshotList := range snapshotMap {
|
||||
|
||||
snapshotChunks := make(map[string]bool)
|
||||
@@ -946,8 +934,87 @@ func (manager *SnapshotManager) CheckSnapshots(snapshotID string, revisionsToChe
|
||||
}
|
||||
}
|
||||
|
||||
return true
|
||||
// Print snapshot and revision statistics in tabular format
|
||||
func (manager *SnapshotManager) ShowStatisticsTabular(snapshotMap map[string][]*Snapshot, chunkSizeMap map[string]int64, chunkUniqueMap map[string]bool,
|
||||
chunkSnapshotMap map[string]int) {
|
||||
tableBuffer := new(bytes.Buffer)
|
||||
tableWriter := tabwriter.NewWriter(tableBuffer, 0, 0, 1, ' ', tabwriter.AlignRight|tabwriter.Debug)
|
||||
|
||||
for snapshotID, snapshotList := range snapshotMap {
|
||||
fmt.Fprintln(tableWriter, "")
|
||||
fmt.Fprintln(tableWriter, " snap \trev \t \tfiles \tbytes \tchunks \tbytes \tuniq \tbytes \tnew \tbytes \t")
|
||||
snapshotChunks := make(map[string]bool)
|
||||
|
||||
earliestSeenChunks := make(map[string]int)
|
||||
|
||||
for _, snapshot := range snapshotList {
|
||||
for _, chunkID := range manager.GetSnapshotChunks(snapshot) {
|
||||
if earliestSeenChunks[chunkID] == 0 {
|
||||
earliestSeenChunks[chunkID] = math.MaxInt32
|
||||
}
|
||||
earliestSeenChunks[chunkID] = MinInt(earliestSeenChunks[chunkID], snapshot.Revision)
|
||||
}
|
||||
}
|
||||
|
||||
for _, snapshot := range snapshotList {
|
||||
|
||||
chunks := make(map[string]bool)
|
||||
for _, chunkID := range manager.GetSnapshotChunks(snapshot) {
|
||||
chunks[chunkID] = true
|
||||
snapshotChunks[chunkID] = true
|
||||
}
|
||||
|
||||
var totalChunkSize int64
|
||||
var uniqueChunkSize int64
|
||||
var totalChunkCount int64
|
||||
var uniqueChunkCount int64
|
||||
var newChunkCount int64
|
||||
var newChunkSize int64
|
||||
|
||||
for chunkID, _ := range chunks {
|
||||
chunkSize := chunkSizeMap[chunkID]
|
||||
totalChunkSize += chunkSize
|
||||
totalChunkCount += 1
|
||||
if earliestSeenChunks[chunkID] == snapshot.Revision {
|
||||
newChunkCount += 1
|
||||
newChunkSize += chunkSize
|
||||
}
|
||||
if chunkUniqueMap[chunkID] {
|
||||
uniqueChunkSize += chunkSize
|
||||
uniqueChunkCount += 1
|
||||
}
|
||||
}
|
||||
|
||||
files := " \t "
|
||||
if snapshot.FileSize != 0 && snapshot.NumberOfFiles != 0 {
|
||||
files = fmt.Sprintf("%d \t%s", snapshot.NumberOfFiles, PrettyNumber(snapshot.FileSize))
|
||||
}
|
||||
creationTime := time.Unix(snapshot.StartTime, 0).Format("2006-01-02 15:04")
|
||||
fmt.Fprintln(tableWriter, fmt.Sprintf(
|
||||
"%s \t%d \t@ %s %5s \t%s \t%d \t%s \t%d \t%s \t%d \t%s \t",
|
||||
snapshotID, snapshot.Revision, creationTime, snapshot.Options, files, totalChunkCount, PrettyNumber(totalChunkSize), uniqueChunkCount, PrettyNumber(uniqueChunkSize), newChunkCount, PrettyNumber(newChunkSize)))
|
||||
}
|
||||
|
||||
var totalChunkSize int64
|
||||
var uniqueChunkSize int64
|
||||
var totalChunkCount int64
|
||||
var uniqueChunkCount int64
|
||||
for chunkID, _ := range snapshotChunks {
|
||||
chunkSize := chunkSizeMap[chunkID]
|
||||
totalChunkSize += chunkSize
|
||||
totalChunkCount += 1
|
||||
|
||||
if chunkSnapshotMap[chunkID] != -1 {
|
||||
uniqueChunkSize += chunkSize
|
||||
uniqueChunkCount += 1
|
||||
}
|
||||
}
|
||||
fmt.Fprintln(tableWriter, fmt.Sprintf(
|
||||
"%s \tall \t \t \t \t%d \t%s \t%d \t%s \t \t \t",
|
||||
snapshotID, totalChunkCount, PrettyNumber(totalChunkSize), uniqueChunkCount, PrettyNumber(uniqueChunkSize)))
|
||||
}
|
||||
tableWriter.Flush()
|
||||
LOG_INFO("SNAPSHOT_CHECK", tableBuffer.String())
|
||||
}
|
||||
|
||||
// ConvertSequence converts a sequence of chunk hashes into a sequence of chunk ids.
|
||||
@@ -1038,7 +1105,7 @@ func (manager *SnapshotManager) VerifySnapshot(snapshot *Snapshot) bool {
|
||||
}
|
||||
|
||||
// RetrieveFile retrieves the file in the specified snapshot.
|
||||
func (manager *SnapshotManager) RetrieveFile(snapshot *Snapshot, file *Entry, output func([]byte)()) bool {
|
||||
func (manager *SnapshotManager) RetrieveFile(snapshot *Snapshot, file *Entry, output func([]byte)) bool {
|
||||
|
||||
if file.Size == 0 {
|
||||
return true
|
||||
@@ -1092,15 +1159,18 @@ func (manager *SnapshotManager) RetrieveFile(snapshot *Snapshot, file *Entry, ou
|
||||
}
|
||||
|
||||
// FindFile returns the file entry that has the given file name.
|
||||
func (manager *SnapshotManager) FindFile(snapshot *Snapshot, filePath string) (*Entry) {
|
||||
func (manager *SnapshotManager) FindFile(snapshot *Snapshot, filePath string, suppressError bool) *Entry {
|
||||
for _, entry := range snapshot.Files {
|
||||
if entry.Path == filePath {
|
||||
return entry
|
||||
}
|
||||
}
|
||||
|
||||
if !suppressError {
|
||||
LOG_ERROR("SNAPSHOT_FIND", "No file %s found in snapshot %s at revision %d",
|
||||
filePath, snapshot.ID, snapshot.Revision)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -1139,7 +1209,7 @@ func (manager *SnapshotManager) PrintFile(snapshotID string, revision int, path
|
||||
return true
|
||||
}
|
||||
|
||||
file := manager.FindFile(snapshot, path)
|
||||
file := manager.FindFile(snapshot, path, false)
|
||||
var content []byte
|
||||
if !manager.RetrieveFile(snapshot, file, func(chunk []byte) { content = append(content, chunk...) }) {
|
||||
LOG_ERROR("SNAPSHOT_RETRIEVE", "File %s is corrupted in snapshot %s at revision %d",
|
||||
@@ -1147,7 +1217,7 @@ func (manager *SnapshotManager) PrintFile(snapshotID string, revision int, path
|
||||
return false
|
||||
}
|
||||
|
||||
fmt.Printf("%s\n", string(content))
|
||||
fmt.Printf("%s", string(content))
|
||||
|
||||
return true
|
||||
}
|
||||
@@ -1188,7 +1258,6 @@ func (manager *SnapshotManager) Diff(top string, snapshotID string, revisions []
|
||||
leftSnapshot = manager.DownloadSnapshot(snapshotID, revisions[0])
|
||||
}
|
||||
|
||||
|
||||
if len(filePath) > 0 {
|
||||
|
||||
manager.DownloadSnapshotContents(leftSnapshot, nil)
|
||||
@@ -1197,7 +1266,7 @@ func (manager *SnapshotManager) Diff(top string, snapshotID string, revisions []
|
||||
}
|
||||
|
||||
var leftFile []byte
|
||||
if !manager.RetrieveFile(leftSnapshot, manager.FindFile(leftSnapshot, filePath), func(content []byte) {
|
||||
if !manager.RetrieveFile(leftSnapshot, manager.FindFile(leftSnapshot, filePath, false), func(content []byte) {
|
||||
leftFile = append(leftFile, content...)
|
||||
}) {
|
||||
LOG_ERROR("SNAPSHOT_DIFF", "File %s is corrupted in snapshot %s at revision %d",
|
||||
@@ -1207,7 +1276,7 @@ func (manager *SnapshotManager) Diff(top string, snapshotID string, revisions []
|
||||
|
||||
var rightFile []byte
|
||||
if rightSnapshot != nil {
|
||||
if !manager.RetrieveFile(rightSnapshot, manager.FindFile(rightSnapshot, filePath), func(content []byte) {
|
||||
if !manager.RetrieveFile(rightSnapshot, manager.FindFile(rightSnapshot, filePath, false), func(content []byte) {
|
||||
rightFile = append(rightFile, content...)
|
||||
}) {
|
||||
LOG_ERROR("SNAPSHOT_DIFF", "File %s is corrupted in snapshot %s at revision %d",
|
||||
@@ -1376,7 +1445,7 @@ func (manager *SnapshotManager) ShowHistory(top string, snapshotID string, revis
|
||||
for _, revision := range revisions {
|
||||
snapshot := manager.DownloadSnapshot(snapshotID, revision)
|
||||
manager.DownloadSnapshotFileSequence(snapshot, nil)
|
||||
file := manager.FindFile(snapshot, filePath)
|
||||
file := manager.FindFile(snapshot, filePath, true)
|
||||
|
||||
if file != nil {
|
||||
|
||||
@@ -1394,7 +1463,6 @@ func (manager *SnapshotManager) ShowHistory(top string, snapshotID string, revis
|
||||
LOG_INFO("SNAPSHOT_HISTORY", "%7d:", revision)
|
||||
}
|
||||
|
||||
|
||||
}
|
||||
|
||||
stat, err := os.Stat(joinPath(top, filePath))
|
||||
@@ -1420,7 +1488,7 @@ func (manager *SnapshotManager) ShowHistory(top string, snapshotID string, revis
|
||||
|
||||
// fossilizeChunk turns the chunk into a fossil.
|
||||
func (manager *SnapshotManager) fossilizeChunk(chunkID string, filePath string,
|
||||
exclusive bool, collection *FossilCollection) (bool) {
|
||||
exclusive bool, collection *FossilCollection) bool {
|
||||
if exclusive {
|
||||
err := manager.storage.DeleteFile(0, filePath)
|
||||
if err != nil {
|
||||
@@ -1456,7 +1524,7 @@ func (manager *SnapshotManager) fossilizeChunk(chunkID string, filePath string,
|
||||
}
|
||||
|
||||
// resurrectChunk turns the fossil back into a chunk
|
||||
func (manager *SnapshotManager) resurrectChunk(fossilPath string, chunkID string) (bool) {
|
||||
func (manager *SnapshotManager) resurrectChunk(fossilPath string, chunkID string) bool {
|
||||
chunkPath, exist, _, err := manager.storage.FindChunk(0, chunkID, false)
|
||||
if err != nil {
|
||||
LOG_ERROR("CHUNK_FIND", "Failed to locate the path for the chunk %s: %v", chunkID, err)
|
||||
@@ -1479,8 +1547,6 @@ func (manager *SnapshotManager) resurrectChunk(fossilPath string, chunkID string
|
||||
return true
|
||||
}
|
||||
|
||||
|
||||
|
||||
// PruneSnapshots deletes snapshots by revisions, tags, or a retention policy. The main idea is two-step
|
||||
// fossil collection.
|
||||
// 1. Delete snapshots specified by revision, retention policy, with a tag. Find any resulting unreferenced
|
||||
@@ -1593,7 +1659,7 @@ func (manager *SnapshotManager) PruneSnapshots(selfID string, snapshotID string,
|
||||
// because we need to find out which chunks are not referenced.
|
||||
snapshotIDs, err := manager.ListSnapshotIDs()
|
||||
if err != nil {
|
||||
LOG_ERROR("SNAPSHOT_LIST", "Failed to list all snpashots: %v", err)
|
||||
LOG_ERROR("SNAPSHOT_LIST", "Failed to list all snapshots: %v", err)
|
||||
return false
|
||||
}
|
||||
|
||||
@@ -1630,7 +1696,7 @@ func (manager *SnapshotManager) PruneSnapshots(selfID string, snapshotID string,
|
||||
|
||||
referencedFossils := make(map[string]bool)
|
||||
|
||||
// Find fossil collections previsouly created, and delete fossils and temporary files in them if they are
|
||||
// Find fossil collections previously created, and delete fossils and temporary files in them if they are
|
||||
// deletable.
|
||||
for _, collectionName := range collections {
|
||||
|
||||
@@ -1765,6 +1831,8 @@ func (manager *SnapshotManager) PruneSnapshots(selfID string, snapshotID string,
|
||||
// If revisions are specified ignore tags and the retention policy.
|
||||
for _, snapshot := range snapshots {
|
||||
if _, found := revisionMap[snapshot.Revision]; found {
|
||||
LOG_DEBUG("SNAPSHOT_DELETE", "Snapshot %s at revision %d to be deleted - specified in command",
|
||||
snapshot.ID, snapshot.Revision)
|
||||
snapshot.Flag = true
|
||||
toBeDeleted++
|
||||
}
|
||||
@@ -1802,12 +1870,16 @@ func (manager *SnapshotManager) PruneSnapshots(selfID string, snapshotID string,
|
||||
if i < len(retentionPolicies) {
|
||||
if retentionPolicies[i].Interval == 0 {
|
||||
// No snapshots to keep if interval is 0
|
||||
LOG_DEBUG("SNAPSHOT_DELETE", "Snapshot %s at revision %d to be deleted - older than %d days",
|
||||
snapshot.ID, snapshot.Revision, retentionPolicies[i].Age)
|
||||
snapshot.Flag = true
|
||||
toBeDeleted++
|
||||
} else if lastSnapshotTime != 0 &&
|
||||
int(snapshot.StartTime-lastSnapshotTime) < retentionPolicies[i].Interval*secondsInDay-600 {
|
||||
// Delete the snapshot if it is too close to the last kept one. Note that a tolerance of 10
|
||||
// minutes was subtracted from the interval.
|
||||
LOG_DEBUG("SNAPSHOT_DELETE", "Snapshot %s at revision %d to be deleted - older than %d days, less than %d days from previous",
|
||||
snapshot.ID, snapshot.Revision, retentionPolicies[i].Age, retentionPolicies[i].Interval)
|
||||
snapshot.Flag = true
|
||||
toBeDeleted++
|
||||
} else {
|
||||
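In the retention check above, a snapshot is deleted when its gap from the last kept snapshot is smaller than the policy interval (in days) minus a 10-minute tolerance, while an interval of 0 removes every snapshot older than the policy age. A minimal sketch of the comparison with illustrative numbers (not taken from the diff):

// Sketch: with a 7-day interval, a snapshot taken 6 days 23 hours after the last
// kept one is still deleted, because 601200 < 7*86400-600 = 604200.
func retentionExample() bool {
    const secondsInDay = 24 * 3600
    interval := 7                   // retentionPolicies[i].Interval, in days
    gap := 6*secondsInDay + 23*3600 // snapshot.StartTime - lastSnapshotTime, in seconds
    return gap < interval*secondsInDay-600
}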
@@ -2021,7 +2093,6 @@ func (manager *SnapshotManager) PruneSnapshots(selfID string, snapshotID string,
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
}
|
||||
|
||||
// Save the fossil collection if it is not empty.
|
||||
@@ -2066,7 +2137,7 @@ func (manager *SnapshotManager) PruneSnapshots(selfID string, snapshotID string,
|
||||
snapshot.ID, snapshot.Revision)
|
||||
}
|
||||
manager.snapshotCache.DeleteFile(0, snapshotPath)
|
||||
fmt.Fprintf(logFile, "Deleted snapshot %s at revision %d\n", snapshot.ID, snapshot.Revision)
|
||||
fmt.Fprintf(logFile, "Deleted cached snapshot %s at revision %d\n", snapshot.ID, snapshot.Revision)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -2075,17 +2146,20 @@ func (manager *SnapshotManager) PruneSnapshots(selfID string, snapshotID string,
|
||||
"No fossil collection has been created since deleted snapshots did not reference any unique chunks")
|
||||
}
|
||||
|
||||
var latestSnapshot *Snapshot
|
||||
var latestSnapshot *Snapshot = nil
|
||||
if len(allSnapshots[selfID]) > 0 {
|
||||
latestSnapshot = allSnapshots[selfID][len(allSnapshots[selfID])-1]
|
||||
}
|
||||
|
||||
if latestSnapshot != nil && !latestSnapshot.Flag {
|
||||
manager.CleanSnapshotCache(latestSnapshot, allSnapshots)
|
||||
} else {
|
||||
manager.CleanSnapshotCache(nil, allSnapshots)
|
||||
}
|
||||
|
||||
return true
|
||||
}
// CheckSnapshot performs sanity checks on the given snapshot.
|
||||
func (manager *SnapshotManager) CheckSnapshot(snapshot *Snapshot) (err error) {
|
||||
|
||||
@@ -2181,7 +2255,9 @@ func (manager *SnapshotManager) CheckSnapshot(snapshot *Snapshot) (err error) {
|
||||
if len(entries) > 0 && entries[0].StartChunk != 0 {
|
||||
return fmt.Errorf("The first file starts at chunk %d", entries[0].StartChunk)
|
||||
}
|
||||
if lastChunk < numberOfChunks - 1 {
|
||||
|
||||
// There may be a last chunk whose size is 0 so we allow this to happen
|
||||
if lastChunk < numberOfChunks-2 {
|
||||
return fmt.Errorf("The last file ends at chunk %d but the number of chunks is %d", lastChunk, numberOfChunks)
|
||||
}
|
||||
|
||||
|
||||
@@ -1,19 +1,19 @@
|
||||
// Copyright (c) Acrosync LLC. All rights reserved.
|
||||
// Licensed under the Fair Source License 0.9 (https://fair.io/)
|
||||
// User Limitation: 5 users
|
||||
// Free for personal use and commercial trial
|
||||
// Commercial use requires per-user licenses available from https://duplicacy.com
|
||||
|
||||
package duplicacy
|
||||
|
||||
import (
|
||||
"testing"
|
||||
"os"
|
||||
"crypto/rand"
|
||||
"encoding/hex"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"time"
|
||||
"os"
|
||||
"path"
|
||||
"strings"
|
||||
"crypto/rand"
|
||||
"encoding/json"
|
||||
"encoding/hex"
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
func createDummySnapshot(snapshotID string, revision int, endTime int64) *Snapshot {
|
||||
@@ -95,14 +95,14 @@ func createTestSnapshotManager(testDir string) *SnapshotManager {
|
||||
os.RemoveAll(testDir)
|
||||
os.MkdirAll(testDir, 0700)
|
||||
|
||||
storage, _ := CreateFileStorage(testDir, 1)
|
||||
storage, _ := CreateFileStorage(testDir, false, 1)
|
||||
storage.CreateDirectory(0, "chunks")
|
||||
storage.CreateDirectory(0, "snapshots")
|
||||
config := CreateConfig()
|
||||
snapshotManager := CreateSnapshotManager(config, storage)
|
||||
|
||||
cacheDir := path.Join(testDir, "cache")
|
||||
snapshotCache, _ := CreateFileStorage(cacheDir, 1)
|
||||
snapshotCache, _ := CreateFileStorage(cacheDir, false, 1)
|
||||
snapshotCache.CreateDirectory(0, "chunks")
|
||||
snapshotCache.CreateDirectory(0, "snapshots")
|
||||
|
||||
@@ -181,7 +181,7 @@ func checkTestSnapshots(manager *SnapshotManager, expectedSnapshots int, expecte
|
||||
|
||||
snapshotIDs, err = manager.ListSnapshotIDs()
|
||||
if err != nil {
|
||||
LOG_ERROR("SNAPSHOT_LIST", "Failed to list all snpashots: %v", err)
|
||||
LOG_ERROR("SNAPSHOT_LIST", "Failed to list all snapshots: %v", err)
|
||||
return
|
||||
}
|
||||
|
||||
|
||||
@@ -1,27 +1,30 @@
|
||||
// Copyright (c) Acrosync LLC. All rights reserved.
|
||||
// Licensed under the Fair Source License 0.9 (https://fair.io/)
|
||||
// User Limitation: 5 users
|
||||
// Free for personal use and commercial trial
|
||||
// Commercial use requires per-user licenses available from https://duplicacy.com
|
||||
|
||||
package duplicacy
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"regexp"
|
||||
"strings"
|
||||
"strconv"
|
||||
"os"
|
||||
"net"
|
||||
"path"
|
||||
"io/ioutil"
|
||||
"net"
|
||||
"os"
|
||||
"path"
|
||||
"regexp"
|
||||
"runtime"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"golang.org/x/crypto/ssh"
|
||||
"golang.org/x/crypto/ssh/agent"
|
||||
)
|
||||
|
||||
type Storage interface {
|
||||
// ListFiles return the list of files and subdirectories under 'dir' (non-recursively)
|
||||
ListFiles(threadIndex int, dir string) (files []string, size []int64, err error)
|
||||
// ListFiles returns the list of files and subdirectories under 'dir'. Subdirectories returned must have a trailing '/', with
|
||||
// a size of 0. If 'dir' is 'snapshots', only subdirectories will be returned. If 'dir' is 'snapshots/repository_id', then only
|
||||
// files will be returned. If 'dir' is 'chunks', the implementation can return the list either recursively or non-recursively.
|
||||
ListFiles(threadIndex int, dir string) (files []string, sizes []int64, err error)
|
||||
|
||||
// DeleteFile deletes the file or directory at 'filePath'.
|
||||
DeleteFile(threadIndex int, filePath string) (err error)
|
||||
@@ -45,18 +48,21 @@ type Storage interface {
|
||||
// UploadFile writes 'content' to the file at 'filePath'.
|
||||
UploadFile(threadIndex int, filePath string, content []byte) (err error)
|
||||
|
||||
// SetNestingLevels sets up the chunk nesting structure.
|
||||
SetNestingLevels(config *Config)
|
||||
|
||||
// If a local snapshot cache is needed for the storage to avoid downloading/uploading chunks too often when
|
||||
// managing snapshots.
|
||||
IsCacheNeeded() (bool)
|
||||
IsCacheNeeded() bool
|
||||
|
||||
// If the 'MoveFile' method is implemented.
|
||||
IsMoveFileImplemented() (bool)
|
||||
IsMoveFileImplemented() bool
|
||||
|
||||
// If the storage can guarantee strong consistency.
|
||||
IsStrongConsistent() (bool)
|
||||
IsStrongConsistent() bool
|
||||
|
||||
// If the storage supports fast listing of file names.
|
||||
IsFastListing() (bool)
|
||||
IsFastListing() bool
|
||||
|
||||
// Enable the test mode.
|
||||
EnableTestMode()
|
||||
@@ -65,19 +71,102 @@ type Storage interface {
|
||||
SetRateLimits(downloadRateLimit int, uploadRateLimit int)
|
||||
}
|
||||
|
||||
type RateLimitedStorage struct {
|
||||
DownloadRateLimit int
|
||||
UploadRateLimit int
|
||||
// StorageBase is the base struct from which all storages are derived
|
||||
type StorageBase struct {
|
||||
DownloadRateLimit int // Maximum download rate (bytes/seconds)
|
||||
UploadRateLimit int // Maximum upload rate (bytes/seconds)
|
||||
|
||||
DerivedStorage Storage // Used as the pointer to the derived storage class
|
||||
|
||||
readLevels []int // At which nesting level to find the chunk with the given id
|
||||
writeLevel int // Store the uploaded chunk to this level
|
||||
}
|
||||
|
||||
func (storage *RateLimitedStorage) SetRateLimits(downloadRateLimit int, uploadRateLimit int) {
|
||||
// SetRateLimits sets the maximum download and upload rates
|
||||
func (storage *StorageBase) SetRateLimits(downloadRateLimit int, uploadRateLimit int) {
|
||||
storage.DownloadRateLimit = downloadRateLimit
|
||||
storage.UploadRateLimit = uploadRateLimit
|
||||
}
|
||||
|
||||
// SetDefaultNestingLevels sets the default read and write levels. This is usually called by
|
||||
// derived storages to set the levels with old values so that storages initialized by earlier versions
|
||||
// will continue to work.
|
||||
func (storage *StorageBase) SetDefaultNestingLevels(readLevels []int, writeLevel int) {
|
||||
storage.readLevels = readLevels
|
||||
storage.writeLevel = writeLevel
|
||||
}
|
||||
|
||||
// SetNestingLevels sets the new read and write levels (normally both at 1) if the 'config' file has
|
||||
// the 'fixed-nesting' key, or if a file named 'nesting' exists on the storage.
|
||||
func (storage *StorageBase) SetNestingLevels(config *Config) {
|
||||
|
||||
// 'FixedNesting' is true only for the 'config' file with the new format (2.0.10+)
|
||||
if config.FixedNesting {
|
||||
|
||||
storage.readLevels = nil
|
||||
|
||||
// Check if the 'nesting' file exists
|
||||
exist, _, _, err := storage.DerivedStorage.GetFileInfo(0, "nesting")
|
||||
if err == nil && exist {
|
||||
nestingFile := CreateChunk(CreateConfig(), true)
|
||||
if storage.DerivedStorage.DownloadFile(0, "nesting", nestingFile) == nil {
|
||||
var nesting struct {
|
||||
ReadLevels []int `json:"read-levels"`
|
||||
WriteLevel int `json:"write-level"`
|
||||
}
|
||||
if json.Unmarshal(nestingFile.GetBytes(), &nesting) == nil {
|
||||
storage.readLevels = nesting.ReadLevels
|
||||
storage.writeLevel = nesting.WriteLevel
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if len(storage.readLevels) == 0 {
|
||||
storage.readLevels = []int{1}
|
||||
storage.writeLevel = 1
|
||||
}
|
||||
}
|
||||
|
||||
LOG_DEBUG("STORAGE_NESTING", "Chunk read levels: %v, write level: %d", storage.readLevels, storage.writeLevel)
|
||||
for _, level := range storage.readLevels {
|
||||
if storage.writeLevel == level {
|
||||
return
|
||||
}
|
||||
}
|
||||
LOG_ERROR("STORAGE_NESTING", "The write level %d isn't in the read levels %v", storage.readLevels, storage.writeLevel)
|
||||
}
|
||||
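SetNestingLevels above looks for an optional 'nesting' file at the storage root and decodes it into the anonymous struct with the 'read-levels' and 'write-level' JSON keys. A sketch of such a file (the level values are illustrative; the write level must appear among the read levels):

{
    "read-levels": [1, 2],
    "write-level": 1
}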
|
||||
// FindChunk finds the chunk with the specified id at the levels one by one as specified by 'readLevels'.
|
||||
func (storage *StorageBase) FindChunk(threadIndex int, chunkID string, isFossil bool) (filePath string, exist bool, size int64, err error) {
|
||||
chunkPaths := make([]string, 0)
|
||||
for _, level := range storage.readLevels {
|
||||
chunkPath := "chunks/"
|
||||
for i := 0; i < level; i++ {
|
||||
chunkPath += chunkID[2*i:2*i+2] + "/"
|
||||
}
|
||||
chunkPath += chunkID[2*level:]
|
||||
if isFossil {
|
||||
chunkPath += ".fsl"
|
||||
}
|
||||
exist, _, size, err = storage.DerivedStorage.GetFileInfo(threadIndex, chunkPath)
|
||||
if err == nil && exist {
|
||||
return chunkPath, exist, size, err
|
||||
}
|
||||
chunkPaths = append(chunkPaths, chunkPath)
|
||||
}
|
||||
for i, level := range storage.readLevels {
|
||||
if storage.writeLevel == level {
|
||||
return chunkPaths[i], false, 0, nil
|
||||
}
|
||||
}
|
||||
return "", false, 0, fmt.Errorf("Invalid chunk nesting setup")
|
||||
}
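To make the path construction in FindChunk concrete, the standalone sketch below mirrors the same loop: the first `level` pairs of hex characters of the chunk ID become directory names, the remainder becomes the file name, and fossils get a `.fsl` suffix. The chunk ID is shortened for illustration; real IDs are 64 hex characters:

```go
package main

import "fmt"

// nestedChunkPath mirrors the path construction used by FindChunk.
func nestedChunkPath(chunkID string, level int, isFossil bool) string {
	p := "chunks/"
	for i := 0; i < level; i++ {
		p += chunkID[2*i:2*i+2] + "/"
	}
	p += chunkID[2*level:]
	if isFossil {
		p += ".fsl"
	}
	return p
}

func main() {
	id := "0a1b2c3d4e5f" // shortened hypothetical chunk ID
	fmt.Println(nestedChunkPath(id, 1, false)) // chunks/0a/1b2c3d4e5f
	fmt.Println(nestedChunkPath(id, 2, true))  // chunks/0a/1b/2c3d4e5f.fsl
}
```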
|
||||
|
||||
func checkHostKey(hostname string, remote net.Addr, key ssh.PublicKey) error {
|
||||
|
||||
preferencePath := GetDuplicacyPreferencePath()
|
||||
if preferencePath == "" {
|
||||
return fmt.Errorf("Can't verify SSH host since the preference path is not set")
|
||||
}
|
||||
hostFile := path.Join(preferencePath, "known_hosts")
|
||||
file, err := os.OpenFile(hostFile, os.O_RDWR|os.O_CREATE, 0600)
|
||||
if err != nil {
|
||||
@@ -127,6 +216,7 @@ func CreateStorage(preference Preference, resetPassword bool, threads int) (stor
|
||||
storageURL := preference.StorageURL
|
||||
|
||||
isFileStorage := false
|
||||
isCacheNeeded := false
|
||||
|
||||
if strings.HasPrefix(storageURL, "/") {
|
||||
isFileStorage = true
|
||||
@@ -140,11 +230,30 @@ func CreateStorage(preference Preference, resetPassword bool, threads int) (stor
|
||||
|
||||
if !isFileStorage && strings.HasPrefix(storageURL, `\\`) {
|
||||
isFileStorage = true
|
||||
isCacheNeeded = true
|
||||
}
|
||||
}
|
||||
|
||||
if isFileStorage {
|
||||
fileStorage, err := CreateFileStorage(storageURL, threads)
|
||||
fileStorage, err := CreateFileStorage(storageURL, isCacheNeeded, threads)
|
||||
if err != nil {
|
||||
LOG_ERROR("STORAGE_CREATE", "Failed to load the file storage at %s: %v", storageURL, err)
|
||||
return nil
|
||||
}
|
||||
return fileStorage
|
||||
}
|
||||
|
||||
if strings.HasPrefix(storageURL, "flat://") {
|
||||
fileStorage, err := CreateFileStorage(storageURL[7:], false, threads)
|
||||
if err != nil {
|
||||
LOG_ERROR("STORAGE_CREATE", "Failed to load the file storage at %s: %v", storageURL, err)
|
||||
return nil
|
||||
}
|
||||
return fileStorage
|
||||
}
|
||||
|
||||
if strings.HasPrefix(storageURL, "samba://") {
|
||||
fileStorage, err := CreateFileStorage(storageURL[8:], true, threads)
|
||||
if err != nil {
|
||||
LOG_ERROR("STORAGE_CREATE", "Failed to load the file storage at %s: %v", storageURL, err)
|
||||
return nil
|
||||
@@ -180,6 +289,9 @@ func CreateStorage(preference Preference, resetPassword bool, threads int) (stor
|
||||
username = username[:len(username)-1]
|
||||
}
|
||||
|
||||
// If ssh_key_file is set, skip password-based login
|
||||
keyFile := GetPasswordFromPreference(preference, "ssh_key_file")
|
||||
|
||||
password := ""
|
||||
passwordCallback := func() (string, error) {
|
||||
LOG_DEBUG("SSH_PASSWORD", "Attempting password login")
|
||||
@@ -199,7 +311,6 @@ func CreateStorage(preference Preference, resetPassword bool, threads int) (stor
|
||||
}
|
||||
}
|
||||
|
||||
keyFile := ""
|
||||
publicKeysCallback := func() ([]ssh.Signer, error) {
|
||||
LOG_DEBUG("SSH_PUBLICKEY", "Attempting public key authentication")
|
||||
|
||||
@@ -215,6 +326,8 @@ func CreateStorage(preference Preference, resetPassword bool, threads int) (stor
|
||||
signers, err = sshAgent.Signers()
|
||||
if err != nil {
|
||||
LOG_DEBUG("SSH_AGENT", "Can't log in using public key authentication via agent: %v", err)
|
||||
} else if len(signers) == 0 {
|
||||
LOG_DEBUG("SSH_AGENT", "SSH agent doesn't return any signer")
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -252,11 +365,19 @@ func CreateStorage(preference Preference, resetPassword bool, threads int) (stor
|
||||
|
||||
}
|
||||
|
||||
authMethods := [] ssh.AuthMethod {
|
||||
authMethods := []ssh.AuthMethod{}
|
||||
passwordAuthMethods := []ssh.AuthMethod{
|
||||
ssh.PasswordCallback(passwordCallback),
|
||||
ssh.KeyboardInteractive(keyboardInteractive),
|
||||
}
|
||||
keyFileAuthMethods := []ssh.AuthMethod{
|
||||
ssh.PublicKeysCallback(publicKeysCallback),
|
||||
}
|
||||
if keyFile != "" {
|
||||
authMethods = append(keyFileAuthMethods, passwordAuthMethods...)
|
||||
} else {
|
||||
authMethods = append(passwordAuthMethods, keyFileAuthMethods...)
|
||||
}
|
||||
|
||||
if RunInBackground {
|
||||
|
||||
@@ -281,7 +402,7 @@ func CreateStorage(preference Preference, resetPassword bool, threads int) (stor
|
||||
return checkHostKey(hostname, remote, key)
|
||||
}
|
||||
|
||||
sftpStorage, err := CreateSFTPStorage(server, port, username, storageDir, authMethods, hostKeyChecker, threads)
|
||||
sftpStorage, err := CreateSFTPStorage(server, port, username, storageDir, 2, authMethods, hostKeyChecker, threads)
|
||||
if err != nil {
|
||||
LOG_ERROR("STORAGE_CREATE", "Failed to load the SFTP storage at %s: %v", storageURL, err)
|
||||
return nil
|
||||
@@ -293,7 +414,7 @@ func CreateStorage(preference Preference, resetPassword bool, threads int) (stor
|
||||
SavePassword(preference, "ssh_password", password)
|
||||
}
|
||||
return sftpStorage
|
||||
} else if matched[1] == "s3" {
|
||||
} else if matched[1] == "s3" || matched[1] == "s3c" || matched[1] == "minio" || matched[1] == "minios" {
|
||||
|
||||
// urlRegex := regexp.MustCompile(`^(\w+)://([\w\-]+@)?([^/]+)(/(.+))?`)
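The commented-out regex above shows the URL shape expected for the S3-family backends, `scheme://region@host/path`. The sketch below runs that same regex against a made-up URL (the host and path are illustrative, not real endpoints) to show which capture groups the code works with:

```go
package main

import (
	"fmt"
	"regexp"
)

func main() {
	urlRegex := regexp.MustCompile(`^(\w+)://([\w\-]+@)?([^/]+)(/(.+))?`)
	matched := urlRegex.FindStringSubmatch("minio://us-east-1@minio.example.com/backups/duplicacy")
	fmt.Println(matched[1]) // minio
	fmt.Println(matched[2]) // us-east-1@
	fmt.Println(matched[3]) // minio.example.com
	fmt.Println(matched[5]) // backups/duplicacy
}
```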
|
||||
|
||||
@@ -319,19 +440,31 @@ func CreateStorage(preference Preference, resetPassword bool, threads int) (stor
|
||||
accessKey := GetPassword(preference, "s3_id", "Enter S3 Access Key ID:", true, resetPassword)
|
||||
secretKey := GetPassword(preference, "s3_secret", "Enter S3 Secret Access Key:", true, resetPassword)
|
||||
|
||||
s3Storage, err := CreateS3Storage(region, endpoint, bucket, storageDir, accessKey, secretKey, threads)
|
||||
var err error
|
||||
|
||||
if matched[1] == "s3c" {
|
||||
storage, err = CreateS3CStorage(region, endpoint, bucket, storageDir, accessKey, secretKey, threads)
|
||||
if err != nil {
|
||||
LOG_ERROR("STORAGE_CREATE", "Failed to load the S3C storage at %s: %v", storageURL, err)
|
||||
return nil
|
||||
}
|
||||
} else {
|
||||
isMinioCompatible := (matched[1] == "minio" || matched[1] == "minios")
|
||||
isSSLSupported := (matched[1] == "s3" || matched[1] == "minios")
|
||||
storage, err = CreateS3Storage(region, endpoint, bucket, storageDir, accessKey, secretKey, threads, isSSLSupported, isMinioCompatible)
|
||||
if err != nil {
|
||||
LOG_ERROR("STORAGE_CREATE", "Failed to load the S3 storage at %s: %v", storageURL, err)
|
||||
return nil
|
||||
}
|
||||
}
|
||||
SavePassword(preference, "s3_id", accessKey)
|
||||
SavePassword(preference, "s3_secret", secretKey)
|
||||
|
||||
return s3Storage
|
||||
return storage
|
||||
} else if matched[1] == "dropbox" {
|
||||
storageDir := matched[3] + matched[5]
|
||||
token := GetPassword(preference, "dropbox_token", "Enter Dropbox access token:", true, resetPassword)
|
||||
dropboxStorage, err := CreateDropboxStorage(token, storageDir, threads)
|
||||
dropboxStorage, err := CreateDropboxStorage(token, storageDir, 1, threads)
|
||||
if err != nil {
|
||||
LOG_ERROR("STORAGE_CREATE", "Failed to load the dropbox storage: %v", err)
|
||||
return nil
|
||||
|
||||
@@ -1,23 +1,22 @@
|
||||
// Copyright (c) Acrosync LLC. All rights reserved.
|
||||
// Licensed under the Fair Source License 0.9 (https://fair.io/)
|
||||
// User Limitation: 5 users
|
||||
// Free for personal use and commercial trial
|
||||
// Commercial use requires per-user licenses available from https://duplicacy.com
|
||||
|
||||
package duplicacy
|
||||
|
||||
import (
|
||||
"os"
|
||||
"fmt"
|
||||
"time"
|
||||
"flag"
|
||||
"path"
|
||||
"testing"
|
||||
"strings"
|
||||
"strconv"
|
||||
"io/ioutil"
|
||||
"crypto/sha256"
|
||||
"encoding/hex"
|
||||
"encoding/json"
|
||||
"flag"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path"
|
||||
"runtime/debug"
|
||||
"strconv"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
crypto_rand "crypto/rand"
|
||||
"math/rand"
|
||||
@@ -41,51 +40,104 @@ func init() {
|
||||
func loadStorage(localStoragePath string, threads int) (Storage, error) {
|
||||
|
||||
if testStorageName == "" || testStorageName == "file" {
|
||||
return CreateFileStorage(localStoragePath, threads)
|
||||
storage, err := CreateFileStorage(localStoragePath, false, threads)
|
||||
if storage != nil {
|
||||
// Use a read level of at least 2 because this will catch more errors than a read level of 1.
|
||||
storage.SetDefaultNestingLevels([]int{2, 3}, 2)
|
||||
}
|
||||
return storage, err
|
||||
}
|
||||
|
||||
config, err := ioutil.ReadFile("test_storage.conf")
|
||||
description, err := ioutil.ReadFile("test_storage.conf")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
storages := make(map[string]map[string]string)
|
||||
configs := make(map[string]map[string]string)
|
||||
|
||||
err = json.Unmarshal(config, &storages)
|
||||
err = json.Unmarshal(description, &configs)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
storage, found := storages[testStorageName]
|
||||
config, found := configs[testStorageName]
|
||||
if !found {
|
||||
return nil, fmt.Errorf("No storage named '%s' found", testStorageName)
|
||||
}
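loadStorage reads test_storage.conf as a JSON object mapping each storage name to a flat set of string settings (the keys such as "server", "port", "bucket" used further down). The example below shows a hypothetical conf file with made-up values and parses it the same way:

```go
package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	// Hypothetical test_storage.conf contents; keys match those read by loadStorage.
	sample := []byte(`{
	    "sftp": {
	        "server": "backup.example.com",
	        "port": "22",
	        "username": "tester",
	        "directory": "/storage/duplicacy",
	        "password": "secret"
	    }
	}`)

	configs := make(map[string]map[string]string)
	if err := json.Unmarshal(sample, &configs); err != nil {
		panic(err)
	}
	fmt.Println(configs["sftp"]["server"]) // backup.example.com
}
```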
|
||||
|
||||
if testStorageName == "sftp" {
|
||||
port, _ := strconv.Atoi(storage["port"])
|
||||
return CreateSFTPStorageWithPassword(storage["server"], port, storage["username"], storage["directory"], storage["password"], threads)
|
||||
} else if testStorageName == "s3" {
|
||||
return CreateS3Storage(storage["region"], storage["endpoint"], storage["bucket"], storage["directory"], storage["access_key"], storage["secret_key"], threads)
|
||||
if testStorageName == "flat" {
|
||||
storage, err := CreateFileStorage(localStoragePath, false, threads)
|
||||
storage.SetDefaultNestingLevels([]int{2, 3}, 2)
|
||||
return storage, err
|
||||
} else if testStorageName == "samba" {
|
||||
storage, err := CreateFileStorage(localStoragePath, true, threads)
|
||||
storage.SetDefaultNestingLevels([]int{2, 3}, 2)
|
||||
return storage, err
|
||||
} else if testStorageName == "sftp" {
|
||||
port, _ := strconv.Atoi(config["port"])
|
||||
storage, err := CreateSFTPStorageWithPassword(config["server"], port, config["username"], config["directory"], 2, config["password"], threads)
|
||||
storage.SetDefaultNestingLevels([]int{2, 3}, 2)
|
||||
return storage, err
|
||||
} else if testStorageName == "s3" || testStorageName == "wasabi" {
|
||||
storage, err := CreateS3Storage(config["region"], config["endpoint"], config["bucket"], config["directory"], config["access_key"], config["secret_key"], threads, true, false)
|
||||
storage.SetDefaultNestingLevels([]int{2, 3}, 2)
|
||||
return storage, err
|
||||
} else if testStorageName == "s3c" {
|
||||
storage, err := CreateS3CStorage(config["region"], config["endpoint"], config["bucket"], config["directory"], config["access_key"], config["secret_key"], threads)
|
||||
storage.SetDefaultNestingLevels([]int{2, 3}, 2)
|
||||
return storage, err
|
||||
} else if testStorageName == "digitalocean" {
|
||||
storage, err := CreateS3CStorage(config["region"], config["endpoint"], config["bucket"], config["directory"], config["access_key"], config["secret_key"], threads)
|
||||
storage.SetDefaultNestingLevels([]int{2, 3}, 2)
|
||||
return storage, err
|
||||
} else if testStorageName == "minio" {
|
||||
storage, err := CreateS3Storage(config["region"], config["endpoint"], config["bucket"], config["directory"], config["access_key"], config["secret_key"], threads, false, true)
|
||||
storage.SetDefaultNestingLevels([]int{2, 3}, 2)
|
||||
return storage, err
|
||||
} else if testStorageName == "minios" {
|
||||
storage, err := CreateS3Storage(config["region"], config["endpoint"], config["bucket"], config["directory"], config["access_key"], config["secret_key"], threads, true, true)
|
||||
storage.SetDefaultNestingLevels([]int{2, 3}, 2)
|
||||
return storage, err
|
||||
} else if testStorageName == "dropbox" {
|
||||
return CreateDropboxStorage(storage["token"], storage["directory"], threads)
|
||||
storage, err := CreateDropboxStorage(config["token"], config["directory"], 1, threads)
|
||||
storage.SetDefaultNestingLevels([]int{2, 3}, 2)
|
||||
return storage, err
|
||||
} else if testStorageName == "b2" {
|
||||
return CreateB2Storage(storage["account"], storage["key"], storage["bucket"], threads)
|
||||
storage, err := CreateB2Storage(config["account"], config["key"], config["bucket"], threads)
|
||||
storage.SetDefaultNestingLevels([]int{2, 3}, 2)
|
||||
return storage, err
|
||||
} else if testStorageName == "gcs-s3" {
|
||||
return CreateS3Storage(storage["region"], storage["endpoint"], storage["bucket"], storage["directory"], storage["access_key"], storage["secret_key"], threads)
|
||||
storage, err := CreateS3Storage(config["region"], config["endpoint"], config["bucket"], config["directory"], config["access_key"], config["secret_key"], threads, true, false)
|
||||
storage.SetDefaultNestingLevels([]int{2, 3}, 2)
|
||||
return storage, err
|
||||
} else if testStorageName == "gcs" {
|
||||
return CreateGCSStorage(storage["token_file"], storage["bucket"], storage["directory"], threads)
|
||||
storage, err := CreateGCSStorage(config["token_file"], config["bucket"], config["directory"], threads)
|
||||
storage.SetDefaultNestingLevels([]int{2, 3}, 2)
|
||||
return storage, err
|
||||
} else if testStorageName == "gcs-sa" {
|
||||
return CreateGCSStorage(storage["token_file"], storage["bucket"], storage["directory"], threads)
|
||||
storage, err := CreateGCSStorage(config["token_file"], config["bucket"], config["directory"], threads)
|
||||
storage.SetDefaultNestingLevels([]int{2, 3}, 2)
|
||||
return storage, err
|
||||
} else if testStorageName == "azure" {
|
||||
return CreateAzureStorage(storage["account"], storage["key"], storage["container"], threads)
|
||||
storage, err := CreateAzureStorage(config["account"], config["key"], config["container"], threads)
|
||||
storage.SetDefaultNestingLevels([]int{2, 3}, 2)
|
||||
return storage, err
|
||||
} else if testStorageName == "acd" {
|
||||
return CreateACDStorage(storage["token_file"], storage["storage_path"], threads)
|
||||
storage, err := CreateACDStorage(config["token_file"], config["storage_path"], threads)
|
||||
storage.SetDefaultNestingLevels([]int{2, 3}, 2)
|
||||
return storage, err
|
||||
} else if testStorageName == "gcd" {
|
||||
return CreateGCDStorage(storage["token_file"], storage["storage_path"], threads)
|
||||
storage, err := CreateGCDStorage(config["token_file"], config["storage_path"], threads)
|
||||
storage.SetDefaultNestingLevels([]int{2, 3}, 2)
|
||||
return storage, err
|
||||
} else if testStorageName == "one" {
|
||||
return CreateOneDriveStorage(storage["token_file"], storage["storage_path"], threads)
|
||||
storage, err := CreateOneDriveStorage(config["token_file"], config["storage_path"], threads)
|
||||
storage.SetDefaultNestingLevels([]int{2, 3}, 2)
|
||||
return storage, err
|
||||
} else if testStorageName == "hubic" {
|
||||
return CreateHubicStorage(storage["token_file"], storage["storage_path"], threads)
|
||||
storage, err := CreateHubicStorage(config["token_file"], config["storage_path"], threads)
|
||||
storage.SetDefaultNestingLevels([]int{2, 3}, 2)
|
||||
return storage, err
|
||||
} else {
|
||||
return nil, fmt.Errorf("Invalid storage named: %s", testStorageName)
|
||||
}
|
||||
@@ -256,6 +308,33 @@ func TestStorage(t *testing.T) {
|
||||
|
||||
storage.CreateDirectory(0, "snapshots/repository1")
|
||||
storage.CreateDirectory(0, "snapshots/repository2")
|
||||
|
||||
storage.CreateDirectory(0, "shared")
|
||||
|
||||
// Upload to the same directory by multiple goroutines
|
||||
count := 8
|
||||
finished := make(chan int, count)
|
||||
for i := 0; i < count; i++ {
|
||||
go func(name string) {
|
||||
err := storage.UploadFile(0, name, []byte("this is a test file"))
|
||||
if err != nil {
|
||||
t.Errorf("Error to upload '%s': %v", name, err)
|
||||
}
|
||||
finished <- 0
|
||||
}(fmt.Sprintf("shared/a/b/c/%d", i))
|
||||
}
|
||||
|
||||
for i := 0; i < count; i++ {
|
||||
<-finished
|
||||
}
|
||||
|
||||
for i := 0; i < count; i++ {
|
||||
storage.DeleteFile(0, fmt.Sprintf("shared/a/b/c/%d", i))
|
||||
}
|
||||
storage.DeleteFile(0, "shared/a/b/c")
|
||||
storage.DeleteFile(0, "shared/a/b")
|
||||
storage.DeleteFile(0, "shared/a")
|
||||
|
||||
time.Sleep(time.Duration(delay) * time.Second)
|
||||
{
|
||||
|
||||
@@ -328,7 +407,7 @@ func TestStorage(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
numberOfFiles := 20
|
||||
numberOfFiles := 10
|
||||
maxFileSize := 64 * 1024
|
||||
|
||||
if testQuickMode {
|
||||
@@ -364,15 +443,7 @@ func TestStorage(t *testing.T) {
|
||||
t.Errorf("Failed to upload the file %s: %v", filePath, err)
|
||||
return
|
||||
}
|
||||
LOG_INFO("STORAGE_CHUNK", "Uploaded chunk: %s, size: %d", chunkID, len(content))
|
||||
}
|
||||
|
||||
allChunks := [] string {}
|
||||
for _, file := range listChunks(storage) {
|
||||
file = strings.Replace(file, "/", "", -1)
|
||||
if len(file) == 64 {
|
||||
allChunks = append(allChunks, file)
|
||||
}
|
||||
LOG_INFO("STORAGE_CHUNK", "Uploaded chunk: %s, size: %d", filePath, len(content))
|
||||
}
|
||||
|
||||
LOG_INFO("STORAGE_FOSSIL", "Making %s a fossil", chunks[0])
|
||||
@@ -386,7 +457,6 @@ func TestStorage(t *testing.T) {
|
||||
|
||||
chunk := CreateChunk(config, true)
|
||||
|
||||
|
||||
for _, chunkID := range chunks {
|
||||
|
||||
chunk.Reset(false)
|
||||
@@ -403,7 +473,7 @@ func TestStorage(t *testing.T) {
|
||||
t.Errorf("Error downloading file %s: %v", filePath, err)
|
||||
continue
|
||||
}
|
||||
LOG_INFO("STORAGE_CHUNK", "Downloaded chunk: %s, size: %d", chunkID, chunk.GetLength())
|
||||
LOG_INFO("STORAGE_CHUNK", "Downloaded chunk: %s, size: %d", filePath, chunk.GetLength())
|
||||
}
|
||||
|
||||
hasher := sha256.New()
|
||||
@@ -438,6 +508,11 @@ func TestStorage(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
allChunks := []string{}
|
||||
for _, file := range listChunks(storage) {
|
||||
allChunks = append(allChunks, file)
|
||||
}
|
||||
|
||||
for _, file := range allChunks {
|
||||
|
||||
err = storage.DeleteFile(0, "chunks/"+file)
|
||||
@@ -448,3 +523,72 @@ func TestStorage(t *testing.T) {
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func TestCleanStorage(t *testing.T) {
|
||||
setTestingT(t)
|
||||
SetLoggingLevel(INFO)
|
||||
|
||||
defer func() {
|
||||
if r := recover(); r != nil {
|
||||
switch e := r.(type) {
|
||||
case Exception:
|
||||
t.Errorf("%s %s", e.LogID, e.Message)
|
||||
debug.PrintStack()
|
||||
default:
|
||||
t.Errorf("%v", e)
|
||||
debug.PrintStack()
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
testDir := path.Join(os.TempDir(), "duplicacy_test", "storage_test")
|
||||
os.RemoveAll(testDir)
|
||||
os.MkdirAll(testDir, 0700)
|
||||
|
||||
LOG_INFO("STORAGE_TEST", "storage: %s", testStorageName)
|
||||
|
||||
storage, err := loadStorage(testDir, 1)
|
||||
if err != nil {
|
||||
t.Errorf("Failed to create storage: %v", err)
|
||||
return
|
||||
}
|
||||
|
||||
directories := make([]string, 0, 1024)
|
||||
directories = append(directories, "snapshots/")
|
||||
directories = append(directories, "chunks/")
|
||||
|
||||
for len(directories) > 0 {
|
||||
|
||||
dir := directories[len(directories)-1]
|
||||
directories = directories[:len(directories)-1]
|
||||
|
||||
LOG_INFO("LIST_FILES", "Listing %s", dir)
|
||||
|
||||
files, _, err := storage.ListFiles(0, dir)
|
||||
if err != nil {
|
||||
LOG_ERROR("LIST_FILES", "Failed to list the directory %s: %v", dir, err)
|
||||
return
|
||||
}
|
||||
|
||||
for _, file := range files {
|
||||
if len(file) > 0 && file[len(file)-1] == '/' {
|
||||
directories = append(directories, dir+file)
|
||||
} else {
|
||||
storage.DeleteFile(0, dir+file)
|
||||
LOG_INFO("DELETE_FILE", "Deleted file %s", file)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
storage.DeleteFile(0, "config")
|
||||
LOG_INFO("DELETE_FILE", "Deleted config")
|
||||
|
||||
|
||||
files, _, err := storage.ListFiles(0, "chunks/")
|
||||
for _, file := range files {
|
||||
if len(file) > 0 && file[len(file)-1] != '/' {
|
||||
LOG_DEBUG("FILE_EXIST", "File %s exists after deletion", file)
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
@@ -1,26 +1,25 @@
|
||||
// Copyright (c) Acrosync LLC. All rights reserved.
|
||||
// Licensed under the Fair Source License 0.9 (https://fair.io/)
|
||||
// User Limitation: 5 users
|
||||
// Free for personal use and commercial trial
|
||||
// Commercial use requires per-user licenses available from https://duplicacy.com
|
||||
|
||||
package duplicacy
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"bufio"
|
||||
"crypto/sha256"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"time"
|
||||
"os"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"regexp"
|
||||
"strings"
|
||||
"strconv"
|
||||
"runtime"
|
||||
"crypto/sha256"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"golang.org/x/crypto/pbkdf2"
|
||||
"github.com/gilbertchen/gopass"
|
||||
"golang.org/x/crypto/pbkdf2"
|
||||
)
|
||||
|
||||
var RunInBackground bool = false
|
||||
@@ -32,7 +31,17 @@ type RateLimitedReader struct {
|
||||
StartTime time.Time
|
||||
}
|
||||
|
||||
func CreateRateLimitedReader(content []byte, rate int) (*RateLimitedReader) {
|
||||
var RegexMap map[string]*regexp.Regexp
|
||||
|
||||
func init() {
|
||||
|
||||
if RegexMap == nil {
|
||||
RegexMap = make(map[string]*regexp.Regexp)
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func CreateRateLimitedReader(content []byte, rate int) *RateLimitedReader {
|
||||
return &RateLimitedReader{
|
||||
Content: content,
|
||||
Rate: float64(rate * 1024),
|
||||
@@ -40,7 +49,42 @@ func CreateRateLimitedReader(content []byte, rate int) (*RateLimitedReader) {
|
||||
}
|
||||
}
|
||||
|
||||
func (reader *RateLimitedReader) Length() (int64) {
|
||||
func IsEmptyFilter(pattern string) bool {
|
||||
if pattern == "+" || pattern == "-" || pattern == "i:" || pattern == "e:" {
|
||||
return true
|
||||
} else {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
func IsUnspecifiedFilter(pattern string) bool {
|
||||
if pattern[0] != '+' && pattern[0] != '-' && pattern[0] != 'i' && pattern[0] != 'e' {
|
||||
return true
|
||||
} else {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
func IsValidRegex(pattern string) (valid bool, err error) {
|
||||
|
||||
var re *regexp.Regexp = nil
|
||||
|
||||
if re, valid = RegexMap[pattern]; valid && re != nil {
|
||||
return true, nil
|
||||
}
|
||||
|
||||
re, err = regexp.Compile(pattern)
|
||||
|
||||
if err != nil {
|
||||
return false, err
|
||||
} else {
|
||||
RegexMap[pattern] = re
|
||||
LOG_DEBUG("REGEX_STORED", "Saved compiled regex for pattern \"%s\", regex=%#v", pattern, re)
|
||||
return true, err
|
||||
}
|
||||
}
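IsValidRegex compiles a pattern at most once and keeps the compiled regexp in the package-level RegexMap so later lookups are answered from the cache. The following standalone sketch demonstrates the same caching idea with local names (`regexCache`, `cachedCompile`), not duplicacy's actual functions:

```go
package main

import (
	"fmt"
	"regexp"
)

// regexCache holds one compiled regexp per pattern, like RegexMap above.
var regexCache = map[string]*regexp.Regexp{}

func cachedCompile(pattern string) (*regexp.Regexp, error) {
	if re, ok := regexCache[pattern]; ok {
		return re, nil
	}
	re, err := regexp.Compile(pattern)
	if err != nil {
		return nil, err
	}
	regexCache[pattern] = re
	return re, nil
}

func main() {
	re, err := cachedCompile(`\.tmp$`)
	if err != nil {
		panic(err)
	}
	fmt.Println(re.MatchString("build/output.tmp")) // true

	// A second call with the same pattern is served from the cache.
	re2, _ := cachedCompile(`\.tmp$`)
	fmt.Println(re2 == re) // true
}
```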
|
||||
|
||||
func (reader *RateLimitedReader) Length() int64 {
|
||||
return int64(len(reader.Content))
|
||||
}
|
||||
|
||||
@@ -115,14 +159,12 @@ func RateLimitedCopy(writer io.Writer, reader io.Reader, rate int) (written int6
|
||||
}
|
||||
|
||||
// GenerateKeyFromPassword generates a key from the password.
|
||||
func GenerateKeyFromPassword(password string) []byte {
|
||||
return pbkdf2.Key([]byte(password), DEFAULT_KEY, 16384, 32, sha256.New)
|
||||
func GenerateKeyFromPassword(password string, salt []byte, iterations int) []byte {
|
||||
return pbkdf2.Key([]byte(password), salt, iterations, 32, sha256.New)
|
||||
}
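The new signature derives a 32-byte key with PBKDF2-SHA256 from a caller-supplied salt and iteration count instead of the previously hard-coded values. A minimal usage sketch with illustrative inputs (the real salt and iteration count come from the storage configuration):

```go
package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"

	"golang.org/x/crypto/pbkdf2"
)

func main() {
	salt := []byte("example-salt") // illustrative only
	key := pbkdf2.Key([]byte("correct horse battery staple"), salt, 16384, 32, sha256.New)
	fmt.Println(hex.EncodeToString(key)) // 32-byte key, hex-encoded
}
```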
|
||||
|
||||
// GetPassword attempts to get the password from KeyChain/KeyRing, environment variables, or keyboard input.
|
||||
func GetPassword(preference Preference, passwordType string, prompt string,
|
||||
showPassword bool, resetPassword bool) (string) {
|
||||
|
||||
// Get password from preference, env, but don't start any keyring request
|
||||
func GetPasswordFromPreference(preference Preference, passwordType string) string {
|
||||
passwordID := passwordType
|
||||
if preference.Name != "default" {
|
||||
passwordID = preference.Name + "_" + passwordID
|
||||
@@ -136,16 +178,42 @@ func GetPassword(preference Preference, passwordType string, prompt string,
|
||||
}
|
||||
}
|
||||
|
||||
// If the password is stored in the preference, there is no need to include the storage name
|
||||
// (i.e., preference.Name) in the key, so the key name should really be passwordType rather
|
||||
// than passwordID; we're using passwordID here only for backward compatibility
|
||||
if len(preference.Keys) > 0 && len(preference.Keys[passwordID]) > 0 {
|
||||
LOG_DEBUG("PASSWORD_KEYCHAIN", "Reading %s from preferences", passwordID)
|
||||
LOG_DEBUG("PASSWORD_PREFERENCE", "Reading %s from preferences", passwordID)
|
||||
return preference.Keys[passwordID]
|
||||
}
|
||||
|
||||
if len(preference.Keys) > 0 && len(preference.Keys[passwordType]) > 0 {
|
||||
LOG_DEBUG("PASSWORD_PREFERENCE", "Reading %s from preferences", passwordType)
|
||||
return preference.Keys[passwordType]
|
||||
}
|
||||
|
||||
return ""
|
||||
}
|
||||
|
||||
// GetPassword attempts to get the password from KeyChain/KeyRing, environment variables, or keyboard input.
|
||||
func GetPassword(preference Preference, passwordType string, prompt string,
|
||||
showPassword bool, resetPassword bool) string {
|
||||
passwordID := passwordType
|
||||
|
||||
preferencePassword := GetPasswordFromPreference(preference, passwordType)
|
||||
if preferencePassword != "" {
|
||||
return preferencePassword
|
||||
}
|
||||
|
||||
if preference.Name != "default" {
|
||||
passwordID = preference.Name + "_" + passwordID
|
||||
}
|
||||
|
||||
if resetPassword && !RunInBackground {
|
||||
keyringSet(passwordID, "")
|
||||
} else {
|
||||
password := keyringGet(passwordID)
|
||||
if password != "" {
|
||||
LOG_DEBUG("PASSWORD_KEYCHAIN", "Reading %s from keychain/keyring", passwordType)
|
||||
return password
|
||||
}
|
||||
|
||||
@@ -176,6 +244,7 @@ func GetPassword(preference Preference, passwordType string, prompt string,
|
||||
|
||||
// SavePassword saves the specified password in the keyring/keychain.
|
||||
func SavePassword(preference Preference, passwordType string, password string) {
|
||||
|
||||
if password == "" || RunInBackground {
|
||||
return
|
||||
}
|
||||
@@ -183,6 +252,12 @@ func SavePassword(preference Preference, passwordType string, password string) {
|
||||
if preference.DoNotSavePassword {
|
||||
return
|
||||
}
|
||||
|
||||
// If the password is retrieved from env or preference, don't save it to keyring
|
||||
if GetPasswordFromPreference(preference, passwordType) == password {
|
||||
return
|
||||
}
|
||||
|
||||
passwordID := passwordType
|
||||
if preference.Name != "default" {
|
||||
passwordID = preference.Name + "_" + passwordID
|
||||
@@ -190,54 +265,6 @@ func SavePassword(preference Preference, passwordType string, password string) {
|
||||
keyringSet(passwordID, password)
|
||||
}
|
||||
|
||||
// RemoveEmptyDirectories removes all empty subdirectories under top.
|
||||
func RemoveEmptyDirectories(top string) {
|
||||
|
||||
stack := make([]string, 0, 256)
|
||||
|
||||
stack = append(stack, top)
|
||||
|
||||
for len(stack) > 0 {
|
||||
|
||||
dir := stack[len(stack) - 1]
|
||||
stack = stack[:len(stack) - 1]
|
||||
|
||||
files, err := ioutil.ReadDir(dir)
|
||||
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
|
||||
for _, file := range files {
|
||||
if file.IsDir() && file.Name()[0] != '.' {
|
||||
stack = append(stack, path.Join(dir, file.Name()))
|
||||
}
|
||||
}
|
||||
|
||||
if len(files) == 0 {
|
||||
if os.Remove(dir) != nil {
|
||||
continue
|
||||
}
|
||||
|
||||
dir = path.Dir(dir)
|
||||
for (len(dir) > len(top)) {
|
||||
files, err := ioutil.ReadDir(dir)
|
||||
if err != nil {
|
||||
break
|
||||
}
|
||||
|
||||
if len(files) == 0 {
|
||||
if os.Remove(dir) != nil {
|
||||
break;
|
||||
}
|
||||
}
|
||||
dir = path.Dir(dir)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
// The following code was modified from the online article 'Matching Wildcards: An Algorithm', by Kirk J. Krauss,
|
||||
// Dr. Dobb's, August 26, 2008. However, the version in the article doesn't handle cases like matching 'abcccd'
|
||||
// against '*ccd', and the version here fixed that issue.
|
||||
@@ -308,9 +335,13 @@ func matchPattern(text string, pattern string) bool {
|
||||
// include patterns, and included otherwise.
|
||||
func MatchPath(filePath string, patterns []string) (included bool) {
|
||||
|
||||
allIncludes := true
|
||||
for _, pattern := range patterns {
|
||||
var re *regexp.Regexp = nil
|
||||
var found bool
|
||||
var matched bool
|
||||
|
||||
allIncludes := true
|
||||
|
||||
for _, pattern := range patterns {
|
||||
if pattern[0] == '+' {
|
||||
if matchPattern(filePath, pattern[1:]) {
|
||||
return true
|
||||
@@ -320,6 +351,24 @@ func MatchPath(filePath string, patterns [] string) (included bool) {
|
||||
if matchPattern(filePath, pattern[1:]) {
|
||||
return false
|
||||
}
|
||||
} else if strings.HasPrefix(pattern, "i:") || strings.HasPrefix(pattern, "e:") {
|
||||
if re, found = RegexMap[pattern[2:]]; found {
|
||||
matched = re.MatchString(filePath)
|
||||
} else {
|
||||
re, err := regexp.Compile(pattern[2:])
|
||||
if err != nil {
|
||||
LOG_ERROR("REGEX_ERROR", "Invalid regex encountered for pattern \"%s\" - %v", pattern[2:], err)
|
||||
}
|
||||
RegexMap[pattern[2:]] = re
|
||||
matched = re.MatchString(filePath)
|
||||
}
|
||||
if matched {
|
||||
return strings.HasPrefix(pattern, "i:")
|
||||
} else {
|
||||
if strings.HasPrefix(pattern, "e:") {
|
||||
allIncludes = false
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -331,11 +380,15 @@ func joinPath(components ...string) string {
|
||||
combinedPath := path.Join(components...)
|
||||
if len(combinedPath) > 257 && runtime.GOOS == "windows" {
|
||||
combinedPath = `\\?\` + filepath.Join(components...)
|
||||
// If the path is on a samba drive we must use the UNC format
|
||||
if strings.HasPrefix(combinedPath, `\\?\\\`) {
|
||||
combinedPath = `\\?\UNC\` + combinedPath[6:]
|
||||
}
|
||||
}
|
||||
return combinedPath
|
||||
}
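The samba branch above rewrites a long `\\server\share` path that has been put behind the `\\?\` long-path prefix into the `\\?\UNC\server\share` form Windows expects. A small self-contained illustration of that string rewrite (the server and share names are made up):

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	// Prefixing a \\server\share path with \\?\ yields a string starting with
	// \\?\\\ which must be rewritten to the \\?\UNC\ form.
	combined := `\\?\` + `\\server\share\very\long\path`
	if strings.HasPrefix(combined, `\\?\\\`) {
		combined = `\\?\UNC\` + combined[6:]
	}
	fmt.Println(combined) // \\?\UNC\server\share\very\long\path
}
```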
|
||||
|
||||
func PrettyNumber(number int64) (string) {
|
||||
func PrettyNumber(number int64) string {
|
||||
|
||||
G := int64(1024 * 1024 * 1024)
|
||||
M := int64(1024 * 1024)
|
||||
@@ -354,7 +407,7 @@ func PrettyNumber(number int64) (string) {
|
||||
}
|
||||
}
|
||||
|
||||
func PrettySize(size int64) (string) {
|
||||
func PrettySize(size int64) string {
|
||||
if size > 1024*1024 {
|
||||
return fmt.Sprintf("%.2fM", float64(size)/(1024.0*1024.0))
|
||||
} else if size > 1024 {
|
||||
@@ -364,7 +417,7 @@ func PrettySize(size int64) (string) {
|
||||
}
|
||||
}
|
||||
|
||||
func PrettyTime(seconds int64) (string) {
|
||||
func PrettyTime(seconds int64) string {
|
||||
|
||||
day := int64(3600 * 24)
|
||||
|
||||
@@ -380,7 +433,7 @@ func PrettyTime(seconds int64) (string) {
|
||||
}
|
||||
}
|
||||
|
||||
func AtoSize(sizeString string) (int) {
|
||||
func AtoSize(sizeString string) int {
|
||||
sizeString = strings.ToLower(sizeString)
|
||||
|
||||
sizeRegex := regexp.MustCompile(`^([0-9]+)([mk])?$`)
|
||||
@@ -399,3 +452,10 @@ func AtoSize(sizeString string) (int) {
|
||||
|
||||
return size
|
||||
}
|
||||
|
||||
func MinInt(x, y int) int {
|
||||
if x < y {
|
||||
return x
|
||||
}
|
||||
return y
|
||||
}
|
||||
|
||||
@@ -1,16 +1,16 @@
|
||||
// Copyright (c) Acrosync LLC. All rights reserved.
|
||||
// Licensed under the Fair Source License 0.9 (https://fair.io/)
|
||||
// User Limitation: 5 users
|
||||
// Free for personal use and commercial trial
|
||||
// Commercial use requires per-user licenses available from https://duplicacy.com
|
||||
|
||||
// +build !windows
|
||||
|
||||
package duplicacy
|
||||
|
||||
import (
|
||||
"os"
|
||||
"bytes"
|
||||
"syscall"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"syscall"
|
||||
|
||||
"github.com/gilbertchen/xattr"
|
||||
)
|
||||
@@ -31,7 +31,7 @@ func GetOwner(entry *Entry, fileInfo *os.FileInfo) {
|
||||
}
|
||||
}
|
||||
|
||||
func SetOwner(fullPath string, entry *Entry, fileInfo *os.FileInfo) (bool) {
|
||||
func SetOwner(fullPath string, entry *Entry, fileInfo *os.FileInfo) bool {
|
||||
stat, ok := (*fileInfo).Sys().(*syscall.Stat_t)
|
||||
if ok && stat != nil && (int(stat.Uid) != entry.UID || int(stat.Gid) != entry.GID) {
|
||||
if entry.UID != -1 && entry.GID != -1 {
|
||||
|
||||
@@ -1,19 +1,18 @@
|
||||
// Copyright (c) Acrosync LLC. All rights reserved.
|
||||
// Licensed under the Fair Source License 0.9 (https://fair.io/)
|
||||
// User Limitation: 5 users
|
||||
// Free for personal use and commercial trial
|
||||
// Commercial use requires per-user licenses available from https://duplicacy.com
|
||||
|
||||
package duplicacy
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"time"
|
||||
"bytes"
|
||||
|
||||
crypto_rand "crypto/rand"
|
||||
|
||||
"testing"
|
||||
|
||||
)
|
||||
|
||||
func TestMatchPattern(t *testing.T) {
|
||||
|
||||
@@ -1,12 +1,12 @@
|
||||
// Copyright (c) Acrosync LLC. All rights reserved.
|
||||
// Licensed under the Fair Source License 0.9 (https://fair.io/)
|
||||
// User Limitation: 5 users
|
||||
// Free for personal use and commercial trial
|
||||
// Commercial use requires per-user licenses available from https://duplicacy.com
|
||||
|
||||
package duplicacy
|
||||
|
||||
import (
|
||||
"os"
|
||||
"fmt"
|
||||
"os"
|
||||
"syscall"
|
||||
"unsafe"
|
||||
)
|
||||
@@ -36,6 +36,7 @@ type reparseDataBuffer struct {
|
||||
// GenericReparseBuffer
|
||||
reparseBuffer byte
|
||||
}
|
||||
|
||||
const (
|
||||
FSCTL_GET_REPARSE_POINT = 0x900A8
|
||||
MAXIMUM_REPARSE_DATA_BUFFER_SIZE = 16 * 1024
|
||||
@@ -103,7 +104,7 @@ func GetOwner(entry *Entry, fileInfo *os.FileInfo) {
|
||||
entry.GID = -1
|
||||
}
|
||||
|
||||
func SetOwner(fullPath string, entry *Entry, fileInfo *os.FileInfo) (bool) {
|
||||
func SetOwner(fullPath string, entry *Entry, fileInfo *os.FileInfo) bool {
|
||||
return true
|
||||
}