mirror of
https://github.com/jkl1337/duplicacy.git
synced 2026-01-02 19:54:54 -06:00
Compare commits
153 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
d8573ca789 | ||
|
|
6b2f50a1e8 | ||
|
|
81b8550232 | ||
|
|
f6e2877948 | ||
|
|
3c1057a3c6 | ||
|
|
8808ad5c28 | ||
|
|
707967e91b | ||
|
|
3f83890859 | ||
|
|
68fb6d671e | ||
|
|
b04ef67d26 | ||
|
|
72ba2dfa87 | ||
|
|
b41e8a24a9 | ||
|
|
a3aa575c68 | ||
|
|
e765575210 | ||
|
|
044e1862e5 | ||
|
|
612c5b7746 | ||
|
|
34afc6f93c | ||
|
|
030cd274c2 | ||
|
|
197d20f0e0 | ||
|
|
93cfbf27cb | ||
|
|
46ec852d4d | ||
|
|
dfa6113279 | ||
|
|
d7fdb5fe7f | ||
|
|
37ebbc4736 | ||
|
|
3ae2de241e | ||
|
|
4adb8dbf70 | ||
|
|
41e3d267e5 | ||
|
|
3e23b0c61c | ||
|
|
b7f537de3c | ||
|
|
0c8a88d15a | ||
|
|
204f56e939 | ||
|
|
4a80d94b63 | ||
|
|
3729de1c67 | ||
|
|
6f70b37d61 | ||
|
|
7baf8702a3 | ||
|
|
8fce6f5f83 | ||
|
|
fd362be54a | ||
|
|
0c13da9872 | ||
|
|
4912911017 | ||
|
|
f69550d0db | ||
|
|
799b040913 | ||
|
|
41e3843bfa | ||
|
|
9e1d2ac1e6 | ||
|
|
bc40498d1b | ||
|
|
446bb4bcc8 | ||
|
|
150ea13a0d | ||
|
|
8c5b7d5f63 | ||
|
|
315dfff7d6 | ||
|
|
0bc475ca4d | ||
|
|
a0fa0fe7da | ||
|
|
01db72080c | ||
|
|
22ddc04698 | ||
|
|
2aa3b2b737 | ||
|
|
76f75cb0cb | ||
|
|
ea4c4339e6 | ||
|
|
fa294eabf4 | ||
|
|
0ec262fd93 | ||
|
|
db3e0946bb | ||
|
|
c426bf5af2 | ||
|
|
823b82060c | ||
|
|
4308e3e6e9 | ||
|
|
0391ecf941 | ||
|
|
7ecf895d85 | ||
|
|
a43114da99 | ||
|
|
caaff6b4b2 | ||
|
|
18964e89a1 | ||
|
|
2d1ea86d8e | ||
|
|
d881ac9169 | ||
|
|
1aee9bd6ef | ||
|
|
f3447bb611 | ||
|
|
9be4927c87 | ||
|
|
a0fcb8802b | ||
|
|
58cfeec6ab | ||
|
|
0d442e736d | ||
|
|
b32bda162d | ||
|
|
e6767bfad4 | ||
|
|
0b9e23fcd8 | ||
|
|
7f04a79111 | ||
|
|
211c6867d3 | ||
|
|
4a31fcfb68 | ||
|
|
6a4b1f2a3f | ||
|
|
483ae5e6eb | ||
|
|
f8d879d414 | ||
|
|
c2120ad3d5 | ||
|
|
f8764a5a79 | ||
|
|
736b4da0c3 | ||
|
|
0aa122609a | ||
|
|
18462cf585 | ||
|
|
e06283f0b3 | ||
|
|
b4f3142275 | ||
|
|
cdd1f26079 | ||
|
|
199e312bea | ||
|
|
88141216e9 | ||
|
|
f9ede565ff | ||
|
|
93a61a6e49 | ||
|
|
7d31199631 | ||
|
|
f2451911f2 | ||
|
|
ac655c8780 | ||
|
|
c31d2a30d9 | ||
|
|
83da36cae0 | ||
|
|
96e2f78096 | ||
|
|
593b409329 | ||
|
|
5334f45998 | ||
|
|
b56baa80c3 | ||
|
|
74ab8d8c23 | ||
|
|
a7613ab7d9 | ||
|
|
65127c7ab7 | ||
|
|
09f695b3e1 | ||
|
|
2908b807b9 | ||
|
|
ba3702647b | ||
|
|
0a149cd509 | ||
|
|
2cbb72c2d0 | ||
|
|
12134ea6ad | ||
|
|
4291bc775b | ||
|
|
817e36c7a6 | ||
|
|
b7b54478fc | ||
|
|
8d06fa491a | ||
|
|
42a6ab9140 | ||
|
|
bad990e702 | ||
|
|
d27335ad8d | ||
|
|
a584828e1b | ||
|
|
d0c376f593 | ||
|
|
a54029cf2b | ||
|
|
839be6094f | ||
|
|
84a4c86ca7 | ||
|
|
651d82e511 | ||
|
|
6a73a62591 | ||
|
|
169d6db544 | ||
|
|
25684942b3 | ||
|
|
746431d5e0 | ||
|
|
28da4d15e2 | ||
|
|
d36e80a5eb | ||
|
|
fe1de10f22 | ||
|
|
112d5b22e5 | ||
|
|
3da8830592 | ||
|
|
04b01fa87d | ||
|
|
4b60859054 | ||
|
|
7e5fc0972d | ||
|
|
c9951d6036 | ||
|
|
92b3594e89 | ||
|
|
2424a2eeed | ||
|
|
2ace6c74e1 | ||
|
|
2fcc4d44b9 | ||
|
|
3f45b0a15a | ||
|
|
2d69f64c20 | ||
|
|
7a1a541c98 | ||
|
|
7aa0eca47c | ||
|
|
aa909c0c15 | ||
|
|
9e1740c1d6 | ||
|
|
ae34347741 | ||
|
|
1361b553ac | ||
|
|
c688c501d3 | ||
|
|
c88e148d59 |
@@ -1,4 +1,4 @@
|
|||||||
Duplicacy is based on the following open source project:
|
Duplicacy is based on the following open source projects:
|
||||||
|
|
||||||
| Projects | License |
|
| Projects | License |
|
||||||
|--------|:-------:|
|
|--------|:-------:|
|
||||||
@@ -7,8 +7,10 @@ Duplicacy is based on the following open source project:
|
|||||||
|https://github.com/bkaradzic/go-lz4 | BSD-2-Clause |
|
|https://github.com/bkaradzic/go-lz4 | BSD-2-Clause |
|
||||||
|https://github.com/Azure/azure-sdk-for-go | Apache-2.0 |
|
|https://github.com/Azure/azure-sdk-for-go | Apache-2.0 |
|
||||||
|https://github.com/tj/go-dropbox | MIT |
|
|https://github.com/tj/go-dropbox | MIT |
|
||||||
|https://github.com/goamz/goamz | LGPL-3.0 with static compilation excpetions |
|
|https://github.com/aws/aws-sdk-go | Apache-2.0 |
|
||||||
|
|https://github.com/goamz/goamz | LGPL with static link exception |
|
||||||
|https://github.com/howeyc/gopass | ISC |
|
|https://github.com/howeyc/gopass | ISC |
|
||||||
|https://github.com/tmc/keyring | ISC |
|
|https://github.com/tmc/keyring | ISC |
|
||||||
|https://github.com/pcwizz/xattr | BSD-2-Clause |
|
|https://github.com/pcwizz/xattr | BSD-2-Clause |
|
||||||
|https://github.com/minio/blake2b-simd | Apache-2.0 |
|
|https://github.com/minio/blake2b-simd | Apache-2.0 |
|
||||||
|
|https://github.com/go-ole/go-ole | MIT |
|
||||||
|
|||||||
20
DESIGN.md
20
DESIGN.md
@@ -27,7 +27,7 @@ If exclusive access to a file storage by a single client can be guaranteed, the
|
|||||||
chunks not referenced by any backup and delete them. However, if concurrent access is required, an unreferenced chunk
|
chunks not referenced by any backup and delete them. However, if concurrent access is required, an unreferenced chunk
|
||||||
can't be trivially removed, because of the possibility that a backup procedure in progress may reference the same chunk.
|
can't be trivially removed, because of the possibility that a backup procedure in progress may reference the same chunk.
|
||||||
The ongoing backup procedure, still unknown to the deletion procedure, may have already encountered that chunk during its
|
The ongoing backup procedure, still unknown to the deletion procedure, may have already encountered that chunk during its
|
||||||
file scanning phase, but decided not to upload the chunk again since it already exists in the file storage.
|
file scanning phase, but decided not to upload the chunk again since it already exists in the file storage.
|
||||||
|
|
||||||
Fortunately, there is a solution to address the deletion problem and make lock-free deduplication practical. The solution is a *two-step fossil collection* algorithm that deletes unreferenced chunks in two steps: identify and collect them in the first step, and then permanently remove them once certain conditions are met.
|
Fortunately, there is a solution to address the deletion problem and make lock-free deduplication practical. The solution is a *two-step fossil collection* algorithm that deletes unreferenced chunks in two steps: identify and collect them in the first step, and then permanently remove them once certain conditions are met.
|
||||||
|
|
||||||
@@ -47,7 +47,7 @@ In the first step of the deletion procedure, called the *fossil collection* step
|
|||||||
be saved in a fossil collection file. The deletion procedure then exits without performing further actions. This step has not effectively changed any chunk references due to the first fossil access rule. If a backup procedure references a chunk after it is marked as a fossil, a new chunk will be uploaded because of the second fossil access rule, as shown in Figure 1.
|
be saved in a fossil collection file. The deletion procedure then exits without performing further actions. This step has not effectively changed any chunk references due to the first fossil access rule. If a backup procedure references a chunk after it is marked as a fossil, a new chunk will be uploaded because of the second fossil access rule, as shown in Figure 1.
|
||||||
|
|
||||||
<p align="center">
|
<p align="center">
|
||||||
<img src="https://github.com/gilbertchen/duplicacy-beta/blob/master/images/fossil_collection_1.png?raw=true"
|
<img src="https://github.com/gilbertchen/duplicacy-beta/blob/master/images/fossil_collection_1.png?raw=true"
|
||||||
alt="Reference after Rename"/>
|
alt="Reference after Rename"/>
|
||||||
</p>
|
</p>
|
||||||
|
|
||||||
@@ -64,7 +64,7 @@ Therefore, if a backup procedure references a chunk before the chunk is marked a
|
|||||||
delete the chunk until it sees that backup procedure finishes (as indicated by the appearance of a new snapshot file uploaded to the storage). This ensures that scenarios depicted in Figure 2 will never happen.
|
delete the chunk until it sees that backup procedure finishes (as indicated by the appearance of a new snapshot file uploaded to the storage). This ensures that scenarios depicted in Figure 2 will never happen.
|
||||||
|
|
||||||
<p align="center">
|
<p align="center">
|
||||||
<img src="https://github.com/gilbertchen/duplicacy-beta/blob/master/images/fossil_collection_2.png?raw=true"
|
<img src="https://github.com/gilbertchen/duplicacy-beta/blob/master/images/fossil_collection_2.png?raw=true"
|
||||||
alt="Reference before Rename"/>
|
alt="Reference before Rename"/>
|
||||||
</p>
|
</p>
|
||||||
|
|
||||||
@@ -128,25 +128,25 @@ and dir1/file3):
|
|||||||
170593,
|
170593,
|
||||||
124309,
|
124309,
|
||||||
1734
|
1734
|
||||||
]
|
]
|
||||||
}
|
}
|
||||||
```
|
```
|
||||||
|
|
||||||
When Duplicacy splits a file in chunks using the variable-size chunking algorithm, if the end of a file is reached and yet the boundary marker for terminating a chunk
|
When Duplicacy splits a file in chunks using the variable-size chunking algorithm, if the end of a file is reached and yet the boundary marker for terminating a chunk
|
||||||
hasn't been found, the next file, if there is one, will be read in and the chunking algorithm continues. It is as if all
|
hasn't been found, the next file, if there is one, will be read in and the chunking algorithm continues. It is as if all
|
||||||
files were packed into a big tar file which is then split into chunks.
|
files were packed into a big tar file which is then split into chunks.
|
||||||
|
|
||||||
The *content* field of a file indicates the indexes of starting and ending chunks and the corresponding offsets. For
|
The *content* field of a file indicates the indexes of starting and ending chunks and the corresponding offsets. For
|
||||||
instance, *file1* starts at chunk 0 offset 0 while ends at chunk 2 offset 6108, immediately followed by *file2*.
|
instance, *file1* starts at chunk 0 offset 0 while ends at chunk 2 offset 6108, immediately followed by *file2*.
|
||||||
|
|
||||||
The backup procedure can run in one of two modes. In the default quick mode, only modified or new files are scanned. Chunks only
|
The backup procedure can run in one of two modes. In the default quick mode, only modified or new files are scanned. Chunks only
|
||||||
referenced by old files that have been modified are removed from the chunk sequence, and then chunks referenced by new
|
referenced by old files that have been modified are removed from the chunk sequence, and then chunks referenced by new
|
||||||
files are appended. Indices for unchanged files need to be updated too.
|
files are appended. Indices for unchanged files need to be updated too.
|
||||||
|
|
||||||
In the safe mode (enabled by the -hash option), all files are scanned and the chunk sequence is regenerated.
|
In the safe mode (enabled by the -hash option), all files are scanned and the chunk sequence is regenerated.
|
||||||
|
|
||||||
The length sequence stores the lengths for all chunks, which are needed when calculating some statistics such as the total
|
The length sequence stores the lengths for all chunks, which are needed when calculating some statistics such as the total
|
||||||
length of chunks. For a repository containing a large number of files, the size of the snapshot file can be tremendous.
|
length of chunks. For a repository containing a large number of files, the size of the snapshot file can be tremendous.
|
||||||
To make the situation worse, every time a big snapshot file would have been uploaded even if only a few files have been changed since
|
To make the situation worse, every time a big snapshot file would have been uploaded even if only a few files have been changed since
|
||||||
last backup. To save space, the variable-size chunking algorithm is also applied to the three dynamic fields of a snapshot
|
last backup. To save space, the variable-size chunking algorithm is also applied to the three dynamic fields of a snapshot
|
||||||
file, *files*, *chunks*, and *lengths*.
|
file, *files*, *chunks*, and *lengths*.
|
||||||
@@ -200,7 +200,7 @@ When encryption is enabled (by the -e option with the *init* or *add* command),
|
|||||||
Here is a diagram showing how these keys are used:
|
Here is a diagram showing how these keys are used:
|
||||||
|
|
||||||
<p align="center">
|
<p align="center">
|
||||||
<img src="https://github.com/gilbertchen/duplicacy-beta/blob/master/images/duplicacy_encryption.png?raw=true"
|
<img src="https://github.com/gilbertchen/duplicacy-beta/blob/master/images/duplicacy_encryption.png?raw=true"
|
||||||
alt="encryption"/>
|
alt="encryption"/>
|
||||||
</p>
|
</p>
|
||||||
|
|
||||||
@@ -210,6 +210,4 @@ Chunk content is encrypted by AES-GCM, with an encryption key that is the HMAC-S
|
|||||||
|
|
||||||
The snapshot is encrypted by AES-GCM too, using an encrypt key that is the HMAC-SHA256 of the file path with the *File Key* as the secret key.
|
The snapshot is encrypted by AES-GCM too, using an encrypt key that is the HMAC-SHA256 of the file path with the *File Key* as the secret key.
|
||||||
|
|
||||||
These four random keys are saved in a file named 'config' in the storage, encrypted with a master key derived from the PBKDF2 function on
|
These four random keys are saved in a file named 'config' in the storage, encrypted with a master key derived from the PBKDF2 function on the storage password chosen by the user.
|
||||||
the storage password chosen by the user.
|
|
||||||
|
|
||||||
|
|||||||
217
GUIDE.md
217
GUIDE.md
@@ -16,22 +16,22 @@ OPTIONS:
|
|||||||
-chunk-size, -c 4M the average size of chunks
|
-chunk-size, -c 4M the average size of chunks
|
||||||
-max-chunk-size, -max 16M the maximum size of chunks (defaults to chunk-size * 4)
|
-max-chunk-size, -max 16M the maximum size of chunks (defaults to chunk-size * 4)
|
||||||
-min-chunk-size, -min 1M the minimum size of chunks (defaults to chunk-size / 4)
|
-min-chunk-size, -min 1M the minimum size of chunks (defaults to chunk-size / 4)
|
||||||
|
-pref-dir <preference directory path> Specify alternate location for .duplicacy preferences directory
|
||||||
```
|
```
|
||||||
|
|
||||||
The *init* command first connects to the storage specified by the storage URL. If the storage has been already been
|
The *init* command first connects to the storage specified by the storage URL. If the storage has already been initialized before, it will download the storage configuration (stored in the file named *config*) and ignore the options provided in the command line. Otherwise, it will create the configuration file from the options and upload the file.
|
||||||
initialized before, it will download the storage configuration (stored in the file named *config*) and ignore the options provided in the command line. Otherwise, it will create the configuration file from the options and upload the file.
|
|
||||||
|
|
||||||
The initialized storage will then become the default storage for other commands if the -storage option is not specified
|
The initialized storage will then become the default storage for other commands if the `-storage` option is not specified for those commands. This default storage actually has a name, *default*.
|
||||||
for those commands. This default storage actually has a name, *default*.
|
|
||||||
|
|
||||||
After that, it will prepare the the current working directory as the repository to be backed up. Under the hood, it will create a directory
|
After that, it will prepare the current working directory as the repository to be backed up. Under the hood, it will create a directory named *.duplicacy* in the repository and put a file named *preferences* that stores the snapshot id and encryption and storage options.
|
||||||
named *.duplicacy* in the repository and put a file named *preferences* that stores the snapshot id and encryption and storage options.
|
|
||||||
|
|
||||||
The snapshot id is an id used to distinguish different repositories connected to the same storage. Each repository must have a unique snapshot id.
|
The snapshot id is an id used to distinguish different repositories connected to the same storage. Each repository must have a unique snapshot id. A snapshot id must contain only characters valid in Linux and Windows paths (alphabet, digits, underscore, dash, etc), but cannot include `/`, `\`, or `@`.
|
||||||
|
|
||||||
The -e option controls whether or not encryption will be enabled for the storage. If encryption is enabled, you will be prompted to enter a storage password.
|
The `-e` option controls whether or not encryption will be enabled for the storage. If encryption is enabled, you will be prompted to enter a storage password.
|
||||||
|
|
||||||
The three chunk size parameters are passed to the variable-size chunking algorithm. Their values are important to the overall performance, especially for cloud storages. If the chunk size is too small, a lot of overhead will be in sending requests and receiving responses. If the chunk size is too large, the effect of deduplication will be less obvious as more data will need to be transferred with each chunk.
|
The three chunk size parameters are passed to the variable-size chunking algorithm. Their values are important to the overall performance, especially for cloud storages. If the chunk size is too small, a lot of overhead will be in sending requests and receiving responses. If the chunk size is too large, the effect of de-duplication will be less obvious as more data will need to be transferred with each chunk.
|
||||||
|
|
||||||
|
The `-pref-dir` option controls the location of the preferences directory. If not specified, a directory named .duplicacy is created in the repository. If specified, it must point to a non-existing directory. The directory is created and a .duplicacy file is created in the repository. The .duplicacy file contains the absolute path name to the preferences directory.
|
||||||
|
|
||||||
Once a storage has been initialized with these parameters, these parameters cannot be modified any more.
|
Once a storage has been initialized with these parameters, these parameters cannot be modified any more.
|
||||||
|
|
||||||
@@ -49,29 +49,24 @@ OPTIONS:
|
|||||||
-t <tag> assign a tag to the backup
|
-t <tag> assign a tag to the backup
|
||||||
-stats show statistics during and after backup
|
-stats show statistics during and after backup
|
||||||
-threads <n> number of uploading threads
|
-threads <n> number of uploading threads
|
||||||
-limit-rate <kB/s> the maximum upload rate (in kilobytes/sec)
|
-limit-rate <kB/s> the maximum upload rate (in kilobytes/sec)
|
||||||
-vss enable the Volume Shadow Copy service (Windows only)
|
-vss enable the Volume Shadow Copy service (Windows only)
|
||||||
-storage <storage name> backup to the specified storage instead of the default one
|
-storage <storage name> backup to the specified storage instead of the default one
|
||||||
```
|
```
|
||||||
|
|
||||||
The *backup* command creates a snapshot of the repository and uploads it to the storage. If -hash is not provided,
|
The *backup* command creates a snapshot of the repository and uploads it to the storage. If `-hash` is not provided, it will upload new or modified files since the last backup by comparing file sizes and timestamps. Otherwise, every file is scanned to detect changes.
|
||||||
it will upload new or modified files since last backup by comparing file sizes and timestamps.
|
|
||||||
Otherwise, every file is scanned to detect changes.
|
|
||||||
|
|
||||||
You can assign a tag to the snapshot so that later you can refer to it by tag in other commands.
|
You can assign a tag to the snapshot so that later you can refer to it by tag in other commands.
|
||||||
|
|
||||||
If the -stats option is specified, statistical information such as transfer speed, the number of chunks will be displayed
|
If the `-stats` option is specified, statistical information such as transfer speed, and the number of chunks will be displayed throughout the backup procedure.
|
||||||
throughout the backup procedure.
|
|
||||||
|
|
||||||
The -threads option can be used to specify more than one thread to upload chunks.
|
The `-threads` option can be used to specify more than one thread to upload chunks.
|
||||||
|
|
||||||
The -limit-rate option sets a cape on the maximum upload rate.
|
The `-limit-rate` option sets a cap on the maximum upload rate.
|
||||||
|
|
||||||
The -vss option works on Windows only to turn on the Volume Shadow Copy service such that files opened by other
|
The `-vss` option works on Windows only to turn on the Volume Shadow Copy service such that files opened by other processes with exclusive locks can be read as usual.
|
||||||
processes with exclusive locks can be read as usual.
|
|
||||||
|
|
||||||
When the repository can have multiple storages (added by the *add* command), you can select the storage to back up to
|
When the repository can have multiple storages (added by the *add* command), you can select the storage to back up to by giving a storage name.
|
||||||
by giving a storage name.
|
|
||||||
|
|
||||||
You can specify patterns to include/exclude files by putting them in a file named *.duplicacy/filters*. Please refer to the [Include/Exclude Patterns](https://github.com/gilbertchen/duplicacy-beta/blob/master/GUIDE.md#includeexclude-patterns) section for how to specify the patterns.
|
You can specify patterns to include/exclude files by putting them in a file named *.duplicacy/filters*. Please refer to the [Include/Exclude Patterns](https://github.com/gilbertchen/duplicacy-beta/blob/master/GUIDE.md#includeexclude-patterns) section for how to specify the patterns.
|
||||||
|
|
||||||
@@ -90,29 +85,25 @@ OPTIONS:
|
|||||||
-delete delete files not in the snapshot
|
-delete delete files not in the snapshot
|
||||||
-stats show statistics during and after restore
|
-stats show statistics during and after restore
|
||||||
-threads <n> number of downloading threads
|
-threads <n> number of downloading threads
|
||||||
-limit-rate <kB/s> the maximum download rate (in kilobytes/sec)
|
-limit-rate <kB/s> the maximum download rate (in kilobytes/sec)
|
||||||
-storage <storage name> restore from the specified storage instead of the default one
|
-storage <storage name> restore from the specified storage instead of the default one
|
||||||
```
|
```
|
||||||
|
|
||||||
The *restore* command restores the repository to a previous revision. By default the restore procedure will treat
|
The *restore* command restores the repository to a previous revision. By default the restore procedure will treat files that have the same sizes and timestamps as those in the snapshot as unchanged files, but with the -hash option, every file will be fully scanned to make sure they are in fact unchanged.
|
||||||
files that have the same sizes and timestamps as those in the snapshot as unchanged files, but with the -hash option, every file will be fully scanned to make sure they are in fact unchanged.
|
|
||||||
|
|
||||||
By default the restore procedure will not overwriting existing files, unless the -overwrite option is specified.
|
By default the restore procedure will not overwrite existing files, unless the `-overwrite` option is specified.
|
||||||
|
|
||||||
The -delete option indicates that files not in the snapshot will be removed.
|
The `-delete` option indicates that files not in the snapshot will be removed.
|
||||||
|
|
||||||
If the -stats option is specified, statistical information such as transfer speed, number of chunks will be displayed
|
If the `-stats` option is specified, statistical information such as transfer speed, and number of chunks will be displayed throughout the restore procedure.
|
||||||
throughout the restore procedure.
|
|
||||||
|
|
||||||
The -threads option can be used to specify more than one thread to download chunks.
|
The `-threads` option can be used to specify more than one thread to download chunks.
|
||||||
|
|
||||||
The -limit-rate option sets a cape on the maximum upload rate.
|
The `-limit-rate` option sets a cap on the maximum download rate.
|
||||||
|
|
||||||
When the repository can have multiple storages (added by the *add* command), you can select the storage to restore from by specifying the storage name.
|
When the repository can have multiple storages (added by the *add* command), you can select the storage to restore from by specifying the storage name.
|
||||||
|
|
||||||
Unlike the *backup* procedure that reading the include/exclude patterns from a file, the *restore* procedure reads them
|
Unlike the *backup* procedure, which reads the include/exclude patterns from a file, the *restore* procedure reads them from the command line. If the patterns can cause confusion to the command line argument parser, -- should be prepended to the patterns. Please refer to the [Include/Exclude Patterns](https://github.com/gilbertchen/duplicacy-beta/blob/master/GUIDE.md#includeexclude-patterns) section for how to specify patterns.
|
||||||
from the command line. If the patterns can cause confusion to the command line argument parser, -- should be prepended to
|
|
||||||
the patterns. Please refer to the [Include/Exclude Patterns](https://github.com/gilbertchen/duplicacy-beta/blob/master/GUIDE.md#includeexclude-patterns) section for how to specify patterns.
|
|
||||||
|
|
||||||
|
|
||||||
#### List
|
#### List
|
||||||
@@ -121,7 +112,7 @@ SYNOPSIS:
|
|||||||
duplicacy list - List snapshots
|
duplicacy list - List snapshots
|
||||||
|
|
||||||
USAGE:
|
USAGE:
|
||||||
duplicacy list [command options]
|
duplicacy list [command options]
|
||||||
|
|
||||||
OPTIONS:
|
OPTIONS:
|
||||||
-all, -a list snapshots with any id
|
-all, -a list snapshots with any id
|
||||||
@@ -130,28 +121,21 @@ OPTIONS:
|
|||||||
-t <tag> list snapshots with the specified tag
|
-t <tag> list snapshots with the specified tag
|
||||||
-files print the file list in each snapshot
|
-files print the file list in each snapshot
|
||||||
-chunks print chunks in each snapshot or all chunks if no snapshot specified
|
-chunks print chunks in each snapshot or all chunks if no snapshot specified
|
||||||
-reset-password take passwords from input rather than keychain/keyring or env
|
-reset-passwords take passwords from input rather than keychain/keyring or env
|
||||||
-storage <storage name> retrieve snapshots from the specified storage
|
-storage <storage name> retrieve snapshots from the specified storage
|
||||||
```
|
```
|
||||||
|
|
||||||
The *list* command lists information about specified snapshots. By default it will list snapshots created from the
|
The *list* command lists information about specified snapshots. By default it will list snapshots created from the current repository, but you can list all snapshots stored in the storage by specifying the -all option, or list snapshots with a different snapshot id using the `-id` option, and/or snapshots with a particular tag with the `-t` option.
|
||||||
current repository, but you can list all snapshots stored in the storage by specifying the -all option, or list snapshots
|
|
||||||
with a different snapshot id using the -id option, and/or snapshots with a particular tag with the -t option.
|
|
||||||
|
|
||||||
The revision number is a number assigned to the snapshot when it is being created. This number will keep increasing
|
The revision number is a number assigned to the snapshot when it is being created. This number will keep increasing every time a new snapshot is created from a repository. You can refer to snapshots by their revision numbers using the `-r` option, which either takes a single revision number `-r 123` or a range `-r 123-456`. There can be multiple `-r` options.
|
||||||
every time a new snapshot is created from a repository. You can refer to snapshots by their revision numbers using
|
|
||||||
the -r option, which either takes a single revision number (-r 123) or a range (-r 123-456).
|
|
||||||
There can be multiple -r options.
|
|
||||||
|
|
||||||
If -files is specified, for each snapshot to be listed, this command will also print information about every file
|
If `-files` is specified, for each snapshot to be listed, this command will also print information about every file contained in the snapshot.
|
||||||
contained in the snapshot.
|
|
||||||
|
|
||||||
If -chunks is specified, the command will also print out every chunk the snapshot references.
|
If `-chunks` is specified, the command will also print out every chunk the snapshot references.
|
||||||
|
|
||||||
The -reset-password option is used to reset stored passwords and to allow passwords to be entered again. Please refer to the [Managing Passwords](https://github.com/gilbertchen/duplicacy-beta/blob/master/GUIDE.md#managing-passwords) section for more information.
|
The `-reset-password` option is used to reset stored passwords and to allow passwords to be entered again. Please refer to the [Managing Passwords](https://github.com/gilbertchen/duplicacy-beta/blob/master/GUIDE.md#managing-passwords) section for more information.
|
||||||
|
|
||||||
When the repository can have multiple storages (added by the *add* command), you can specify the storage to list
|
When the repository can have multiple storages (added by the *add* command), you can specify the storage to list by specifying the storage name.
|
||||||
by specifying the storage name.
|
|
||||||
|
|
||||||
#### Check
|
#### Check
|
||||||
```
|
```
|
||||||
@@ -175,23 +159,15 @@ OPTIONS:
|
|||||||
The *check* command checks, for each specified snapshot, that all referenced chunks exist in the storage.
|
The *check* command checks, for each specified snapshot, that all referenced chunks exist in the storage.
|
||||||
|
|
||||||
By default the *check* command will check snapshots created from the
|
By default the *check* command will check snapshots created from the
|
||||||
current repository, but you can check all snapshots stored in the storage at once by specifying the -all option, or
|
current repository, but you can check all snapshots stored in the storage at once by specifying the `-all` option, or snapshots from a different repository using the `-id` option, and/or snapshots with a particular tag with the `-t` option.
|
||||||
snapshots from a different repository using the -id option, and/or snapshots with a particular tag with the -t option.
|
|
||||||
|
|
||||||
The revision number is a number assigned to the snapshot when it is being created. This number will keep increasing
|
The revision number is a number assigned to the snapshot when it is being created. This number will keep increasing every time a new snapshot is created from a repository. You can refer to snapshots by their revision numbers using the `-r` option, which either takes a single revision number `-r 123` or a range `-r 123-456`. There can be multiple `-r` options.
|
||||||
every time a new snapshot is created from a repository. You can refer to snapshots by their revision numbers using
|
|
||||||
the -r option, which either takes a single revision number (-r 123) or a range (-r 123-456).
|
|
||||||
There can be multiple -r options.
|
|
||||||
|
|
||||||
By default the *check* command only verifies the existence of chunks. To verify the full integrity of a snapshot,
|
By default the *check* command only verifies the existence of chunks. To verify the full integrity of a snapshot, you should specify the `-files` option, which will download chunks and compute file hashes in memory, to make sure that all hashes match.
|
||||||
you should specify the -files option, which will download chunks and compute file hashes in memory, to
|
|
||||||
make sure that all hashes match.
|
|
||||||
|
|
||||||
By default the *check* command does not find fossils. If the -fossils option is specified, it will find
|
By default the *check* command does not find fossils. If the `-fossils` option is specified, it will find the fossil if the referenced chunk does not exist. If the `-resurrect` option is specified, it will turn the fossil back into a chunk.
|
||||||
the fossil if the referenced chunk does not exist. if the -resurrect option is specified, it will turn the fossil back into a chunk.
|
|
||||||
|
|
||||||
When the repository can have multiple storages (added by the *add* command), you can specify the storage to check
|
When the repository can have multiple storages (added by the *add* command), you can specify the storage to check by specifying the storage name.
|
||||||
by specifying the storage name.
|
|
||||||
|
|
||||||
|
|
||||||
#### Cat
|
#### Cat
|
||||||
@@ -214,9 +190,9 @@ The file must be specified with a path relative to the repository.
|
|||||||
|
|
||||||
You can specify a different snapshot id rather than the default id.
|
You can specify a different snapshot id rather than the default id.
|
||||||
|
|
||||||
The -r option is optional. If not specified, the latest revision will be selected.
|
The `-r` option is optional. If not specified, the latest revision will be selected.
|
||||||
|
|
||||||
You can use the -storage option to select a different storage other than the default one.
|
You can use the `-storage` option to select a different storage other than the default one.
|
||||||
|
|
||||||
#### Diff
|
#### Diff
|
||||||
```
|
```
|
||||||
@@ -232,17 +208,15 @@ OPTIONS:
|
|||||||
-hash compute the hashes of on-disk files
|
-hash compute the hashes of on-disk files
|
||||||
-storage <storage name> retrieve files from the specified storage
|
-storage <storage name> retrieve files from the specified storage
|
||||||
```
|
```
|
||||||
The *diff* command compares the same file in two different snapshots if a file is given, otherwise compares the
|
The *diff* command compares the same file in two different snapshots if a file is given, otherwise compares the two snapshots.
|
||||||
two snapshots.
|
|
||||||
|
|
||||||
The file must be specified with a path relative to the repository.
|
The file must be specified with a path relative to the repository.
|
||||||
|
|
||||||
You can specify a different snapshot id rather than the default snapshot id.
|
You can specify a different snapshot id rather than the default snapshot id.
|
||||||
|
|
||||||
If only one revision is given by -r, the right hand side of the comparison will be the on-disk file.
|
If only one revision is given by `-r`, the right hand side of the comparison will be the on-disk file. The `-hash` option can then instruct this command to compute the hash of the file.
|
||||||
The -hash option can then instruct this command to compute the hash of the file.
|
|
||||||
|
|
||||||
You can use the -storage option to select a different storage other than the default one.
|
You can use the `-storage` option to select a different storage other than the default one.
|
||||||
|
|
||||||
#### History
|
#### History
|
||||||
```
|
```
|
||||||
@@ -261,13 +235,11 @@ OPTIONS:
|
|||||||
|
|
||||||
The *history* command shows how the hash, size, and timestamp of a file change over the specified set of revisions.
|
The *history* command shows how the hash, size, and timestamp of a file change over the specified set of revisions.
|
||||||
|
|
||||||
You can specify a different snapshot id rather than the default snapshot id, and multiple -r options to specify the
|
You can specify a different snapshot id rather than the default snapshot id, and multiple `-r` options to specify the set of revisions.
|
||||||
set of revisions.
|
|
||||||
|
|
||||||
The -hash option is to compute the hash of the on-disk file. Otherwise, only the size and timestamp of the on-disk
|
The `-hash` option is to compute the hash of the on-disk file. Otherwise, only the size and timestamp of the on-disk file will be included.
|
||||||
file will be included.
|
|
||||||
|
|
||||||
You can use the -storage option to select a different storage other than the default one.
|
You can use the `-storage` option to select a different storage other than the default one.
|
||||||
|
|
||||||
#### Prune
|
#### Prune
|
||||||
```
|
```
|
||||||
@@ -292,16 +264,11 @@ OPTIONS:
|
|||||||
-storage <storage name> prune snapshots from the specified storage
|
-storage <storage name> prune snapshots from the specified storage
|
||||||
```
|
```
|
||||||
|
|
||||||
The *prune* command implements the two-step fossil collection algorithm. It will first find fossil collection files
|
The *prune* command implements the two-step fossil collection algorithm. It will first find fossil collection files from previous runs and check if contained fossils are eligible for permanent deletion (the fossil deletion step). Then it will search for snapshots to be deleted, mark unreferenced chunks as fossils (by renaming) and save them in a new fossil collection file stored locally (the fossil collection step).
|
||||||
from previous runs and check if contained fossils are eligible for permanent deletion (the fossil deletion step). Then it
|
|
||||||
will search for snapshots to be deleted, mark unreferenced chunks as fossils (by renaming) and save them in a new fossil
|
|
||||||
collection file stored locally (the fossil collection step).
|
|
||||||
|
|
||||||
If a snapshot id is specified, that snapshot id will be used instead of the default one. The -a option will find
|
If a snapshot id is specified, that snapshot id will be used instead of the default one. The `-a` option will find snapshots with any id. Snapshots to be deleted can be specified by revision numbers, by a tag, by retention policies, or by any combination of them.
|
||||||
snapshots with any id. Snapshots to be deleted can be specified by revision numbers, by a tag, by retention policies,
|
|
||||||
or by any combination of them.
|
|
||||||
|
|
||||||
The retention policies are specified by the -keep option, which accepts an argument in the form of two numbers *n:m*, where *n* indicates the number of days between two consecutive snapshots to keep, and *m* means that the policy only applies to snapshots at least *m* day old. If *n* is zero, any snapshots older than *m* days will be removed.
|
The retention policies are specified by the `-keep` option, which accepts an argument in the form of two numbers *n:m*, where *n* indicates the number of days between two consecutive snapshots to keep, and *m* means that the policy only applies to snapshots at least *m* day old. If *n* is zero, any snapshots older than *m* days will be removed.
|
||||||
|
|
||||||
Here are a few sample retention policies:
|
Here are a few sample retention policies:
|
||||||
|
|
||||||
@@ -312,37 +279,28 @@ $ duplicacy prune -keep 30:180 # Keep 1 snapshot every 30 days for snapshots
|
|||||||
$ duplicacy prune -keep 0:360 # Keep no snapshots older than 360 days
|
$ duplicacy prune -keep 0:360 # Keep no snapshots older than 360 days
|
||||||
```
|
```
|
||||||
|
|
||||||
Multiple -keep options must be sorted by their *m* values in decreasing order. For instance, to combine the above policies into one line, it would become:
|
Multiple `-keep` options must be sorted by their *m* values in decreasing order. For instance, to combine the above policies into one line, it would become:
|
||||||
|
|
||||||
```sh
|
```sh
|
||||||
$ duplicacy prune -keep 0:360 -keep 30:180 -keep 7:30 -keep 1:7
|
$ duplicacy prune -keep 0:360 -keep 30:180 -keep 7:30 -keep 1:7
|
||||||
```
|
```
|
||||||
|
|
||||||
The -exhaustive option will scan the list of all chunks in the storage, therefore it will find not only
|
The `-exhaustive` option will scan the list of all chunks in the storage, therefore it will find not only unreferenced chunks from deleted snapshots, but also chunks that become unreferenced for other reasons, such as those from an incomplete backup. It will also find any file that does not look like a chunk file. In contrast, a default *prune* command will only identify
|
||||||
unreferenced chunks from deleted snapshots, but also chunks that become unreferenced for other reasons, such as
|
|
||||||
those from an incomplete backup. It will also find any file that does not look like a chunk file.
|
|
||||||
In contrast, a default *prune* command will only identify
|
|
||||||
chunks referenced by deleted snapshots but not any other snapshots.
|
chunks referenced by deleted snapshots but not any other snapshots.
|
||||||
|
|
||||||
The -exclusive option will assume that no other clients are accessing the storage, effectively disabling the
|
The `-exclusive` option will assume that no other clients are accessing the storage, effectively disabling the *two-step fossil collection* algorithm. With this option, the *prune* command will immediately remove unreferenced chunks.
|
||||||
*two-step fossil collection* algorithm. With this option, the *prune* command will immediately remove unreferenced chunks.
|
|
||||||
|
|
||||||
The -dryrun option is used to test what changes the *prune* command would have done. It is guaranteed not to make
|
The `-dry-run` option is used to test what changes the *prune* command would have done. It is guaranteed not to make any changes on the storage, not even creating the local fossil collection file. The following command checks if the chunk directory is clean (i.e., if there are any unreferenced chunks, temporary files, or anything else):
|
||||||
any changes on the storage, not even creating the local fossil collection file. The following command checks if the
|
|
||||||
chunk directory is clean (i.e., if there are any unreferenced chunks, temporary files, or anything else):
|
|
||||||
|
|
||||||
```
|
```
|
||||||
$ duplicacy prune -d -exclusive -exhaustive # Prints out nothing if the chunk directory is clean
|
$ duplicacy prune -d -exclusive -exhaustive # Prints out nothing if the chunk directory is clean
|
||||||
```
|
```
|
||||||
|
|
||||||
The -delete-only option will skip the fossil collection step, while the -collect-only option will skip the fossil deletion step.
|
The `-delete-only` option will skip the fossil collection step, while the `-collect-only` option will skip the fossil deletion step.
|
||||||
|
|
||||||
For fossils collected in the fossil collection step to be eligible for safe deletion in the fossil deletion step, at least
|
For fossils collected in the fossil collection step to be eligible for safe deletion in the fossil deletion step, at least one new snapshot from *each* snapshot id must be created between two runs of the *prune* command. However, some repositories may not be set up to back up on a regular schedule, thus literally blocking other repositories from deleting any fossils. Duplicacy by default will ignore repositories that have no new backup in the past 7 days. It also provides an `-ignore` option that can be used to skip certain repositories when deciding the deletion criteria.
|
||||||
one new snapshot from *each* snapshot id must be created between two runs of the *prune* command. However, some repository
|
|
||||||
may not be set up to back up with a regular schedule, and thus literally blocking other repositories from deleting any fossils. Duplicacy by default will ignore repositories that have no new backup in the past 7 days. It also provide an
|
|
||||||
-ignore option that can be used to skip certain repositories when deciding the deletion criteria.
|
|
||||||
|
|
||||||
You can use the -storage option to select a different storage other than the default one.
|
You can use the `-storage` option to select a different storage other than the default one.
|
||||||
|
|
||||||
|
|
||||||
#### Password
|
#### Password
|
||||||
@@ -381,17 +339,11 @@ OPTIONS:
|
|||||||
-copy <storage name> make the new storage copy-compatible with an existing one
|
-copy <storage name> make the new storage copy-compatible with an existing one
|
||||||
```
|
```
|
||||||
|
|
||||||
The *add* command connects another storage to the current repository. Like the *init* command, if the storage has not
|
The *add* command connects another storage to the current repository. Like the *init* command, if the storage has not been initialized before, a storage configuration file derived from the command line options will be uploaded, but those options will be ignored if the configuration file already exists in the storage.
|
||||||
been initialized before, a storage configuration file derived from the command line options will be uploaded, but those
|
|
||||||
options will be ignored if the configuration file already exists in the storage.
|
|
||||||
|
|
||||||
A unique storage name must be given in order to distinguish it from other storages.
|
A unique storage name must be given in order to distinguish it from other storages.
|
||||||
|
|
||||||
The -copy option is required if later you want to copy snapshots between this storage and another storage.
|
The `-copy` option is required if later you want to copy snapshots between this storage and another storage. Two storages are copy-compatible if they have the same average chunk size, the same maximum chunk size, the same minimum chunk size, the same chunk seed (used in calculating the rolling hash in the variable-size chunks algorithm), and the same hash key. If the `-copy` option is specified, these parameters will be copied from the existing storage rather than from the command line.
|
||||||
Two storages are copy-compatible if they have the same average chunk size, the same maximum chunk size,
|
|
||||||
the same minimum chunk size, the same chunk seed (used in calculating the rolling hash in the variable-size chunks
|
|
||||||
algorithm), and the same hash key. If the -copy option is specified, these parameters will be copied from
|
|
||||||
the existing storage rather than from the command line.
|
|
||||||
|
|
||||||
#### Set
|
#### Set
|
||||||
```
|
```
|
||||||
@@ -413,16 +365,15 @@ OPTIONS:
|
|||||||
|
|
||||||
The *set* command changes the options for the specified storage.
|
The *set* command changes the options for the specified storage.
|
||||||
|
|
||||||
The -e option turns on the storage encryption. If specified as -e=false, it turns off the storage encryption.
|
The `-e` option turns on the storage encryption. If specified as `-e=false`, it turns off the storage encryption.
|
||||||
|
|
||||||
The -no-backup option will not allow backups from this repository to be created.
|
The `-no-backup` option will not allow backups from this repository to be created.
|
||||||
|
|
||||||
The -no-restore option will not allow restoring this repository to a different revision.
|
The `-no-restore` option will not allow restoring this repository to a different revision.
|
||||||
|
|
||||||
The -no-save-password option will require every password or token to be entered every time and not saved anywhere.
|
The `-no-save-password` option will require every password or token to be entered every time and not saved anywhere.
|
||||||
|
|
||||||
The -key and -value options are used to store (in plain text) access keys or tokens need by various storages. Please
|
The `-key` and `-value` options are used to store (in plain text) access keys or tokens needed by various storages. Please refer to the [Managing Passwords](https://github.com/gilbertchen/duplicacy-beta/blob/master/GUIDE.md#managing-passwords) section for more details.
|
||||||
refer to the [Managing Passwords](https://github.com/gilbertchen/duplicacy-beta/blob/master/GUIDE.md#managing-passwords) section for more details.
|
|
||||||
|
|
||||||
You can select a storage to change options for by specifying a storage name.
|
You can select a storage to change options for by specifying a storage name.
|
||||||
|
|
||||||
@@ -442,22 +393,34 @@ OPTIONS:
|
|||||||
-to <storage name> copy snapshots to the specified storage
|
-to <storage name> copy snapshots to the specified storage
|
||||||
```
|
```
|
||||||
|
|
||||||
The *copy* command copies snapshots from one storage to another storage. They must be copy-compatible, i.e., some
|
The *copy* command copies snapshots from one storage to another storage. They must be copy-compatible, i.e., some configuration parameters must be the same. One storage must be initialized with the `-copy` option provided by the *add* command.
|
||||||
configuration parameters must be the same. One storage must be initialized with the -copy option provided by the *add* command.
|
|
||||||
|
|
||||||
Instead of copying all snapshots, you can specify a set of snapshots to copy by giving the -r options. The *copy* command
|
Instead of copying all snapshots, you can specify a set of snapshots to copy by giving the `-r` options. The *copy* command preserves the revision numbers, so if a revision number already exists on the destination storage the command will fail.
|
||||||
preserves the revision numbers, so if a revision number already exists on the destination storage the command will fail.
|
|
||||||
|
|
||||||
If no -from option is given, the snapshots from the default storage will be copied. The -to option specified the
|
If no `-from` option is given, the snapshots from the default storage will be copied. The `-to` option specifies the destination storage and is required.
|
||||||
destination storage and is required.
|
|
||||||
|
|
||||||
## Include/Exclude Patterns
|
## Include/Exclude Patterns
|
||||||
|
|
||||||
An include pattern starts with +, and an exclude pattern starts with -. Patterns may contain wildcard characters such as * and ? with their normal meaning.
|
An include pattern starts with +, and an exclude pattern starts with -. Patterns may contain wildcard characters * which matches a path string of any length, and ? matches a single character. Note that both * and ? will match any character including the path separator /.
|
||||||
|
|
||||||
|
The path separator is always /, even on Windows.
|
||||||
|
|
||||||
When matching a path against a list of patterns, the path is compared with the part after + or -, one pattern at a time. Therefore, the order of the patterns is significant. If a match with an include pattern is found, the path is said to be included without further comparisons. If a match with an exclude pattern is found, the path is said to be excluded without further comparison. If a match is not found, the path will be excluded if all patterns are include patterns, but included otherwise.
|
When matching a path against a list of patterns, the path is compared with the part after + or -, one pattern at a time. Therefore, the order of the patterns is significant. If a match with an include pattern is found, the path is said to be included without further comparisons. If a match with an exclude pattern is found, the path is said to be excluded without further comparison. If a match is not found, the path will be excluded if all patterns are include patterns, but included otherwise.
|
||||||
|
|
||||||
Patterns ending with a / apply to directories only, and patterns not ending with a / apply to files only. When a directory is excluded, all files and subdirectires under it will also be excluded. Note that the path separator is always /, even on Windows.
|
Patterns ending with a / apply to directories only, and patterns not ending with a / apply to files only. Patterns ending with * and ?, however, apply to both directories and files. When a directory is excluded, all files and subdirectories under it will also be excluded. Therefore, to include a subdirectory, all parent directories must be explicitly included. For instance, the following pattern list doesn't do what is intended, since the `foo` directory will be excluded, so `foo/bar` will never be visited:
|
||||||
|
|
||||||
|
```
|
||||||
|
+foo/bar/*
|
||||||
|
-*
|
||||||
|
```
|
||||||
|
|
||||||
|
The correct way is to include `foo` as well:
|
||||||
|
|
||||||
|
```
|
||||||
|
+foo/bar/*
|
||||||
|
+foo/
|
||||||
|
-*
|
||||||
|
```
|
||||||
|
|
||||||
The following pattern list includes only files under the directory foo/ but not files under the subdirectory foo/bar:
|
The following pattern list includes only files under the directory foo/ but not files under the subdirectory foo/bar:
|
||||||
|
|
||||||
@@ -477,10 +440,10 @@ For the *restore* command, the include/exclude patterns are specified as the com
|
|||||||
Duplicacy will attempt to retrieve in three ways the storage password and the storage-specific access tokens/keys.
|
Duplicacy will attempt to retrieve in three ways the storage password and the storage-specific access tokens/keys.
|
||||||
|
|
||||||
* If a secret vault service is available, Duplicacy will store passwords/keys entered by the user in such a secret vault and later retrieve them when needed. On Mac OS X it is Keychain, and on Linux it is gnome-keyring. On Windows the passwords/keys are encrypted and decrypted by the Data Protection API, and encrypted passwords/keys are stored in the file *.duplicacy/keyring*. However, if the -no-save-password option is specified for the storage, then Duplicacy will not save passwords this way.
|
* If a secret vault service is available, Duplicacy will store passwords/keys entered by the user in such a secret vault and later retrieve them when needed. On Mac OS X it is Keychain, and on Linux it is gnome-keyring. On Windows the passwords/keys are encrypted and decrypted by the Data Protection API, and encrypted passwords/keys are stored in the file *.duplicacy/keyring*. However, if the -no-save-password option is specified for the storage, then Duplicacy will not save passwords this way.
|
||||||
* If an environment variable for a password is provided, Duplicacy will always take it. The table below shows the name of the environment variable for each kind of password. Note that if the storage is not the default one, the storage name will be included in the name of the environment variable.
|
* If an environment variable for a password is provided, Duplicacy will always take it. The table below shows the name of the environment variable for each kind of password. Note that if the storage is not the default one, the storage name will be included in the name of the environment variable (in uppercase). For example, if your storage name is b2, then the environment variable should be named DUPLICACY_B2_PASSWORD.
|
||||||
* If a matching key and its value are saved to the preference file (.duplicacy/preferences) by the *set* command, the value will be used as the password. The last column in the table below lists the name of the preference key for each type of password.
|
* If a matching key and its value are saved to the preference file (.duplicacy/preferences) by the *set* command, the value will be used as the password. The last column in the table below lists the name of the preference key for each type of password.
|
||||||
|
|
||||||
| password type | environment variable (default storage) | environment variable (non-default storage) | key in preferences |
|
| password type | environment variable (default storage) | environment variable (non-default storage in uppercase) | key in preferences |
|
||||||
|:----------------:|:----------------:|:----------------:|:----------------:|
|
|:----------------:|:----------------:|:----------------:|:----------------:|
|
||||||
| storage password | DUPLICACY_PASSWORD | DUPLICACY_<STORAGENAME>_PASSWORD | password |
|
| storage password | DUPLICACY_PASSWORD | DUPLICACY_<STORAGENAME>_PASSWORD | password |
|
||||||
| sftp password | DUPLICACY_SSH_PASSWORD | DUPLICACY_<STORAGENAME>_SSH_PASSWORD | ssh_password |
|
| sftp password | DUPLICACY_SSH_PASSWORD | DUPLICACY_<STORAGENAME>_SSH_PASSWORD | ssh_password |
|
||||||
@@ -497,6 +460,16 @@ Duplicacy will attempt to retrieve in three ways the storage password and the st
|
|||||||
|
|
||||||
Note that the passwords stored in the environment variable and the preference need to be in plaintext and thus are insecure and should be avoided whenever possible.
|
Note that the passwords stored in the environment variable and the preference need to be in plaintext and thus are insecure and should be avoided whenever possible.
|
||||||
|
|
||||||
|
## Cache
|
||||||
|
|
||||||
|
Duplicacy maintains a local cache under the `.duplicacy/cache` folder in the repository. Only snapshot chunks may be stored in this local cache, and file chunks are never cached.
|
||||||
|
|
||||||
|
At the end of a backup operation, Duplicacy will clean up the local cache in such a way that only chunks composing the snapshot file from the last backup will stay in the cache. All other chunks will be removed from the cache. However, if the *prune* command has been run before (which will leave the `.duplicacy/collection` folder in the repository), then the *backup* command won't perform any cache cleanup and will instead defer that to the *prune* command.
|
||||||
|
|
||||||
|
At the end of a prune operation, Duplicacy will remove all chunks from the local cache except those composing the snapshot file from the last backup (those that would be kept by the *backup* command), as well as chunks that contain information about chunks referenced by *all* backups from *all* repositories connected to the same storage url.
|
||||||
|
|
||||||
|
Other commands, such as *list* and *check*, do not clean up the local cache at all, so the local cache may keep growing if many of these commands run consecutively. However, once a *backup* or a *prune* command is invoked, the local cache should shrink to its normal size.
|
||||||
|
|
||||||
## Scripts
|
## Scripts
|
||||||
|
|
||||||
You can instruct Duplicay to run a script before or after executing a command. For example, if you create a bash script with the name *pre-prune* under the *.duplicacy/scripts* directory, this bash script will be run before the *prune* command starts. A script named *post-prune* will be run after the *prune* command finishes. This rule applies to all commands except *init*.
|
You can instruct Duplicacy to run a script before or after executing a command. For example, if you create a bash script with the name *pre-prune* under the *.duplicacy/scripts* directory, this bash script will be run before the *prune* command starts. A script named *post-prune* will be run after the *prune* command finishes. This rule applies to all commands except *init*.
|
||||||
|
|||||||
6
LICENSE.md
Normal file
6
LICENSE.md
Normal file
@@ -0,0 +1,6 @@
|
|||||||
|
Copyright © 2017 Acrosync LLC
|
||||||
|
|
||||||
|
* Free for personal use or commercial trial
|
||||||
|
* Non-trial commercial use requires per-user licenses available from [duplicacy.com](https://duplicacy.com/customer) at a cost of $20 per year
|
||||||
|
* Commercial licenses are not required to restore or manage backups; only the backup command requires a valid commercial license
|
||||||
|
* Modification and redistribution are permitted, but commercial use of derivative works is subject to the same requirements of this license
|
||||||
163
README.md
163
README.md
@@ -4,12 +4,14 @@ Duplicacy is a new generation cross-platform cloud backup tool based on the idea
|
|||||||
|
|
||||||
The repository hosts source code, design documents, and binary releases of the command line version. There is also a Duplicacy GUI frontend built for Windows and Mac OS X available from https://duplicacy.com.
|
The repository hosts source code, design documents, and binary releases of the command line version. There is also a Duplicacy GUI frontend built for Windows and Mac OS X available from https://duplicacy.com.
|
||||||
|
|
||||||
|
There is a special edition of Duplicacy developed for VMware vSphere (ESXi) named [Vertical Backup](https://www.verticalbackup.com) that can back up virtual machine files on ESXi to local drives, network or cloud storages.
|
||||||
|
|
||||||
## Features
|
## Features
|
||||||
|
|
||||||
Duplicacy currently supports major cloud storage providers (Amazon S3, Google Cloud Storage, Microsoft Azure, Dropbox, Backblaze, Google Drive, Microsoft OneDrive, and Hubic) and offers all essential features of a modern backup tool:
|
Duplicacy currently supports major cloud storage providers (Amazon S3, Google Cloud Storage, Microsoft Azure, Dropbox, Backblaze B2, Google Drive, Microsoft OneDrive, and Hubic) and offers all essential features of a modern backup tool:
|
||||||
|
|
||||||
* Incremental backup: only back up what has been changed
|
* Incremental backup: only back up what has been changed
|
||||||
* Full snapshot : although each backup is incremental, it must behave like a full snapshot for easy restore and deletion
|
* Full snapshot: although each backup is incremental, it must behave like a full snapshot for easy restore and deletion
|
||||||
* Deduplication: identical files must be stored as one copy (file-level deduplication), and identical parts from different files must be stored as one copy (block-level deduplication)
|
* Deduplication: identical files must be stored as one copy (file-level deduplication), and identical parts from different files must be stored as one copy (block-level deduplication)
|
||||||
* Encryption: encrypt not only file contents but also file paths, sizes, times, etc.
|
* Encryption: encrypt not only file contents but also file paths, sizes, times, etc.
|
||||||
* Deletion: every backup can be deleted independently without affecting others
|
* Deletion: every backup can be deleted independently without affecting others
|
||||||
@@ -26,16 +28,21 @@ The [design document](https://github.com/gilbertchen/duplicacy-cli/blob/master/D
|
|||||||
|
|
||||||
## Getting Started
|
## Getting Started
|
||||||
|
|
||||||
Duplicacy is written in Go. You can build the executable by running the following commands:
|
<details>
|
||||||
|
<summary>Installation</summary>
|
||||||
|
|
||||||
|
Duplicacy is written in Go. You can run the following command to build the executable (which will be created under `$GOPATH/bin`):
|
||||||
|
|
||||||
```
|
```
|
||||||
git clone https://github.com/gilbertchen/duplicacy.git
|
go get -u github.com/gilbertchen/duplicacy/...
|
||||||
cd duplicacy
|
|
||||||
go get ./...
|
|
||||||
go build main/duplicacy_main.go
|
|
||||||
```
|
```
|
||||||
|
|
||||||
You can also visit the [releases page](https://github.com/gilbertchen/duplicacy-cli/releases/latest) to download the version suitable for your platform. Installation is not needed.
|
You can also visit the [releases page](https://github.com/gilbertchen/duplicacy-cli/releases/latest) to download the pre-built binary suitable for your platform.
|
||||||
|
|
||||||
|
</details>
|
||||||
|
|
||||||
|
<details>
|
||||||
|
<summary>Commands</summary>
|
||||||
|
|
||||||
Once you have the Duplicacy executable on your path, you can change to the directory that you want to back up (called *repository*) and run the *init* command:
|
Once you have the Duplicacy executable on your path, you can change to the directory that you want to back up (called *repository*) and run the *init* command:
|
||||||
|
|
||||||
@@ -52,8 +59,16 @@ You can now create snapshots of the repository by invoking the *backup* command.
|
|||||||
$ duplicacy backup -stats
|
$ duplicacy backup -stats
|
||||||
```
|
```
|
||||||
|
|
||||||
|
The *restore* command rolls back the repository to a previous revision:
|
||||||
|
```sh
|
||||||
|
$ duplicacy restore -r 1
|
||||||
|
```
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
Duplicacy provides a set of commands, such as list, check, diff, cat history, to manage snapshots:
|
Duplicacy provides a set of commands, such as list, check, diff, cat history, to manage snapshots:
|
||||||
|
|
||||||
|
|
||||||
```makefile
|
```makefile
|
||||||
$ duplicacy list # List all snapshots
|
$ duplicacy list # List all snapshots
|
||||||
$ duplicacy check # Check integrity of snapshots
|
$ duplicacy check # Check integrity of snapshots
|
||||||
@@ -62,10 +77,6 @@ $ duplicacy cat # Print a file in a snapshot
|
|||||||
$ duplicacy history # Show how a file changes over time
|
$ duplicacy history # Show how a file changes over time
|
||||||
```
|
```
|
||||||
|
|
||||||
The *restore* command rolls back the repository to a previous revision:
|
|
||||||
```sh
|
|
||||||
$ duplicacy restore -r 1
|
|
||||||
```
|
|
||||||
|
|
||||||
The *prune* command removes snapshots by revisions, or tags, or retention policies:
|
The *prune* command removes snapshots by revisions, or tags, or retention policies:
|
||||||
|
|
||||||
@@ -103,29 +114,37 @@ $ duplicacy copy -r 1 -to s3 # Copy snapshot at revision 1 to the s3 storage
|
|||||||
$ duplicacy copy -to s3 # Copy every snapshot to the s3 storage
|
$ duplicacy copy -to s3 # Copy every snapshot to the s3 storage
|
||||||
```
|
```
|
||||||
|
|
||||||
|
</details>
|
||||||
|
|
||||||
|
|
||||||
The [User Guide](https://github.com/gilbertchen/duplicacy-cli/blob/master/GUIDE.md) contains a complete reference to
|
The [User Guide](https://github.com/gilbertchen/duplicacy-cli/blob/master/GUIDE.md) contains a complete reference to
|
||||||
all commands and other features of Duplicacy.
|
all commands and other features of Duplicacy.
|
||||||
|
|
||||||
|
|
||||||
## Storages
|
## Storages
|
||||||
|
|
||||||
Duplicacy currently supports local file storage, SFTP, and 5 cloud storage providers.
|
Duplicacy currently supports local file storage, SFTP, and many cloud storage providers.
|
||||||
|
|
||||||
#### Local disk
|
<details> <summary>Local disk</summary>
|
||||||
|
|
||||||
```
|
```
|
||||||
Storage URL: /path/to/storage (on Linux or Mac OS X)
|
Storage URL: /path/to/storage (on Linux or Mac OS X)
|
||||||
C:\path\to\storage (on Windows)
|
C:\path\to\storage (on Windows)
|
||||||
```
|
```
|
||||||
|
</details>
|
||||||
|
|
||||||
#### SFTP
|
<details> <summary>SFTP</summary>
|
||||||
|
|
||||||
```
|
```
|
||||||
Storage URL: sftp://username@server/path/to/storage
|
Storage URL: sftp://username@server/path/to/storage (path relative to the home directory)
|
||||||
|
sftp://username@server//path/to/storage (absolute path)
|
||||||
```
|
```
|
||||||
|
|
||||||
Login methods include password authentication and public key authentication. Due to a limitation of the underlying Go SSH library, the key pair for public key authentication must be generated without a passphrase. To work with a key that has a passphrase, you can set up SSH agent forwarding which is also supported by Duplicacy.
|
Login methods include password authentication and public key authentication. Due to a limitation of the underlying Go SSH library, the key pair for public key authentication must be generated without a passphrase. To work with a key that has a passphrase, you can set up SSH agent forwarding which is also supported by Duplicacy.
|
||||||
|
|
||||||
#### Dropbox
|
</details>
|
||||||
|
|
||||||
|
<details> <summary>Dropbox</summary>
|
||||||
|
|
||||||
```
|
```
|
||||||
Storage URL: dropbox://path/to/storage
|
Storage URL: dropbox://path/to/storage
|
||||||
@@ -139,7 +158,9 @@ For Duplicacy to access your Dropbox storage, you must provide an access token t
|
|||||||
|
|
||||||
Dropbox has two advantages over other cloud providers. First, if you are already a paid user then to use the unused space as the backup storage is basically free. Second, unlike other providers Dropbox does not charge bandwidth or API usage fees.
|
Dropbox has two advantages over other cloud providers. First, if you are already a paid user then to use the unused space as the backup storage is basically free. Second, unlike other providers Dropbox does not charge bandwidth or API usage fees.
|
||||||
|
|
||||||
#### Amazon S3
|
</details>
|
||||||
|
|
||||||
|
<details> <summary>Amazon S3</summary>
|
||||||
|
|
||||||
```
|
```
|
||||||
Storage URL: s3://amazon.com/bucket/path/to/storage (default region is us-east-1)
|
Storage URL: s3://amazon.com/bucket/path/to/storage (default region is us-east-1)
|
||||||
@@ -148,18 +169,43 @@ Storage URL: s3://amazon.com/bucket/path/to/storage (default region is us-east-
|
|||||||
|
|
||||||
You'll need to input an access key and a secret key to access your Amazon S3 storage.
|
You'll need to input an access key and a secret key to access your Amazon S3 storage.
|
||||||
|
|
||||||
|
Minio-based S3 compatiable storages are also supported by using the `minio` or `minios` backends:
|
||||||
|
```
|
||||||
|
Storage URL: minio://region@host/bucket/path/to/storage (without TLS)
|
||||||
|
Storage URL: minios://region@host/bucket/path/to/storage (with TLS)
|
||||||
|
```
|
||||||
|
|
||||||
#### Google Cloud Storage
|
There is another backend that works with S3 compatible storage providers that require V2 signing:
|
||||||
|
```
|
||||||
|
Storage URL: s3c://region@host/bucket/path/to/storage
|
||||||
|
```
|
||||||
|
|
||||||
|
</details>
|
||||||
|
|
||||||
|
<details> <summary>Wasabi</summary>
|
||||||
|
|
||||||
|
```
|
||||||
|
Storage URL: s3://us-east-1@s3.wasabisys.com/bucket/path/to/storage
|
||||||
|
```
|
||||||
|
|
||||||
|
[Wasabi](https://wasabi.com) is a relatively new cloud storage service providing a S3-compatible API.
|
||||||
|
It is well suited for storing backups, because it is much cheaper than Amazon S3 with a storage cost of $.0039/GB/Month and a download fee of $0.04/GB, and no additional charges on API calls.
|
||||||
|
|
||||||
|
</details>
|
||||||
|
|
||||||
|
<details> <summary>Google Cloud Storage</summary>
|
||||||
|
|
||||||
```
|
```
|
||||||
Storage URL: gcs://bucket/path/to/storage
|
Storage URL: gcs://bucket/path/to/storage
|
||||||
```
|
```
|
||||||
|
|
||||||
Starting from version 2.0.0, a new Google Cloud Storage backend is added which is implemented using the [official Google client library](https://godoc.org/cloud.google.com/go/storage). You must first obtain a credential file by [authorizing](https://duplicacy.com/gcp_start) Dupliacy to access your Google Cloud Storage account or by [downloading](https://console.cloud.google.com/projectselector/iam-admin/serviceaccounts) a service account credential file.
|
Starting from version 2.0.0, a new Google Cloud Storage backend is added which is implemented using the [official Google client library](https://godoc.org/cloud.google.com/go/storage). You must first obtain a credential file by [authorizing](https://duplicacy.com/gcp_start) Duplicacy to access your Google Cloud Storage account or by [downloading](https://console.cloud.google.com/projectselector/iam-admin/serviceaccounts) a service account credential file.
|
||||||
|
|
||||||
You can also use the s3 protocol to access Google Cloud Storage. To do this, you must enable the [s3 interoperability](https://cloud.google.com/storage/docs/migrating#migration-simple) in your Google Cloud Storage settings and set the storage url as `s3://storage.googleapis.com/bucket/path/to/storage`.
|
You can also use the s3 protocol to access Google Cloud Storage. To do this, you must enable the [s3 interoperability](https://cloud.google.com/storage/docs/migrating#migration-simple) in your Google Cloud Storage settings and set the storage url as `s3://storage.googleapis.com/bucket/path/to/storage`.
|
||||||
|
|
||||||
#### Microsoft Azure
|
</details>
|
||||||
|
|
||||||
|
<details> <summary>Microsoft Azure</summary>
|
||||||
|
|
||||||
```
|
```
|
||||||
Storage URL: azure://account/container
|
Storage URL: azure://account/container
|
||||||
@@ -167,7 +213,9 @@ Storage URL: azure://account/container
|
|||||||
|
|
||||||
You'll need to input the access key once prompted.
|
You'll need to input the access key once prompted.
|
||||||
|
|
||||||
#### Backblaze
|
</details>
|
||||||
|
|
||||||
|
<details> <summary>Backblaze B2</summary>
|
||||||
|
|
||||||
```
|
```
|
||||||
Storage URL: b2://bucket
|
Storage URL: b2://bucket
|
||||||
@@ -175,39 +223,43 @@ Storage URL: b2://bucket
|
|||||||
|
|
||||||
You'll need to input the account id and application key.
|
You'll need to input the account id and application key.
|
||||||
|
|
||||||
Backblaze's B2 storage is not only the least expensive (at 0.5 cent per GB per month), but also the fastest. We have been working closely with their developers to leverage the full potentials provided by the B2 API in order to maximumize the transfer speed.
|
Backblaze's B2 storage is one of the least expensive (at 0.5 cent per GB per month, with a download fee of 2 cents per GB, plus additional charges for API calls).
|
||||||
|
|
||||||
#### Google Drive
|
</details>
|
||||||
|
|
||||||
|
<details> <summary>Google Drive</summary>
|
||||||
|
|
||||||
```
|
```
|
||||||
Storage URL: gcd://path/to/storage
|
Storage URL: gcd://path/to/storage
|
||||||
```
|
```
|
||||||
|
|
||||||
To use Google Drive as the storage, you first need to download a token file from https://duplicacy.com/gcd_start by
|
To use Google Drive as the storage, you first need to download a token file from https://duplicacy.com/gcd_start by authorizing Duplicacy to access your Google Drive, and then enter the path to this token file to Duplicacy when prompted.
|
||||||
authorizing Duplicacy to access your Google Drive, and then enter the path to this token file to Duplicacy when prompted.
|
|
||||||
|
|
||||||
#### Microsoft OneDrive
|
</details>
|
||||||
|
|
||||||
|
<details> <summary>Microsoft OneDrive</summary>
|
||||||
|
|
||||||
```
|
```
|
||||||
Storage URL: one://path/to/storage
|
Storage URL: one://path/to/storage
|
||||||
```
|
```
|
||||||
|
|
||||||
To use Microsoft OneDrive as the storage, you first need to download a token file from https://duplicacy.com/one_start by
|
To use Microsoft OneDrive as the storage, you first need to download a token file from https://duplicacy.com/one_start by authorizing Duplicacy to access your OneDrive, and then enter the path to this token file to Duplicacy when prompted.
|
||||||
authorizing Duplicacy to access your OneDrive, and then enter the path to this token file to Duplicacy when prompted.
|
|
||||||
|
|
||||||
#### Hubic
|
</details>
|
||||||
|
|
||||||
|
<details> <summary>Hubic</summary>
|
||||||
|
|
||||||
```
|
```
|
||||||
Storage URL: hubic://path/to/storage
|
Storage URL: hubic://path/to/storage
|
||||||
```
|
```
|
||||||
|
|
||||||
To use Hubic as the storage, you first need to download a token file from https://duplicacy.com/hubic_start by
|
To use Hubic as the storage, you first need to download a token file from https://duplicacy.com/hubic_start by authorizing Duplicacy to access your Hubic drive, and then enter the path to this token file to Duplicacy when prompted.
|
||||||
authorizing Duplicacy to access your Hubic drive, and then enter the path to this token file to Duplicacy when prompted.
|
|
||||||
|
|
||||||
Hubic offers the most free space (25GB) of all major cloud providers and there is no bandwidth charge (same as Google Drive and OneDrive), so it may be worth a try.
|
Hubic offers the most free space (25GB) of all major cloud providers and there is no bandwidth charge (same as Google Drive and OneDrive), so it may be worth a try.
|
||||||
|
|
||||||
|
</details>
|
||||||
|
|
||||||
## Comparison with Other Backup Tools
|
## Feature Comparison with Other Backup Tools
|
||||||
|
|
||||||
[duplicity](http://duplicity.nongnu.org) works by applying the rsync algorithm (or more specific, the [librsync](https://github.com/librsync/librsync) library)
|
[duplicity](http://duplicity.nongnu.org) works by applying the rsync algorithm (or more specific, the [librsync](https://github.com/librsync/librsync) library)
|
||||||
to find the differences from previous backups and only then uploading the differences. It is the only existing backup tool with extensive cloud support -- the [long list](http://duplicity.nongnu.org/duplicity.1.html#sect7) of storage backends covers almost every cloud provider one can think of. However, duplicity's biggest flaw lies in its incremental model -- a chain of dependent backups starts with a full backup followed by a number of incremental ones, and ends when another full backup is uploaded. Deleting one backup will render useless all the subsequent backups on the same chain. Periodic full backups are required, in order to make previous backups disposable.
|
to find the differences from previous backups and only then uploading the differences. It is the only existing backup tool with extensive cloud support -- the [long list](http://duplicity.nongnu.org/duplicity.1.html#sect7) of storage backends covers almost every cloud provider one can think of. However, duplicity's biggest flaw lies in its incremental model -- a chain of dependent backups starts with a full backup followed by a number of incremental ones, and ends when another full backup is uploaded. Deleting one backup will render useless all the subsequent backups on the same chain. Periodic full backups are required, in order to make previous backups disposable.
|
||||||
@@ -220,18 +272,18 @@ Deletion of old backups is possible, but no cloud storages are supported.
|
|||||||
Multiple clients can back up to the same storage, but only sequential access is granted by the [locking on-disk data structures](http://obnam.org/locking/).
|
Multiple clients can back up to the same storage, but only sequential access is granted by the [locking on-disk data structures](http://obnam.org/locking/).
|
||||||
It is unclear if the lack of cloud backends is due to difficulties in porting the locking data structures to cloud storage APIs.
|
It is unclear if the lack of cloud backends is due to difficulties in porting the locking data structures to cloud storage APIs.
|
||||||
|
|
||||||
[Attic](https://attic-backup.org) has been acclaimed by some as the [Holy Grail of backups](https://www.stavros.io/posts/holy-grail-backups). It follows the same incremental backup model as Obnam, but embraces the variable-size chunk algorithm for better performance and better deduplication. Deletions of old backup is also supported. However, no cloud backends are implemented, as in Obnam. Although concurrent backups from multiple clients to the same storage is in theory possible by the use of locking, it is
|
[Attic](https://attic-backup.org) has been acclaimed by some as the [Holy Grail of backups](https://www.stavros.io/posts/holy-grail-backups). It follows the same incremental backup model as Obnam, but embraces the variable-size chunk algorithm for better performance and better deduplication. Deletions of old backup is also supported. However, no cloud backends are implemented, as in Obnam. Although concurrent backups from multiple clients to the same storage is in theory possible by the use of locking, it is
|
||||||
[not recommended](http://librelist.com/browser//attic/2014/11/11/backing-up-multiple-servers-into-a-single-repository/#e96345aa5a3469a87786675d65da492b) by the developer due to chunk indices being kept in a local cache.
|
[not recommended](http://librelist.com/browser//attic/2014/11/11/backing-up-multiple-servers-into-a-single-repository/#e96345aa5a3469a87786675d65da492b) by the developer due to chunk indices being kept in a local cache.
|
||||||
Concurrent access is not only a convenience; it is a necessity for better deduplication. For instance, if multiple machines with the same OS installed can back up their entire drives to the same storage, only one copy of the system files needs to be stored, greatly reducing the storage space regardless of the number of machines. Attic still adopts the traditional approach of using a centralized indexing database to manage chunks, and relies heavily on caching to improve performance. The presence of exclusive locking makes it hard to be adapted for cloud storage APIs and reduces the level of deduplication.
|
Concurrent access is not only a convenience; it is a necessity for better deduplication. For instance, if multiple machines with the same OS installed can back up their entire drives to the same storage, only one copy of the system files needs to be stored, greatly reducing the storage space regardless of the number of machines. Attic still adopts the traditional approach of using a centralized indexing database to manage chunks, and relies heavily on caching to improve performance. The presence of exclusive locking makes it hard to be adapted for cloud storage APIs and reduces the level of deduplication.
|
||||||
|
|
||||||
[restic](https://restic.github.io) is a more recent addition. It is worth mentioning here because, like Duplicacy, it is written in Go. It uses a format similar to the git packfile format, but not exactly the same. Multiple clients backing up to the same storage are still guarded by
|
[restic](https://restic.github.io) is a more recent addition. It is worth mentioning here because, like Duplicacy, it is written in Go. It uses a format similar to the git packfile format. Multiple clients backing up to the same storage are still guarded by
|
||||||
[locks](https://github.com/restic/restic/blob/master/doc/Design.md#locks).
|
[locks](https://github.com/restic/restic/blob/master/doc/Design.md#locks). A prune operation will therefore completely block all other clients connected to the storage from doing their regular backups. Moreover, since most cloud storage services do not provide a locking service, the best effort is to use some basic file operations to simulate a lock, but distributed locking is known to be a hard problem and it is unclear how reliable restic's lock implementation is. A faulty implementation may cause a prune operation to accidentally delete data still in use, resulting in unrecoverable data loss. This is the exact problem that we avoided by taking the lock-free approach.
|
||||||
A command to delete old backups is in the developer's [plan](https://github.com/restic/restic/issues/18). S3 storage is supported, although it is unclear how hard it is to support other cloud storage APIs because of the need for locking. Overall, it still falls in the same category as Attic. Whether it will eventually reach the same level as Attic remains to be seen.
|
|
||||||
|
|
||||||
The following table compares the feature lists of all these backup tools:
|
The following table compares the feature lists of all these backup tools:
|
||||||
|
|
||||||
|
|
||||||
| Feature/Tool | duplicity | bup | Obnam | Attic | restic | **Duplicacy** |
|
| Feature/Tool | duplicity | bup | Obnam | Attic | restic | **Duplicacy** |
|
||||||
|:------------------:|:---------:|:---:|:-----------------:|:---------------:|:-----------------:|:-------------:|
|
|:------------------:|:---------:|:---:|:-----------------:|:---------------:|:-----------------:|:-------------:|
|
||||||
| Incremental Backup | Yes | Yes | Yes | Yes | Yes | **Yes** |
|
| Incremental Backup | Yes | Yes | Yes | Yes | Yes | **Yes** |
|
||||||
| Full Snapshot | No | Yes | Yes | Yes | Yes | **Yes** |
|
| Full Snapshot | No | Yes | Yes | Yes | Yes | **Yes** |
|
||||||
@@ -239,13 +291,36 @@ The following table compares the feature lists of all these backup tools:
|
|||||||
| Encryption | Yes | Yes | Yes | Yes | Yes | **Yes** |
|
| Encryption | Yes | Yes | Yes | Yes | Yes | **Yes** |
|
||||||
| Deletion | No | No | Yes | Yes | No | **Yes** |
|
| Deletion | No | No | Yes | Yes | No | **Yes** |
|
||||||
| Concurrent Access | No | No | Exclusive locking | Not recommended | Exclusive locking | **Lock-free** |
|
| Concurrent Access | No | No | Exclusive locking | Not recommended | Exclusive locking | **Lock-free** |
|
||||||
| Cloud Support | Extensive | No | No | No | S3 only | **S3, GCS, Azure, Dropbox, Backblaze, Google Drive, OneDrive, and Hubic**|
|
| Cloud Support | Extensive | No | No | No | S3, B2, OpenStack | **S3, GCS, Azure, Dropbox, Backblaze B2, Google Drive, OneDrive, and Hubic**|
|
||||||
| Snapshot Migration | No | No | No | No | No | **Yes** |
|
| Snapshot Migration | No | No | No | No | No | **Yes** |
|
||||||
|
|
||||||
|
|
||||||
|
## Performance Comparison with Other Backup Tools
|
||||||
|
|
||||||
|
Duplicacy is not only more feature-rich but also faster than other backup tools. The following table lists the running times in seconds of backing up the [Linux code base](https://github.com/torvalds/linux) using Duplicacy and 3 other tools. Clearly Duplicacy is the fastest by a significant margin.
|
||||||
|
|
||||||
|
|
||||||
|
| | Duplicacy | restic | Attic | duplicity |
|
||||||
|
|:------------------:|:----------------:|:----------:|:----------:|:-----------:|
|
||||||
|
| Initial backup | 13.7 | 20.7 | 26.9 | 44.2 |
|
||||||
|
| 2nd backup | 4.8 | 8.0 | 15.4 | 19.5 |
|
||||||
|
| 3rd backup | 6.9 | 11.9 | 19.6 | 29.8 |
|
||||||
|
| 4th backup | 3.3 | 7.0 | 13.7 | 18.6 |
|
||||||
|
| 5th backup | 9.9 | 11.4 | 19.9 | 28.0 |
|
||||||
|
| 6th backup | 3.8 | 8.0 | 16.8 | 22.0 |
|
||||||
|
| 7th backup | 5.1 | 7.8 | 14.3 | 21.6 |
|
||||||
|
| 8th backup | 9.5 | 13.5 | 18.3 | 35.0 |
|
||||||
|
| 9th backup | 4.3 | 9.0 | 15.7 | 24.9 |
|
||||||
|
| 10th backup | 7.9 | 20.2 | 32.2 | 35.0 |
|
||||||
|
| 11th backup | 4.6 | 9.1 | 16.8 | 28.1 |
|
||||||
|
| 12th backup | 7.4 | 12.0 | 21.7 | 37.4 |
|
||||||
|
|
||||||
|
|
||||||
|
For more details and other speed comparison results, please visit https://github.com/gilbertchen/benchmarking. There you can also find test scripts that you can use to run your own experiments.
|
||||||
|
|
||||||
## License
|
## License
|
||||||
|
|
||||||
Duplicacy CLI is released under the [Fair Source 5 License](https://fair.io), which means it is free for individual users or any company or organization with less than 5 users. If your company or organization has 5 or more users, then a license for the actual number of users must be purchased from [duplicacy.com](https://duplicacy.com/customer).
|
* Free for personal use or commercial trial
|
||||||
|
* Non-trial commercial use requires per-user licenses available from [duplicacy.com](https://duplicacy.com/customer) at a cost of $20 per year
|
||||||
A user is defined as the owner of any files to be backed up by Duplicacy. If you are an IT administrator who uses Duplicacy to back up files for your colleagues, then each colleague will be counted in the user limit permitted by the license.
|
* Commercial licenses are not required to restore or manage backups; only the backup command requires a valid commercial license
|
||||||
|
* Modification and redistribution are permitted, but commercial use of derivative works is subject to the same requirements of this license
|
||||||
|
|||||||
@@ -1,6 +1,6 @@
|
|||||||
// Copyright (c) Acrosync LLC. All rights reserved.
|
// Copyright (c) Acrosync LLC. All rights reserved.
|
||||||
// Licensed under the Fair Source License 0.9 (https://fair.io/)
|
// Free for personal use and commercial trial
|
||||||
// User Limitation: 5 users
|
// Commercial use requires per-user licenses available from https://duplicacy.com
|
||||||
|
|
||||||
package main
|
package main
|
||||||
|
|
||||||
@@ -12,12 +12,15 @@ import (
|
|||||||
"regexp"
|
"regexp"
|
||||||
"strings"
|
"strings"
|
||||||
"strconv"
|
"strconv"
|
||||||
|
"runtime"
|
||||||
"os/exec"
|
"os/exec"
|
||||||
|
"os/signal"
|
||||||
"encoding/json"
|
"encoding/json"
|
||||||
|
|
||||||
"github.com/gilbertchen/cli"
|
"github.com/gilbertchen/cli"
|
||||||
|
|
||||||
"github.com/gilbertchen/duplicacy/src"
|
"github.com/gilbertchen/duplicacy/src"
|
||||||
|
"io/ioutil"
|
||||||
)
|
)
|
||||||
|
|
||||||
const (
|
const (
|
||||||
@@ -36,14 +39,14 @@ func getRepositoryPreference(context *cli.Context, storageName string) (reposito
|
|||||||
}
|
}
|
||||||
|
|
||||||
for {
|
for {
|
||||||
stat, err := os.Stat(path.Join(repository, duplicacy.DUPLICACY_DIRECTORY))
|
stat, err := os.Stat(path.Join(repository, duplicacy.DUPLICACY_DIRECTORY)) //TOKEEP
|
||||||
if err != nil && !os.IsNotExist(err) {
|
if err != nil && !os.IsNotExist(err) {
|
||||||
duplicacy.LOG_ERROR("REPOSITORY_PATH", "Failed to retrieve the information about the directory %s: %v",
|
duplicacy.LOG_ERROR("REPOSITORY_PATH", "Failed to retrieve the information about the directory %s: %v",
|
||||||
repository, err)
|
repository, err)
|
||||||
return "", nil
|
return "", nil
|
||||||
}
|
}
|
||||||
|
|
||||||
if stat != nil && stat.IsDir() {
|
if stat != nil && (stat.IsDir() || stat.Mode().IsRegular()) {
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -54,10 +57,10 @@ func getRepositoryPreference(context *cli.Context, storageName string) (reposito
|
|||||||
}
|
}
|
||||||
repository = parent
|
repository = parent
|
||||||
}
|
}
|
||||||
|
|
||||||
duplicacy.LoadPreferences(repository)
|
duplicacy.LoadPreferences(repository)
|
||||||
|
|
||||||
duplicacy.SetKeyringFile(path.Join(repository, duplicacy.DUPLICACY_DIRECTORY, "keyring"))
|
preferencePath := duplicacy.GetDuplicacyPreferencePath()
|
||||||
|
duplicacy.SetKeyringFile(path.Join(preferencePath, "keyring"))
|
||||||
|
|
||||||
if storageName == "" {
|
if storageName == "" {
|
||||||
storageName = context.String("storage")
|
storageName = context.String("storage")
|
||||||
@@ -137,25 +140,35 @@ func setGlobalOptions(context *cli.Context) {
|
|||||||
duplicacy.RunInBackground = context.GlobalBool("background")
|
duplicacy.RunInBackground = context.GlobalBool("background")
|
||||||
}
|
}
|
||||||
|
|
||||||
func runScript(context *cli.Context, repository string, storageName string, phase string) bool {
|
func runScript(context *cli.Context, storageName string, phase string) bool {
|
||||||
|
|
||||||
if !ScriptEnabled {
|
if !ScriptEnabled {
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
|
|
||||||
|
preferencePath := duplicacy.GetDuplicacyPreferencePath()
|
||||||
|
scriptDir, _ := filepath.Abs(path.Join(preferencePath, "scripts"))
|
||||||
|
scriptNames := []string { phase + "-" + context.Command.Name,
|
||||||
|
storageName + "-" + phase + "-" + context.Command.Name }
|
||||||
|
|
||||||
scriptDir, _ := filepath.Abs(path.Join(repository, duplicacy.DUPLICACY_DIRECTORY, "scripts"))
|
script := ""
|
||||||
scriptName := phase + "-" + context.Command.Name
|
for _, scriptName := range scriptNames {
|
||||||
|
|
||||||
script := path.Join(scriptDir, scriptName)
|
|
||||||
if _, err := os.Stat(script); err != nil {
|
|
||||||
scriptName = storageName + "-" + scriptName
|
|
||||||
script = path.Join(scriptDir, scriptName)
|
script = path.Join(scriptDir, scriptName)
|
||||||
if _, err = os.Stat(script); err != nil {
|
if runtime.GOOS == "windows" {
|
||||||
return false
|
script += ".bat"
|
||||||
|
}
|
||||||
|
if _, err := os.Stat(script); err == nil {
|
||||||
|
break
|
||||||
|
} else {
|
||||||
|
script = ""
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
duplicacy.LOG_INFO("SCRIPT_RUN", "Running %s script", scriptName)
|
if script == "" {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
duplicacy.LOG_INFO("SCRIPT_RUN", "Running script %s", script)
|
||||||
|
|
||||||
output, err := exec.Command(script, os.Args...).CombinedOutput()
|
output, err := exec.Command(script, os.Args...).CombinedOutput()
|
||||||
for _, line := range strings.Split(string(output), "\n") {
|
for _, line := range strings.Split(string(output), "\n") {
|
||||||
@@ -174,14 +187,14 @@ func runScript(context *cli.Context, repository string, storageName string, phas
|
|||||||
}
|
}
|
||||||
|
|
||||||
func initRepository(context *cli.Context) {
|
func initRepository(context *cli.Context) {
|
||||||
configRespository(context, true)
|
configRepository(context, true)
|
||||||
}
|
}
|
||||||
|
|
||||||
func addStorage(context *cli.Context) {
|
func addStorage(context *cli.Context) {
|
||||||
configRespository(context, false)
|
configRepository(context, false)
|
||||||
}
|
}
|
||||||
|
|
||||||
func configRespository(context *cli.Context, init bool) {
|
func configRepository(context *cli.Context, init bool) {
|
||||||
|
|
||||||
setGlobalOptions(context)
|
setGlobalOptions(context)
|
||||||
defer duplicacy.CatchLogException()
|
defer duplicacy.CatchLogException()
|
||||||
@@ -220,21 +233,37 @@ func configRespository(context *cli.Context, init bool) {
|
|||||||
duplicacy.LOG_ERROR("REPOSITORY_PATH", "Failed to retrieve the current working directory: %v", err)
|
duplicacy.LOG_ERROR("REPOSITORY_PATH", "Failed to retrieve the current working directory: %v", err)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
duplicacyDirectory := path.Join(repository, duplicacy.DUPLICACY_DIRECTORY)
|
preferencePath := context.String("pref-dir")
|
||||||
if stat, _ := os.Stat(path.Join(duplicacyDirectory, "preferences")); stat != nil {
|
if preferencePath == "" {
|
||||||
|
preferencePath = path.Join(repository, duplicacy.DUPLICACY_DIRECTORY) // TOKEEP
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
if stat, _ := os.Stat(path.Join(preferencePath, "preferences")); stat != nil {
|
||||||
duplicacy.LOG_ERROR("REPOSITORY_INIT", "The repository %s has already been initialized", repository)
|
duplicacy.LOG_ERROR("REPOSITORY_INIT", "The repository %s has already been initialized", repository)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
err = os.Mkdir(duplicacyDirectory, 0744)
|
err = os.Mkdir(preferencePath, 0744)
|
||||||
if err != nil && !os.IsExist(err) {
|
if err != nil && !os.IsExist(err) {
|
||||||
duplicacy.LOG_ERROR("REPOSITORY_INIT", "Failed to create the directory %s: %v",
|
duplicacy.LOG_ERROR("REPOSITORY_INIT", "Failed to create the directory %s: %v",
|
||||||
duplicacy.DUPLICACY_DIRECTORY, err)
|
preferencePath, err)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
if context.String("pref-dir") != "" {
|
||||||
duplicacy.SetKeyringFile(path.Join(duplicacyDirectory, "keyring"))
|
// out of tree preference file
|
||||||
|
// write real path into .duplicacy file inside repository
|
||||||
|
duplicacyFileName := path.Join(repository, duplicacy.DUPLICACY_FILE)
|
||||||
|
d1 := []byte(preferencePath)
|
||||||
|
err = ioutil.WriteFile(duplicacyFileName, d1, 0644)
|
||||||
|
if err != nil {
|
||||||
|
duplicacy.LOG_ERROR("REPOSITORY_PATH", "Failed to write %s file inside repository %v", duplicacyFileName, err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
duplicacy.SetDuplicacyPreferencePath(preferencePath)
|
||||||
|
duplicacy.SetKeyringFile(path.Join(preferencePath, "keyring"))
|
||||||
|
|
||||||
} else {
|
} else {
|
||||||
repository, _ = getRepositoryPreference(context, "")
|
repository, _ = getRepositoryPreference(context, "")
|
||||||
@@ -251,7 +280,7 @@ func configRespository(context *cli.Context, init bool) {
|
|||||||
Encrypted: context.Bool("encrypt"),
|
Encrypted: context.Bool("encrypt"),
|
||||||
}
|
}
|
||||||
|
|
||||||
storage := duplicacy.CreateStorage(repository, preference, true, 1)
|
storage := duplicacy.CreateStorage(preference, true, 1)
|
||||||
storagePassword := ""
|
storagePassword := ""
|
||||||
if preference.Encrypted {
|
if preference.Encrypted {
|
||||||
prompt := fmt.Sprintf("Enter storage password for %s:", preference.StorageURL)
|
prompt := fmt.Sprintf("Enter storage password for %s:", preference.StorageURL)
|
||||||
@@ -341,7 +370,7 @@ func configRespository(context *cli.Context, init bool) {
|
|||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
otherStorage := duplicacy.CreateStorage(repository, *otherPreference, false, 1)
|
otherStorage := duplicacy.CreateStorage(*otherPreference, false, 1)
|
||||||
|
|
||||||
otherPassword := ""
|
otherPassword := ""
|
||||||
if otherPreference.Encrypted {
|
if otherPreference.Encrypted {
|
||||||
@@ -368,7 +397,7 @@ func configRespository(context *cli.Context, init bool) {
|
|||||||
|
|
||||||
duplicacy.Preferences = append(duplicacy.Preferences, preference)
|
duplicacy.Preferences = append(duplicacy.Preferences, preference)
|
||||||
|
|
||||||
duplicacy.SavePreferences(repository)
|
duplicacy.SavePreferences()
|
||||||
|
|
||||||
duplicacy.LOG_INFO("REPOSITORY_INIT", "%s will be backed up to %s with id %s",
|
duplicacy.LOG_INFO("REPOSITORY_INIT", "%s will be backed up to %s with id %s",
|
||||||
repository, preference.StorageURL, preference.SnapshotID)
|
repository, preference.StorageURL, preference.SnapshotID)
|
||||||
@@ -489,7 +518,7 @@ func setPreference(context *cli.Context) {
|
|||||||
oldPreference.StorageURL)
|
oldPreference.StorageURL)
|
||||||
} else {
|
} else {
|
||||||
*oldPreference = newPreference
|
*oldPreference = newPreference
|
||||||
duplicacy.SavePreferences(repository)
|
duplicacy.SavePreferences()
|
||||||
duplicacy.LOG_INFO("STORAGE_SET", "New options for storage %s have been saved", oldPreference.StorageURL)
|
duplicacy.LOG_INFO("STORAGE_SET", "New options for storage %s have been saved", oldPreference.StorageURL)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -506,16 +535,18 @@ func changePassword(context *cli.Context) {
|
|||||||
os.Exit(ArgumentExitCode)
|
os.Exit(ArgumentExitCode)
|
||||||
}
|
}
|
||||||
|
|
||||||
repository, preference := getRepositoryPreference(context, "")
|
_, preference := getRepositoryPreference(context, "")
|
||||||
|
|
||||||
storage := duplicacy.CreateStorage(repository, *preference, false, 1)
|
storage := duplicacy.CreateStorage(*preference, false, 1)
|
||||||
if storage == nil {
|
if storage == nil {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
password := ""
|
password := ""
|
||||||
if preference.Encrypted {
|
if preference.Encrypted {
|
||||||
password = duplicacy.GetPassword(*preference, "password", "Enter old password for storage %s:", false, true)
|
password = duplicacy.GetPassword(*preference, "password",
|
||||||
|
fmt.Sprintf("Enter old password for storage %s:", preference.StorageURL),
|
||||||
|
false, true)
|
||||||
}
|
}
|
||||||
|
|
||||||
config, _, err := duplicacy.DownloadConfig(storage, password)
|
config, _, err := duplicacy.DownloadConfig(storage, password)
|
||||||
@@ -547,7 +578,6 @@ func changePassword(context *cli.Context) {
|
|||||||
duplicacy.LOG_INFO("STORAGE_SET", "The password for storage %s has been changed", preference.StorageURL)
|
duplicacy.LOG_INFO("STORAGE_SET", "The password for storage %s has been changed", preference.StorageURL)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
func backupRepository(context *cli.Context) {
|
func backupRepository(context *cli.Context) {
|
||||||
setGlobalOptions(context)
|
setGlobalOptions(context)
|
||||||
defer duplicacy.CatchLogException()
|
defer duplicacy.CatchLogException()
|
||||||
@@ -566,7 +596,7 @@ func backupRepository(context *cli.Context) {
|
|||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
runScript(context, repository, preference.Name, "pre")
|
runScript(context, preference.Name, "pre")
|
||||||
|
|
||||||
threads := context.Int("threads")
|
threads := context.Int("threads")
|
||||||
if threads < 1 {
|
if threads < 1 {
|
||||||
@@ -574,7 +604,7 @@ func backupRepository(context *cli.Context) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
duplicacy.LOG_INFO("STORAGE_SET", "Storage set to %s", preference.StorageURL)
|
duplicacy.LOG_INFO("STORAGE_SET", "Storage set to %s", preference.StorageURL)
|
||||||
storage := duplicacy.CreateStorage(repository, *preference, false, threads)
|
storage := duplicacy.CreateStorage(*preference, false, threads)
|
||||||
if storage == nil {
|
if storage == nil {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
@@ -598,10 +628,10 @@ func backupRepository(context *cli.Context) {
|
|||||||
backupManager := duplicacy.CreateBackupManager(preference.SnapshotID, storage, repository, password)
|
backupManager := duplicacy.CreateBackupManager(preference.SnapshotID, storage, repository, password)
|
||||||
duplicacy.SavePassword(*preference, "password", password)
|
duplicacy.SavePassword(*preference, "password", password)
|
||||||
|
|
||||||
backupManager.SetupSnapshotCache(repository, preference.Name)
|
backupManager.SetupSnapshotCache(preference.Name)
|
||||||
backupManager.Backup(repository, quickMode, threads, context.String("t"), showStatistics, enableVSS)
|
backupManager.Backup(repository, quickMode, threads, context.String("t"), showStatistics, enableVSS)
|
||||||
|
|
||||||
runScript(context, repository, preference.Name, "post")
|
runScript(context, preference.Name, "post")
|
||||||
}
|
}
|
||||||
|
|
||||||
func restoreRepository(context *cli.Context) {
|
func restoreRepository(context *cli.Context) {
|
||||||
@@ -623,7 +653,7 @@ func restoreRepository(context *cli.Context) {
|
|||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
runScript(context, repository, preference.Name, "pre")
|
runScript(context, preference.Name, "pre")
|
||||||
|
|
||||||
threads := context.Int("threads")
|
threads := context.Int("threads")
|
||||||
if threads < 1 {
|
if threads < 1 {
|
||||||
@@ -631,7 +661,7 @@ func restoreRepository(context *cli.Context) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
duplicacy.LOG_INFO("STORAGE_SET", "Storage set to %s", preference.StorageURL)
|
duplicacy.LOG_INFO("STORAGE_SET", "Storage set to %s", preference.StorageURL)
|
||||||
storage := duplicacy.CreateStorage(repository, *preference, false, threads)
|
storage := duplicacy.CreateStorage(*preference, false, threads)
|
||||||
if storage == nil {
|
if storage == nil {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
@@ -673,10 +703,10 @@ func restoreRepository(context *cli.Context) {
|
|||||||
backupManager := duplicacy.CreateBackupManager(preference.SnapshotID, storage, repository, password)
|
backupManager := duplicacy.CreateBackupManager(preference.SnapshotID, storage, repository, password)
|
||||||
duplicacy.SavePassword(*preference, "password", password)
|
duplicacy.SavePassword(*preference, "password", password)
|
||||||
|
|
||||||
backupManager.SetupSnapshotCache(repository, preference.Name)
|
backupManager.SetupSnapshotCache(preference.Name)
|
||||||
backupManager.Restore(repository, revision, true, quickMode, threads, overwrite, deleteMode, showStatistics, patterns)
|
backupManager.Restore(repository, revision, true, quickMode, threads, overwrite, deleteMode, showStatistics, patterns)
|
||||||
|
|
||||||
runScript(context, repository, preference.Name, "post")
|
runScript(context, preference.Name, "post")
|
||||||
}
|
}
|
||||||
|
|
||||||
func listSnapshots(context *cli.Context) {
|
func listSnapshots(context *cli.Context) {
|
||||||
@@ -693,10 +723,10 @@ func listSnapshots(context *cli.Context) {
|
|||||||
|
|
||||||
duplicacy.LOG_INFO("STORAGE_SET", "Storage set to %s", preference.StorageURL)
|
duplicacy.LOG_INFO("STORAGE_SET", "Storage set to %s", preference.StorageURL)
|
||||||
|
|
||||||
runScript(context, repository, preference.Name, "pre")
|
runScript(context, preference.Name, "pre")
|
||||||
|
|
||||||
resetPassword := context.Bool("reset-passwords")
|
resetPassword := context.Bool("reset-passwords")
|
||||||
storage := duplicacy.CreateStorage(repository, *preference, resetPassword, 1)
|
storage := duplicacy.CreateStorage(*preference, resetPassword, 1)
|
||||||
if storage == nil {
|
if storage == nil {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
@@ -723,10 +753,10 @@ func listSnapshots(context *cli.Context) {
|
|||||||
showFiles := context.Bool("files")
|
showFiles := context.Bool("files")
|
||||||
showChunks := context.Bool("chunks")
|
showChunks := context.Bool("chunks")
|
||||||
|
|
||||||
backupManager.SetupSnapshotCache(repository, preference.Name)
|
backupManager.SetupSnapshotCache(preference.Name)
|
||||||
backupManager.SnapshotManager.ListSnapshots(id, revisions, tag, showFiles, showChunks)
|
backupManager.SnapshotManager.ListSnapshots(id, revisions, tag, showFiles, showChunks)
|
||||||
|
|
||||||
runScript(context, repository, preference.Name, "post")
|
runScript(context, preference.Name, "post")
|
||||||
}
|
}
|
||||||
|
|
||||||
func checkSnapshots(context *cli.Context) {
|
func checkSnapshots(context *cli.Context) {
|
||||||
@@ -743,9 +773,9 @@ func checkSnapshots(context *cli.Context) {
|
|||||||
|
|
||||||
duplicacy.LOG_INFO("STORAGE_SET", "Storage set to %s", preference.StorageURL)
|
duplicacy.LOG_INFO("STORAGE_SET", "Storage set to %s", preference.StorageURL)
|
||||||
|
|
||||||
runScript(context, repository, preference.Name, "pre")
|
runScript(context, preference.Name, "pre")
|
||||||
|
|
||||||
storage := duplicacy.CreateStorage(repository, *preference, false, 1)
|
storage := duplicacy.CreateStorage(*preference, false, 1)
|
||||||
if storage == nil {
|
if storage == nil {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
@@ -773,10 +803,10 @@ func checkSnapshots(context *cli.Context) {
|
|||||||
searchFossils := context.Bool("fossils")
|
searchFossils := context.Bool("fossils")
|
||||||
resurrect := context.Bool("resurrect")
|
resurrect := context.Bool("resurrect")
|
||||||
|
|
||||||
backupManager.SetupSnapshotCache(repository, preference.Name)
|
backupManager.SetupSnapshotCache(preference.Name)
|
||||||
backupManager.SnapshotManager.CheckSnapshots(id, revisions, tag, showStatistics, checkFiles, searchFossils, resurrect)
|
backupManager.SnapshotManager.CheckSnapshots(id, revisions, tag, showStatistics, checkFiles, searchFossils, resurrect)
|
||||||
|
|
||||||
runScript(context, repository, preference.Name, "post")
|
runScript(context, preference.Name, "post")
|
||||||
}
|
}
|
||||||
|
|
||||||
func printFile(context *cli.Context) {
|
func printFile(context *cli.Context) {
|
||||||
@@ -791,11 +821,11 @@ func printFile(context *cli.Context) {
|
|||||||
|
|
||||||
repository, preference := getRepositoryPreference(context, "")
|
repository, preference := getRepositoryPreference(context, "")
|
||||||
|
|
||||||
runScript(context, repository, preference.Name, "pre")
|
runScript(context, preference.Name, "pre")
|
||||||
|
|
||||||
// Do not print out storage for this command
|
// Do not print out storage for this command
|
||||||
//duplicacy.LOG_INFO("STORAGE_SET", "Storage set to %s", preference.StorageURL)
|
//duplicacy.LOG_INFO("STORAGE_SET", "Storage set to %s", preference.StorageURL)
|
||||||
storage := duplicacy.CreateStorage(repository, *preference, false, 1)
|
storage := duplicacy.CreateStorage(*preference, false, 1)
|
||||||
if storage == nil {
|
if storage == nil {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
@@ -815,7 +845,7 @@ func printFile(context *cli.Context) {
|
|||||||
backupManager := duplicacy.CreateBackupManager(preference.SnapshotID, storage, repository, password)
|
backupManager := duplicacy.CreateBackupManager(preference.SnapshotID, storage, repository, password)
|
||||||
duplicacy.SavePassword(*preference, "password", password)
|
duplicacy.SavePassword(*preference, "password", password)
|
||||||
|
|
||||||
backupManager.SetupSnapshotCache(repository, preference.Name)
|
backupManager.SetupSnapshotCache(preference.Name)
|
||||||
|
|
||||||
file := ""
|
file := ""
|
||||||
if len(context.Args()) > 0 {
|
if len(context.Args()) > 0 {
|
||||||
@@ -823,7 +853,7 @@ func printFile(context *cli.Context) {
|
|||||||
}
|
}
|
||||||
backupManager.SnapshotManager.PrintFile(snapshotID, revision, file)
|
backupManager.SnapshotManager.PrintFile(snapshotID, revision, file)
|
||||||
|
|
||||||
runScript(context, repository, preference.Name, "post")
|
runScript(context, preference.Name, "post")
|
||||||
}
|
}
|
||||||
|
|
||||||
func diff(context *cli.Context) {
|
func diff(context *cli.Context) {
|
||||||
@@ -838,10 +868,10 @@ func diff(context *cli.Context) {
|
|||||||
|
|
||||||
repository, preference := getRepositoryPreference(context, "")
|
repository, preference := getRepositoryPreference(context, "")
|
||||||
|
|
||||||
runScript(context, repository, preference.Name, "pre")
|
runScript(context, preference.Name, "pre")
|
||||||
|
|
||||||
duplicacy.LOG_INFO("STORAGE_SET", "Storage set to %s", preference.StorageURL)
|
duplicacy.LOG_INFO("STORAGE_SET", "Storage set to %s", preference.StorageURL)
|
||||||
storage := duplicacy.CreateStorage(repository, *preference, false, 1)
|
storage := duplicacy.CreateStorage(*preference, false, 1)
|
||||||
if storage == nil {
|
if storage == nil {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
@@ -872,10 +902,10 @@ func diff(context *cli.Context) {
|
|||||||
backupManager := duplicacy.CreateBackupManager(preference.SnapshotID, storage, repository, password)
|
backupManager := duplicacy.CreateBackupManager(preference.SnapshotID, storage, repository, password)
|
||||||
duplicacy.SavePassword(*preference, "password", password)
|
duplicacy.SavePassword(*preference, "password", password)
|
||||||
|
|
||||||
backupManager.SetupSnapshotCache(repository, preference.Name)
|
backupManager.SetupSnapshotCache(preference.Name)
|
||||||
backupManager.SnapshotManager.Diff(repository, snapshotID, revisions, path, compareByHash)
|
backupManager.SnapshotManager.Diff(repository, snapshotID, revisions, path, compareByHash)
|
||||||
|
|
||||||
runScript(context, repository, preference.Name, "post")
|
runScript(context, preference.Name, "post")
|
||||||
}
|
}
|
||||||
|
|
||||||
func showHistory(context *cli.Context) {
|
func showHistory(context *cli.Context) {
|
||||||
@@ -890,10 +920,10 @@ func showHistory(context *cli.Context) {
|
|||||||
|
|
||||||
repository, preference := getRepositoryPreference(context, "")
|
repository, preference := getRepositoryPreference(context, "")
|
||||||
|
|
||||||
runScript(context, repository, preference.Name, "pre")
|
runScript(context, preference.Name, "pre")
|
||||||
|
|
||||||
duplicacy.LOG_INFO("STORAGE_SET", "Storage set to %s", preference.StorageURL)
|
duplicacy.LOG_INFO("STORAGE_SET", "Storage set to %s", preference.StorageURL)
|
||||||
storage := duplicacy.CreateStorage(repository, *preference, false, 1)
|
storage := duplicacy.CreateStorage(*preference, false, 1)
|
||||||
if storage == nil {
|
if storage == nil {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
@@ -915,10 +945,10 @@ func showHistory(context *cli.Context) {
|
|||||||
backupManager := duplicacy.CreateBackupManager(preference.SnapshotID, storage, repository, password)
|
backupManager := duplicacy.CreateBackupManager(preference.SnapshotID, storage, repository, password)
|
||||||
duplicacy.SavePassword(*preference, "password", password)
|
duplicacy.SavePassword(*preference, "password", password)
|
||||||
|
|
||||||
backupManager.SetupSnapshotCache(repository, preference.Name)
|
backupManager.SetupSnapshotCache(preference.Name)
|
||||||
backupManager.SnapshotManager.ShowHistory(repository, snapshotID, revisions, path, showLocalHash)
|
backupManager.SnapshotManager.ShowHistory(repository, snapshotID, revisions, path, showLocalHash)
|
||||||
|
|
||||||
runScript(context, repository, preference.Name, "post")
|
runScript(context, preference.Name, "post")
|
||||||
}
|
}
|
||||||
|
|
||||||
func pruneSnapshots(context *cli.Context) {
|
func pruneSnapshots(context *cli.Context) {
|
||||||
@@ -933,10 +963,10 @@ func pruneSnapshots(context *cli.Context) {
|
|||||||
|
|
||||||
repository, preference := getRepositoryPreference(context, "")
|
repository, preference := getRepositoryPreference(context, "")
|
||||||
|
|
||||||
runScript(context, repository, preference.Name, "pre")
|
runScript(context, preference.Name, "pre")
|
||||||
|
|
||||||
duplicacy.LOG_INFO("STORAGE_SET", "Storage set to %s", preference.StorageURL)
|
duplicacy.LOG_INFO("STORAGE_SET", "Storage set to %s", preference.StorageURL)
|
||||||
storage := duplicacy.CreateStorage(repository, *preference, false, 1)
|
storage := duplicacy.CreateStorage(*preference, false, 1)
|
||||||
if storage == nil {
|
if storage == nil {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
@@ -973,11 +1003,11 @@ func pruneSnapshots(context *cli.Context) {
|
|||||||
backupManager := duplicacy.CreateBackupManager(preference.SnapshotID, storage, repository, password)
|
backupManager := duplicacy.CreateBackupManager(preference.SnapshotID, storage, repository, password)
|
||||||
duplicacy.SavePassword(*preference, "password", password)
|
duplicacy.SavePassword(*preference, "password", password)
|
||||||
|
|
||||||
backupManager.SetupSnapshotCache(repository, preference.Name)
|
backupManager.SetupSnapshotCache(preference.Name)
|
||||||
backupManager.SnapshotManager.PruneSnapshots(repository, selfID, snapshotID, revisions, tags, retentions,
|
backupManager.SnapshotManager.PruneSnapshots(selfID, snapshotID, revisions, tags, retentions,
|
||||||
exhaustive, exclusive, ignoredIDs, dryRun, deleteOnly, collectOnly)
|
exhaustive, exclusive, ignoredIDs, dryRun, deleteOnly, collectOnly)
|
||||||
|
|
||||||
runScript(context, repository, preference.Name, "post")
|
runScript(context, preference.Name, "post")
|
||||||
}
|
}
|
||||||
|
|
||||||
func copySnapshots(context *cli.Context) {
|
func copySnapshots(context *cli.Context) {
|
||||||
@@ -990,12 +1020,17 @@ func copySnapshots(context *cli.Context) {
|
|||||||
os.Exit(ArgumentExitCode)
|
os.Exit(ArgumentExitCode)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
threads := context.Int("threads")
|
||||||
|
if threads < 1 {
|
||||||
|
threads = 1
|
||||||
|
}
|
||||||
|
|
||||||
repository, source := getRepositoryPreference(context, context.String("from"))
|
repository, source := getRepositoryPreference(context, context.String("from"))
|
||||||
|
|
||||||
runScript(context, repository, source.Name, "pre")
|
runScript(context, source.Name, "pre")
|
||||||
|
|
||||||
duplicacy.LOG_INFO("STORAGE_SET", "Source storage set to %s", source.StorageURL)
|
duplicacy.LOG_INFO("STORAGE_SET", "Source storage set to %s", source.StorageURL)
|
||||||
sourceStorage := duplicacy.CreateStorage(repository, *source, false, 1)
|
sourceStorage := duplicacy.CreateStorage(*source, false, threads)
|
||||||
if sourceStorage == nil {
|
if sourceStorage == nil {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
@@ -1006,7 +1041,7 @@ func copySnapshots(context *cli.Context) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
sourceManager := duplicacy.CreateBackupManager(source.SnapshotID, sourceStorage, repository, sourcePassword)
|
sourceManager := duplicacy.CreateBackupManager(source.SnapshotID, sourceStorage, repository, sourcePassword)
|
||||||
sourceManager.SetupSnapshotCache(repository, source.Name)
|
sourceManager.SetupSnapshotCache(source.Name)
|
||||||
duplicacy.SavePassword(*source, "password", sourcePassword)
|
duplicacy.SavePassword(*source, "password", sourcePassword)
|
||||||
|
|
||||||
|
|
||||||
@@ -1025,7 +1060,7 @@ func copySnapshots(context *cli.Context) {
|
|||||||
|
|
||||||
|
|
||||||
duplicacy.LOG_INFO("STORAGE_SET", "Destination storage set to %s", destination.StorageURL)
|
duplicacy.LOG_INFO("STORAGE_SET", "Destination storage set to %s", destination.StorageURL)
|
||||||
destinationStorage := duplicacy.CreateStorage(repository, *destination, false, 1)
|
destinationStorage := duplicacy.CreateStorage(*destination, false, threads)
|
||||||
if destinationStorage == nil {
|
if destinationStorage == nil {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
@@ -1036,13 +1071,13 @@ func copySnapshots(context *cli.Context) {
|
|||||||
"Enter destination storage password:",false, false)
|
"Enter destination storage password:",false, false)
|
||||||
}
|
}
|
||||||
|
|
||||||
sourceStorage.SetRateLimits(context.Int("download-rate-limit"), 0)
|
sourceStorage.SetRateLimits(context.Int("download-limit-rate"), 0)
|
||||||
destinationStorage.SetRateLimits(0, context.Int("upload-rate-limit"))
|
destinationStorage.SetRateLimits(0, context.Int("upload-limit-rate"))
|
||||||
|
|
||||||
destinationManager := duplicacy.CreateBackupManager(destination.SnapshotID, destinationStorage, repository,
|
destinationManager := duplicacy.CreateBackupManager(destination.SnapshotID, destinationStorage, repository,
|
||||||
destinationPassword)
|
destinationPassword)
|
||||||
duplicacy.SavePassword(*destination, "password", destinationPassword)
|
duplicacy.SavePassword(*destination, "password", destinationPassword)
|
||||||
destinationManager.SetupSnapshotCache(repository, destination.Name)
|
destinationManager.SetupSnapshotCache(destination.Name)
|
||||||
|
|
||||||
revisions := getRevisions(context)
|
revisions := getRevisions(context)
|
||||||
snapshotID := ""
|
snapshotID := ""
|
||||||
@@ -1050,13 +1085,8 @@ func copySnapshots(context *cli.Context) {
|
|||||||
snapshotID = context.String("id")
|
snapshotID = context.String("id")
|
||||||
}
|
}
|
||||||
|
|
||||||
threads := context.Int("threads")
|
|
||||||
if threads < 1 {
|
|
||||||
threads = 1
|
|
||||||
}
|
|
||||||
|
|
||||||
sourceManager.CopySnapshots(destinationManager, snapshotID, revisions, threads)
|
sourceManager.CopySnapshots(destinationManager, snapshotID, revisions, threads)
|
||||||
runScript(context, repository, source.Name, "post")
|
runScript(context, source.Name, "post")
|
||||||
}
|
}
|
||||||
|
|
||||||
func infoStorage(context *cli.Context) {
|
func infoStorage(context *cli.Context) {
|
||||||
@@ -1071,7 +1101,9 @@ func infoStorage(context *cli.Context) {
|
|||||||
|
|
||||||
repository := context.String("repository")
|
repository := context.String("repository")
|
||||||
if repository != "" {
|
if repository != "" {
|
||||||
duplicacy.SetKeyringFile(path.Join(repository, duplicacy.DUPLICACY_DIRECTORY, "keyring"))
|
preferencePath := path.Join(repository, duplicacy.DUPLICACY_DIRECTORY)
|
||||||
|
duplicacy.SetDuplicacyPreferencePath(preferencePath)
|
||||||
|
duplicacy.SetKeyringFile(path.Join(preferencePath, "keyring"))
|
||||||
}
|
}
|
||||||
|
|
||||||
isEncrypted := context.Bool("e")
|
isEncrypted := context.Bool("e")
|
||||||
@@ -1088,7 +1120,7 @@ func infoStorage(context *cli.Context) {
|
|||||||
password = duplicacy.GetPassword(preference, "password", "Enter the storage password:", false, false)
|
password = duplicacy.GetPassword(preference, "password", "Enter the storage password:", false, false)
|
||||||
}
|
}
|
||||||
|
|
||||||
storage := duplicacy.CreateStorage("", preference, context.Bool("reset-passwords"), 1)
|
storage := duplicacy.CreateStorage(preference, context.Bool("reset-passwords"), 1)
|
||||||
config, isStorageEncrypted, err := duplicacy.DownloadConfig(storage, password)
|
config, isStorageEncrypted, err := duplicacy.DownloadConfig(storage, password)
|
||||||
|
|
||||||
if isStorageEncrypted {
|
if isStorageEncrypted {
|
||||||
@@ -1132,6 +1164,11 @@ func main() {
|
|||||||
Usage: "the minimum size of chunks (defaults to chunk-size / 4)",
|
Usage: "the minimum size of chunks (defaults to chunk-size / 4)",
|
||||||
Argument: "1M",
|
Argument: "1M",
|
||||||
},
|
},
|
||||||
|
cli.StringFlag{
|
||||||
|
Name: "pref-dir",
|
||||||
|
Usage: "Specify alternate location for .duplicacy preferences directory (absolute or relative to current directory)",
|
||||||
|
Argument: "<preferences directory path>",
|
||||||
|
},
|
||||||
},
|
},
|
||||||
Usage: "Initialize the storage if necessary and the current directory as the repository",
|
Usage: "Initialize the storage if necessary and the current directory as the repository",
|
||||||
ArgsUsage: "<snapshot id> <storage url>",
|
ArgsUsage: "<snapshot id> <storage url>",
|
||||||
@@ -1658,7 +1695,18 @@ func main() {
|
|||||||
app.Name = "duplicacy"
|
app.Name = "duplicacy"
|
||||||
app.HelpName = "duplicacy"
|
app.HelpName = "duplicacy"
|
||||||
app.Usage = "A new generation cloud backup tool based on lock-free deduplication"
|
app.Usage = "A new generation cloud backup tool based on lock-free deduplication"
|
||||||
app.Version = "2.0.2"
|
app.Version = "2.0.9"
|
||||||
|
|
||||||
|
// If the program is interrupted, call the RunAtError function.
|
||||||
|
c := make(chan os.Signal, 1)
|
||||||
|
signal.Notify(c, os.Interrupt)
|
||||||
|
go func() {
|
||||||
|
for _ = range c {
|
||||||
|
duplicacy.RunAtError()
|
||||||
|
os.Exit(1)
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
|
||||||
err := app.Run(os.Args)
|
err := app.Run(os.Args)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
os.Exit(2)
|
os.Exit(2)
|
||||||
21
integration_tests/copy_test.sh
Executable file
21
integration_tests/copy_test.sh
Executable file
@@ -0,0 +1,21 @@
|
|||||||
|
#!/bin/bash
|
||||||
|
|
||||||
|
|
||||||
|
. ./test_functions.sh
|
||||||
|
|
||||||
|
fixture
|
||||||
|
|
||||||
|
pushd ${TEST_REPO}
|
||||||
|
${DUPLICACY} init integration-tests $TEST_STORAGE -c 1k
|
||||||
|
${DUPLICACY} add -copy default secondary integration-tests $SECONDARY_STORAGE
|
||||||
|
add_file file1
|
||||||
|
add_file file2
|
||||||
|
${DUPLICACY} backup
|
||||||
|
${DUPLICACY} copy -from default -to secondary
|
||||||
|
add_file file3
|
||||||
|
add_file file4
|
||||||
|
${DUPLICACY} backup
|
||||||
|
${DUPLICACY} copy -from default -to secondary
|
||||||
|
${DUPLICACY} check --files -stats
|
||||||
|
${DUPLICACY} check --files -stats -storage secondary
|
||||||
|
popd
|
||||||
18
integration_tests/fixed_test.sh
Executable file
18
integration_tests/fixed_test.sh
Executable file
@@ -0,0 +1,18 @@
|
|||||||
|
#!/bin/bash
|
||||||
|
|
||||||
|
# Sanity test for the fixed-size chunking algorithm
|
||||||
|
|
||||||
|
. ./test_functions.sh
|
||||||
|
|
||||||
|
fixture
|
||||||
|
|
||||||
|
pushd ${TEST_REPO}
|
||||||
|
${DUPLICACY} init integration-tests $TEST_STORAGE -c 64 -max 64 -min 64
|
||||||
|
|
||||||
|
add_file file3
|
||||||
|
add_file file4
|
||||||
|
|
||||||
|
|
||||||
|
${DUPLICACY} backup
|
||||||
|
${DUPLICACY} check --files -stats
|
||||||
|
popd
|
||||||
38
integration_tests/resume_test.sh
Executable file
38
integration_tests/resume_test.sh
Executable file
@@ -0,0 +1,38 @@
|
|||||||
|
#!/bin/bash
|
||||||
|
|
||||||
|
|
||||||
|
. ./test_functions.sh
|
||||||
|
|
||||||
|
fixture
|
||||||
|
|
||||||
|
pushd ${TEST_REPO}
|
||||||
|
${DUPLICACY} init integration-tests $TEST_STORAGE -c 4
|
||||||
|
|
||||||
|
# Create 10 small files
|
||||||
|
add_file file1 20
|
||||||
|
add_file file2 20
|
||||||
|
rm file3; touch file3
|
||||||
|
add_file file4 20
|
||||||
|
chmod u-r file4
|
||||||
|
add_file file5 20
|
||||||
|
add_file file6 20
|
||||||
|
add_file file7 20
|
||||||
|
add_file file8 20
|
||||||
|
add_file file9 20
|
||||||
|
add_file file10 20
|
||||||
|
|
||||||
|
# Fail at the 10th chunk
|
||||||
|
env DUPLICACY_FAIL_CHUNK=10 ${DUPLICACY} backup
|
||||||
|
|
||||||
|
# Try it again to test the multiple-resume case
|
||||||
|
env DUPLICACY_FAIL_CHUNK=5 ${DUPLICACY} backup
|
||||||
|
add_file file1 20
|
||||||
|
add_file file2 20
|
||||||
|
|
||||||
|
# Fail the backup before uploading the snapshot
|
||||||
|
env DUPLICACY_FAIL_SNAPSHOT=true ${DUPLICACY} backup
|
||||||
|
|
||||||
|
# Now complete the backup
|
||||||
|
${DUPLICACY} backup
|
||||||
|
${DUPLICACY} check --files
|
||||||
|
popd
|
||||||
28
integration_tests/sparse_test.sh
Executable file
28
integration_tests/sparse_test.sh
Executable file
@@ -0,0 +1,28 @@
|
|||||||
|
#!/bin/bash
|
||||||
|
|
||||||
|
# Testing backup and restore of sparse files
|
||||||
|
|
||||||
|
. ./test_functions.sh
|
||||||
|
|
||||||
|
fixture
|
||||||
|
|
||||||
|
pushd ${TEST_REPO}
|
||||||
|
${DUPLICACY} init integration-tests $TEST_STORAGE -c 1m
|
||||||
|
|
||||||
|
for i in `seq 1 10`; do
|
||||||
|
dd if=/dev/urandom of=file3 bs=1000 count=1000 seek=$((100000 * $i))
|
||||||
|
done
|
||||||
|
|
||||||
|
ls -lsh file3
|
||||||
|
|
||||||
|
${DUPLICACY} backup
|
||||||
|
${DUPLICACY} check --files -stats
|
||||||
|
|
||||||
|
rm file1 file3
|
||||||
|
|
||||||
|
${DUPLICACY} restore -r 1
|
||||||
|
${DUPLICACY} -v restore -r 1 -overwrite -stats -hash
|
||||||
|
|
||||||
|
ls -lsh file3
|
||||||
|
|
||||||
|
popd
|
||||||
18
integration_tests/test.sh
Executable file
18
integration_tests/test.sh
Executable file
@@ -0,0 +1,18 @@
|
|||||||
|
#!/bin/bash
|
||||||
|
|
||||||
|
|
||||||
|
. ./test_functions.sh
|
||||||
|
|
||||||
|
fixture
|
||||||
|
init_repo_pref_dir
|
||||||
|
|
||||||
|
backup
|
||||||
|
add_file file3
|
||||||
|
backup
|
||||||
|
add_file file4
|
||||||
|
chmod u-r ${TEST_REPO}/file4
|
||||||
|
backup
|
||||||
|
add_file file5
|
||||||
|
restore
|
||||||
|
check
|
||||||
|
|
||||||
123
integration_tests/test_functions.sh
Normal file
123
integration_tests/test_functions.sh
Normal file
@@ -0,0 +1,123 @@
|
|||||||
|
#!/bin/bash
|
||||||
|
|
||||||
|
get_abs_filename() {
|
||||||
|
# $1 : relative filename
|
||||||
|
echo "$(cd "$(dirname "$1")" && pwd)/$(basename "$1")"
|
||||||
|
}
|
||||||
|
|
||||||
|
pushd () {
|
||||||
|
command pushd "$@" > /dev/null
|
||||||
|
}
|
||||||
|
|
||||||
|
popd () {
|
||||||
|
command popd "$@" > /dev/null
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
# Functions used to create integration tests suite
|
||||||
|
|
||||||
|
DUPLICACY=$(get_abs_filename ../duplicacy_main)
|
||||||
|
|
||||||
|
# Base directory where test repositories will be created
|
||||||
|
TEST_ZONE=$HOME/DUPLICACY_TEST_ZONE
|
||||||
|
# Test Repository
|
||||||
|
TEST_REPO=$TEST_ZONE/TEST_REPO
|
||||||
|
|
||||||
|
# Storage for test ( For now, only local path storage is supported by test suite)
|
||||||
|
TEST_STORAGE=$TEST_ZONE/TEST_STORAGE
|
||||||
|
|
||||||
|
# Extra storage for copy operation
|
||||||
|
SECONDARY_STORAGE=$TEST_ZONE/SECONDARY_STORAGE
|
||||||
|
|
||||||
|
# Preference directory ( for testing the -pref-dir option)
|
||||||
|
DUPLICACY_PREF_DIR=$TEST_ZONE/TEST_DUPLICACY_PREF_DIR
|
||||||
|
|
||||||
|
# Scratch pad for testing restore
|
||||||
|
TEST_RESTORE_POINT=$TEST_ZONE/RESTORE_POINT
|
||||||
|
|
||||||
|
# Make sure $TEST_ZONE is in know state
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
function fixture()
|
||||||
|
{
|
||||||
|
# clean TEST_RESTORE_POINT
|
||||||
|
rm -rf $TEST_RESTORE_POINT
|
||||||
|
mkdir -p $TEST_RESTORE_POINT
|
||||||
|
|
||||||
|
# clean TEST_STORAGE
|
||||||
|
rm -rf $TEST_STORAGE
|
||||||
|
mkdir -p $TEST_STORAGE
|
||||||
|
|
||||||
|
# clean SECONDARY_STORAGE
|
||||||
|
rm -rf $SECONDARY_STORAGE
|
||||||
|
mkdir -p $SECONDARY_STORAGE
|
||||||
|
|
||||||
|
|
||||||
|
# clean TEST_DOT_DUPLICACY
|
||||||
|
rm -rf $DUPLICACY_PREF_DIR
|
||||||
|
mkdir -p $DUPLICACY_PREF_DIR
|
||||||
|
|
||||||
|
# Create test repository
|
||||||
|
rm -rf ${TEST_REPO}
|
||||||
|
mkdir -p ${TEST_REPO}
|
||||||
|
pushd ${TEST_REPO}
|
||||||
|
echo "file1" > file1
|
||||||
|
mkdir dir1
|
||||||
|
echo "file2" > dir1/file2
|
||||||
|
popd
|
||||||
|
}
|
||||||
|
|
||||||
|
function init_repo()
|
||||||
|
{
|
||||||
|
pushd ${TEST_REPO}
|
||||||
|
${DUPLICACY} init integration-tests $TEST_STORAGE
|
||||||
|
${DUPLICACY} add -copy default secondary integration-tests $SECONDARY_STORAGE
|
||||||
|
${DUPLICACY} backup
|
||||||
|
popd
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
function init_repo_pref_dir()
|
||||||
|
{
|
||||||
|
pushd ${TEST_REPO}
|
||||||
|
${DUPLICACY} init -pref-dir "${DUPLICACY_PREF_DIR}" integration-tests ${TEST_STORAGE}
|
||||||
|
${DUPLICACY} add -copy default secondary integration-tests $SECONDARY_STORAGE
|
||||||
|
${DUPLICACY} backup
|
||||||
|
popd
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
function add_file()
|
||||||
|
{
|
||||||
|
FILE_NAME=$1
|
||||||
|
FILE_SIZE=${2:-20000000}
|
||||||
|
pushd ${TEST_REPO}
|
||||||
|
dd if=/dev/urandom of=${FILE_NAME} bs=1 count=$(($RANDOM % ${FILE_SIZE})) &> /dev/null
|
||||||
|
popd
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
function backup()
|
||||||
|
{
|
||||||
|
pushd ${TEST_REPO}
|
||||||
|
${DUPLICACY} backup
|
||||||
|
${DUPLICACY} copy -from default -to secondary
|
||||||
|
popd
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
function restore()
|
||||||
|
{
|
||||||
|
pushd ${TEST_REPO}
|
||||||
|
${DUPLICACY} restore -r 2 -delete
|
||||||
|
popd
|
||||||
|
}
|
||||||
|
|
||||||
|
function check()
|
||||||
|
{
|
||||||
|
pushd ${TEST_REPO}
|
||||||
|
${DUPLICACY} check -files
|
||||||
|
${DUPLICACY} check -storage secondary -files
|
||||||
|
popd
|
||||||
|
}
|
||||||
17
integration_tests/threaded_test.sh
Executable file
17
integration_tests/threaded_test.sh
Executable file
@@ -0,0 +1,17 @@
|
|||||||
|
#!/bin/bash
|
||||||
|
|
||||||
|
|
||||||
|
. ./test_functions.sh
|
||||||
|
|
||||||
|
fixture
|
||||||
|
|
||||||
|
pushd ${TEST_REPO}
|
||||||
|
${DUPLICACY} init integration-tests $TEST_STORAGE -c 1k
|
||||||
|
|
||||||
|
add_file file3
|
||||||
|
add_file file4
|
||||||
|
|
||||||
|
|
||||||
|
${DUPLICACY} backup -threads 16
|
||||||
|
${DUPLICACY} check --files -stats
|
||||||
|
popd
|
||||||
@@ -1,6 +1,6 @@
|
|||||||
// Copyright (c) Acrosync LLC. All rights reserved.
|
// Copyright (c) Acrosync LLC. All rights reserved.
|
||||||
// Licensed under the Fair Source License 0.9 (https://fair.io/)
|
// Free for personal use and commercial trial
|
||||||
// User Limitation: 5 users
|
// Commercial use requires per-user licenses available from https://duplicacy.com
|
||||||
|
|
||||||
package duplicacy
|
package duplicacy
|
||||||
|
|
||||||
|
|||||||
@@ -1,6 +1,6 @@
|
|||||||
// Copyright (c) Acrosync LLC. All rights reserved.
|
// Copyright (c) Acrosync LLC. All rights reserved.
|
||||||
// Licensed under the Fair Source License 0.9 (https://fair.io/)
|
// Free for personal use and commercial trial
|
||||||
// User Limitation: 5 users
|
// Commercial use requires per-user licenses available from https://duplicacy.com
|
||||||
|
|
||||||
package duplicacy
|
package duplicacy
|
||||||
|
|
||||||
|
|||||||
@@ -1,6 +1,6 @@
|
|||||||
// Copyright (c) Acrosync LLC. All rights reserved.
|
// Copyright (c) Acrosync LLC. All rights reserved.
|
||||||
// Licensed under the Fair Source License 0.9 (https://fair.io/)
|
// Free for personal use and commercial trial
|
||||||
// User Limitation: 5 users
|
// Commercial use requires per-user licenses available from https://duplicacy.com
|
||||||
|
|
||||||
package duplicacy
|
package duplicacy
|
||||||
|
|
||||||
|
|||||||
@@ -1,6 +1,6 @@
|
|||||||
// Copyright (c) Acrosync LLC. All rights reserved.
|
// Copyright (c) Acrosync LLC. All rights reserved.
|
||||||
// Licensed under the Fair Source License 0.9 (https://fair.io/)
|
// Free for personal use and commercial trial
|
||||||
// User Limitation: 5 users
|
// Commercial use requires per-user licenses available from https://duplicacy.com
|
||||||
|
|
||||||
package duplicacy
|
package duplicacy
|
||||||
|
|
||||||
@@ -14,14 +14,13 @@ import (
|
|||||||
type AzureStorage struct {
|
type AzureStorage struct {
|
||||||
RateLimitedStorage
|
RateLimitedStorage
|
||||||
|
|
||||||
clients []*storage.BlobStorageClient
|
containers []*storage.Container
|
||||||
container string
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func CreateAzureStorage(accountName string, accountKey string,
|
func CreateAzureStorage(accountName string, accountKey string,
|
||||||
container string, threads int) (azureStorage *AzureStorage, err error) {
|
containerName string, threads int) (azureStorage *AzureStorage, err error) {
|
||||||
|
|
||||||
var clients []*storage.BlobStorageClient
|
var containers []*storage.Container
|
||||||
for i := 0; i < threads; i++ {
|
for i := 0; i < threads; i++ {
|
||||||
|
|
||||||
client, err := storage.NewBasicClient(accountName, accountKey)
|
client, err := storage.NewBasicClient(accountName, accountKey)
|
||||||
@@ -31,21 +30,21 @@ func CreateAzureStorage(accountName string, accountKey string,
|
|||||||
}
|
}
|
||||||
|
|
||||||
blobService := client.GetBlobService()
|
blobService := client.GetBlobService()
|
||||||
clients = append(clients, &blobService)
|
container := blobService.GetContainerReference(containerName)
|
||||||
|
containers = append(containers, container)
|
||||||
}
|
}
|
||||||
|
|
||||||
exist, err := clients[0].ContainerExists(container)
|
exist, err := containers[0].Exists()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
if !exist {
|
if !exist {
|
||||||
return nil, fmt.Errorf("container %s does not exist", container)
|
return nil, fmt.Errorf("container %s does not exist", containerName)
|
||||||
}
|
}
|
||||||
|
|
||||||
azureStorage = &AzureStorage {
|
azureStorage = &AzureStorage {
|
||||||
clients: clients,
|
containers: containers,
|
||||||
container: container,
|
|
||||||
}
|
}
|
||||||
|
|
||||||
return
|
return
|
||||||
@@ -77,7 +76,7 @@ func (azureStorage *AzureStorage) ListFiles(threadIndex int, dir string) (files
|
|||||||
|
|
||||||
for {
|
for {
|
||||||
|
|
||||||
results, err := azureStorage.clients[threadIndex].ListBlobs(azureStorage.container, parameters)
|
results, err := azureStorage.containers[threadIndex].ListBlobs(parameters)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, nil, err
|
return nil, nil, err
|
||||||
}
|
}
|
||||||
@@ -115,14 +114,15 @@ func (azureStorage *AzureStorage) ListFiles(threadIndex int, dir string) (files
|
|||||||
|
|
||||||
// DeleteFile deletes the file or directory at 'filePath'.
|
// DeleteFile deletes the file or directory at 'filePath'.
|
||||||
func (storage *AzureStorage) DeleteFile(threadIndex int, filePath string) (err error) {
|
func (storage *AzureStorage) DeleteFile(threadIndex int, filePath string) (err error) {
|
||||||
_, err = storage.clients[threadIndex].DeleteBlobIfExists(storage.container, filePath)
|
_, err = storage.containers[threadIndex].GetBlobReference(filePath).DeleteIfExists(nil)
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
// MoveFile renames the file.
|
// MoveFile renames the file.
|
||||||
func (storage *AzureStorage) MoveFile(threadIndex int, from string, to string) (err error) {
|
func (storage *AzureStorage) MoveFile(threadIndex int, from string, to string) (err error) {
|
||||||
source := storage.clients[threadIndex].GetBlobURL(storage.container, from)
|
source := storage.containers[threadIndex].GetBlobReference(from)
|
||||||
err = storage.clients[threadIndex].CopyBlob(storage.container, to, source)
|
destination := storage.containers[threadIndex].GetBlobReference(to)
|
||||||
|
err = destination.Copy(source.GetURL(), nil)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@@ -136,7 +136,8 @@ func (storage *AzureStorage) CreateDirectory(threadIndex int, dir string) (err e
|
|||||||
|
|
||||||
// GetFileInfo returns the information about the file or directory at 'filePath'.
|
// GetFileInfo returns the information about the file or directory at 'filePath'.
|
||||||
func (storage *AzureStorage) GetFileInfo(threadIndex int, filePath string) (exist bool, isDir bool, size int64, err error) {
|
func (storage *AzureStorage) GetFileInfo(threadIndex int, filePath string) (exist bool, isDir bool, size int64, err error) {
|
||||||
properties, err := storage.clients[threadIndex].GetBlobProperties(storage.container, filePath)
|
blob := storage.containers[threadIndex].GetBlobReference(filePath)
|
||||||
|
err = blob.GetProperties(nil)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
if strings.Contains(err.Error(), "404") {
|
if strings.Contains(err.Error(), "404") {
|
||||||
return false, false, 0, nil
|
return false, false, 0, nil
|
||||||
@@ -145,7 +146,7 @@ func (storage *AzureStorage) GetFileInfo(threadIndex int, filePath string) (exis
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
return true, false, properties.ContentLength, nil
|
return true, false, blob.Properties.ContentLength, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// FindChunk finds the chunk with the specified id. If 'isFossil' is true, it will search for chunk files with
|
// FindChunk finds the chunk with the specified id. If 'isFossil' is true, it will search for chunk files with
|
||||||
@@ -167,21 +168,22 @@ func (storage *AzureStorage) FindChunk(threadIndex int, chunkID string, isFossil
|
|||||||
|
|
||||||
// DownloadFile reads the file at 'filePath' into the chunk.
|
// DownloadFile reads the file at 'filePath' into the chunk.
|
||||||
func (storage *AzureStorage) DownloadFile(threadIndex int, filePath string, chunk *Chunk) (err error) {
|
func (storage *AzureStorage) DownloadFile(threadIndex int, filePath string, chunk *Chunk) (err error) {
|
||||||
readCloser, err := storage.clients[threadIndex].GetBlob(storage.container, filePath)
|
readCloser, err := storage.containers[threadIndex].GetBlobReference(filePath).Get(nil)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
defer readCloser.Close()
|
defer readCloser.Close()
|
||||||
|
|
||||||
_, err = RateLimitedCopy(chunk, readCloser, storage.DownloadRateLimit / len(storage.clients))
|
_, err = RateLimitedCopy(chunk, readCloser, storage.DownloadRateLimit / len(storage.containers))
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
// UploadFile writes 'content' to the file at 'filePath'.
|
// UploadFile writes 'content' to the file at 'filePath'.
|
||||||
func (storage *AzureStorage) UploadFile(threadIndex int, filePath string, content []byte) (err error) {
|
func (storage *AzureStorage) UploadFile(threadIndex int, filePath string, content []byte) (err error) {
|
||||||
reader := CreateRateLimitedReader(content, storage.UploadRateLimit / len(storage.clients))
|
reader := CreateRateLimitedReader(content, storage.UploadRateLimit / len(storage.containers))
|
||||||
return storage.clients[threadIndex].CreateBlockBlobFromReader(storage.container, filePath, uint64(len(content)), reader, nil)
|
blob := storage.containers[threadIndex].GetBlobReference(filePath)
|
||||||
|
return blob.CreateBlockBlobFromReader(reader, nil)
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -1,6 +1,6 @@
|
|||||||
// Copyright (c) Acrosync LLC. All rights reserved.
|
// Copyright (c) Acrosync LLC. All rights reserved.
|
||||||
// Licensed under the Fair Source License 0.9 (https://fair.io/)
|
// Free for personal use and commercial trial
|
||||||
// User Limitation: 5 users
|
// Commercial use requires per-user licenses available from https://duplicacy.com
|
||||||
|
|
||||||
package duplicacy
|
package duplicacy
|
||||||
|
|
||||||
@@ -135,7 +135,7 @@ func (client *B2Client) call(url string, input interface{}) (io.ReadCloser, int6
|
|||||||
return nil, 0, err
|
return nil, 0, err
|
||||||
}
|
}
|
||||||
|
|
||||||
if response.StatusCode < 400 {
|
if response.StatusCode < 300 {
|
||||||
return response.Body, response.ContentLength, nil
|
return response.Body, response.ContentLength, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -160,6 +160,10 @@ func (client *B2Client) call(url string, input interface{}) (io.ReadCloser, int6
|
|||||||
} else if response.StatusCode >= 500 && response.StatusCode <= 599 {
|
} else if response.StatusCode >= 500 && response.StatusCode <= 599 {
|
||||||
backoff = client.retry(backoff, response)
|
backoff = client.retry(backoff, response)
|
||||||
continue
|
continue
|
||||||
|
} else {
|
||||||
|
LOG_INFO("BACKBLAZE_CALL", "URL request '%s' returned status code %d", url, response.StatusCode)
|
||||||
|
backoff = client.retry(backoff, response)
|
||||||
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
defer response.Body.Close()
|
defer response.Body.Close()
|
||||||
@@ -487,7 +491,7 @@ func (client *B2Client) UploadFile(filePath string, content []byte, rateLimit in
|
|||||||
io.Copy(ioutil.Discard, response.Body)
|
io.Copy(ioutil.Discard, response.Body)
|
||||||
response.Body.Close()
|
response.Body.Close()
|
||||||
|
|
||||||
if response.StatusCode < 400 {
|
if response.StatusCode < 300 {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -1,6 +1,6 @@
|
|||||||
// Copyright (c) Acrosync LLC. All rights reserved.
|
// Copyright (c) Acrosync LLC. All rights reserved.
|
||||||
// Licensed under the Fair Source License 0.9 (https://fair.io/)
|
// Free for personal use and commercial trial
|
||||||
// User Limitation: 5 users
|
// Commercial use requires per-user licenses available from https://duplicacy.com
|
||||||
|
|
||||||
package duplicacy
|
package duplicacy
|
||||||
|
|
||||||
|
|||||||
@@ -1,6 +1,6 @@
|
|||||||
// Copyright (c) Acrosync LLC. All rights reserved.
|
// Copyright (c) Acrosync LLC. All rights reserved.
|
||||||
// Licensed under the Fair Source License 0.9 (https://fair.io/)
|
// Free for personal use and commercial trial
|
||||||
// User Limitation: 5 users
|
// Commercial use requires per-user licenses available from https://duplicacy.com
|
||||||
|
|
||||||
package duplicacy
|
package duplicacy
|
||||||
|
|
||||||
|
|||||||
@@ -1,6 +1,6 @@
|
|||||||
// Copyright (c) Acrosync LLC. All rights reserved.
|
// Copyright (c) Acrosync LLC. All rights reserved.
|
||||||
// Licensed under the Fair Source License 0.9 (https://fair.io/)
|
// Free for personal use and commercial trial
|
||||||
// User Limitation: 5 users
|
// Commercial use requires per-user licenses available from https://duplicacy.com
|
||||||
|
|
||||||
package duplicacy
|
package duplicacy
|
||||||
|
|
||||||
@@ -13,9 +13,11 @@ import (
|
|||||||
"path"
|
"path"
|
||||||
"time"
|
"time"
|
||||||
"sort"
|
"sort"
|
||||||
|
"sync"
|
||||||
"sync/atomic"
|
"sync/atomic"
|
||||||
"strings"
|
"strings"
|
||||||
"strconv"
|
"strconv"
|
||||||
|
"runtime"
|
||||||
"encoding/hex"
|
"encoding/hex"
|
||||||
"path/filepath"
|
"path/filepath"
|
||||||
)
|
)
|
||||||
@@ -70,11 +72,12 @@ func CreateBackupManager(snapshotID string, storage Storage, top string, passwor
|
|||||||
|
|
||||||
// SetupSnapshotCache creates the snapshot cache, which is merely a local storage under the default .duplicacy
|
// SetupSnapshotCache creates the snapshot cache, which is merely a local storage under the default .duplicacy
|
||||||
// directory
|
// directory
|
||||||
func (manager *BackupManager) SetupSnapshotCache(top string, storageName string) bool {
|
func (manager *BackupManager) SetupSnapshotCache(storageName string) bool {
|
||||||
|
|
||||||
|
preferencePath := GetDuplicacyPreferencePath()
|
||||||
|
cacheDir := path.Join(preferencePath, "cache", storageName)
|
||||||
|
|
||||||
cacheDir := path.Join(top, DUPLICACY_DIRECTORY, "cache", storageName)
|
storage, err := CreateFileStorage(cacheDir, 2, false, 1)
|
||||||
|
|
||||||
storage, err := CreateFileStorage(cacheDir, 1)
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
LOG_ERROR("BACKUP_CACHE", "Failed to create the snapshot cache dir: %v", err)
|
LOG_ERROR("BACKUP_CACHE", "Failed to create the snapshot cache dir: %v", err)
|
||||||
return false
|
return false
|
||||||
@@ -93,11 +96,19 @@ func (manager *BackupManager) SetupSnapshotCache(top string, storageName string)
|
|||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// setEntryContent sets the 4 content pointers for each entry in 'entries'. 'offset' indicates the value
|
||||||
|
// to be added to the StartChunk and EndChunk points, used when intending to append 'entries' to the
|
||||||
|
// original unchanged entry list.
|
||||||
|
//
|
||||||
|
// This function assumes the Size field of each entry is equal to the length of the chunk content that belong
|
||||||
|
// to the file.
|
||||||
func setEntryContent(entries[] *Entry, chunkLengths[]int, offset int) {
|
func setEntryContent(entries[] *Entry, chunkLengths[]int, offset int) {
|
||||||
if len(entries) == 0 {
|
if len(entries) == 0 {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// The following code works by iterating over 'entries' and 'chunkLength' and keeping track of the
|
||||||
|
// accumulated total file size and the accumulated total chunk size.
|
||||||
i := 0
|
i := 0
|
||||||
totalChunkSize := int64(0)
|
totalChunkSize := int64(0)
|
||||||
totalFileSize := entries[i].Size
|
totalFileSize := entries[i].Size
|
||||||
@@ -114,6 +125,8 @@ func setEntryContent(entries[] *Entry, chunkLengths[]int, offset int) {
|
|||||||
break
|
break
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// If the current file ends at the end of the current chunk, the next file will
|
||||||
|
// start at the next chunk
|
||||||
if totalChunkSize + int64(length) == totalFileSize {
|
if totalChunkSize + int64(length) == totalFileSize {
|
||||||
entries[i].StartChunk = j + 1 + offset
|
entries[i].StartChunk = j + 1 + offset
|
||||||
entries[i].StartOffset = 0
|
entries[i].StartOffset = 0
|
||||||
@@ -125,8 +138,17 @@ func setEntryContent(entries[] *Entry, chunkLengths[]int, offset int) {
|
|||||||
totalFileSize += entries[i].Size
|
totalFileSize += entries[i].Size
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if i >= len(entries) {
|
||||||
|
break
|
||||||
|
}
|
||||||
totalChunkSize += int64(length)
|
totalChunkSize += int64(length)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// If there are some unvisited entries (which happens when saving an incomplete snapshot),
|
||||||
|
// set their sizes to -1 so they won't be saved to the incomplete snapshot
|
||||||
|
for j := i; j < len(entries); j++ {
|
||||||
|
entries[j].Size = -1
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Backup creates a snapshot for the repository 'top'. If 'quickMode' is true, only files with different sizes
|
// Backup creates a snapshot for the repository 'top'. If 'quickMode' is true, only files with different sizes
|
||||||
@@ -149,7 +171,6 @@ func (manager *BackupManager) Backup(top string, quickMode bool, threads int, ta
|
|||||||
|
|
||||||
remoteSnapshot := manager.SnapshotManager.downloadLatestSnapshot(manager.snapshotID)
|
remoteSnapshot := manager.SnapshotManager.downloadLatestSnapshot(manager.snapshotID)
|
||||||
if remoteSnapshot == nil {
|
if remoteSnapshot == nil {
|
||||||
quickMode = false
|
|
||||||
remoteSnapshot = CreateEmptySnapshot(manager.snapshotID)
|
remoteSnapshot = CreateEmptySnapshot(manager.snapshotID)
|
||||||
LOG_INFO("BACKUP_START", "No previous backup found")
|
LOG_INFO("BACKUP_START", "No previous backup found")
|
||||||
} else {
|
} else {
|
||||||
@@ -170,35 +191,79 @@ func (manager *BackupManager) Backup(top string, quickMode bool, threads int, ta
|
|||||||
// UploadChunk.
|
// UploadChunk.
|
||||||
chunkCache := make(map[string]bool)
|
chunkCache := make(map[string]bool)
|
||||||
|
|
||||||
|
var incompleteSnapshot *Snapshot
|
||||||
|
|
||||||
|
// A revision number of 0 means this is the initial backup
|
||||||
if remoteSnapshot.Revision > 0 {
|
if remoteSnapshot.Revision > 0 {
|
||||||
// Add all chunks in the last snapshot to the
|
// Add all chunks in the last snapshot to the cache
|
||||||
for _, chunkID := range manager.SnapshotManager.GetSnapshotChunks(remoteSnapshot) {
|
for _, chunkID := range manager.SnapshotManager.GetSnapshotChunks(remoteSnapshot) {
|
||||||
chunkCache[chunkID] = true
|
chunkCache[chunkID] = true
|
||||||
}
|
}
|
||||||
} else if manager.storage.IsFastListing() {
|
} else {
|
||||||
// If the listing operation is fast, list all chunks and put them in the cache.
|
|
||||||
LOG_INFO("BACKUP_LIST", "Listing all chunks")
|
// In quick mode, attempt to load the incomplete snapshot from last incomplete backup if there is one.
|
||||||
allChunks, _ := manager.SnapshotManager.ListAllFiles(manager.storage, "chunks/")
|
if quickMode {
|
||||||
|
incompleteSnapshot = LoadIncompleteSnapshot()
|
||||||
for _, chunk := range allChunks {
|
|
||||||
if len(chunk) == 0 || chunk[len(chunk) - 1] == '/' {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
if strings.HasSuffix(chunk, ".fsl") {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
chunk = strings.Replace(chunk, "/", "", -1)
|
|
||||||
chunkCache[chunk] = true
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// If the listing operation is fast or there is an incomplete snapshot, list all chunks and
|
||||||
|
// put them in the cache.
|
||||||
|
if manager.storage.IsFastListing() || incompleteSnapshot != nil {
|
||||||
|
LOG_INFO("BACKUP_LIST", "Listing all chunks")
|
||||||
|
allChunks, _ := manager.SnapshotManager.ListAllFiles(manager.storage, "chunks/")
|
||||||
|
|
||||||
|
for _, chunk := range allChunks {
|
||||||
|
if len(chunk) == 0 || chunk[len(chunk) - 1] == '/' {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
if strings.HasSuffix(chunk, ".fsl") {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
chunk = strings.Replace(chunk, "/", "", -1)
|
||||||
|
chunkCache[chunk] = true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
if incompleteSnapshot != nil {
|
||||||
|
|
||||||
|
// This is the last chunk from the incomplete snapshot that can be found in the cache
|
||||||
|
lastCompleteChunk := -1
|
||||||
|
for i, chunkHash := range incompleteSnapshot.ChunkHashes {
|
||||||
|
chunkID := manager.config.GetChunkIDFromHash(chunkHash)
|
||||||
|
if _, ok := chunkCache[chunkID]; ok {
|
||||||
|
lastCompleteChunk = i
|
||||||
|
} else {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Only keep those files whose chunks exist in the cache
|
||||||
|
var files []*Entry
|
||||||
|
for _, file := range incompleteSnapshot.Files {
|
||||||
|
if file.StartChunk <= lastCompleteChunk && file.EndChunk <= lastCompleteChunk {
|
||||||
|
files = append(files, file)
|
||||||
|
} else {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
incompleteSnapshot.Files = files
|
||||||
|
|
||||||
|
// Remove incomplete chunks (they may not have been uploaded)
|
||||||
|
incompleteSnapshot.ChunkHashes = incompleteSnapshot.ChunkHashes[:lastCompleteChunk + 1]
|
||||||
|
incompleteSnapshot.ChunkLengths = incompleteSnapshot.ChunkLengths[:lastCompleteChunk + 1]
|
||||||
|
remoteSnapshot = incompleteSnapshot
|
||||||
|
LOG_INFO("FILE_SKIP", "Skipped %d files from previous incomplete backup", len(files))
|
||||||
|
}
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
var numberOfNewFileChunks int // number of new file chunks
|
var numberOfNewFileChunks int64 // number of new file chunks
|
||||||
var totalUploadedFileChunkLength int64 // total length of uploaded file chunks
|
var totalUploadedFileChunkLength int64 // total length of uploaded file chunks
|
||||||
var totalUploadedFileChunkBytes int64 // how many actual bytes have been uploaded
|
var totalUploadedFileChunkBytes int64 // how many actual bytes have been uploaded
|
||||||
|
|
||||||
var numberOfNewSnapshotChunks int // number of new snapshot chunks
|
|
||||||
var totalUploadedSnapshotChunkLength int64 // size of uploaded snapshot chunks
|
var totalUploadedSnapshotChunkLength int64 // size of uploaded snapshot chunks
|
||||||
var totalUploadedSnapshotChunkBytes int64 // how many actual bytes have been uploaded
|
var totalUploadedSnapshotChunkBytes int64 // how many actual bytes have been uploaded
|
||||||
|
|
||||||
@@ -210,10 +275,11 @@ func (manager *BackupManager) Backup(top string, quickMode bool, threads int, ta
|
|||||||
var modifiedEntries [] *Entry // Files that has been modified or newly created
|
var modifiedEntries [] *Entry // Files that has been modified or newly created
|
||||||
var preservedEntries [] *Entry // Files unchanges
|
var preservedEntries [] *Entry // Files unchanges
|
||||||
|
|
||||||
// If the quick mode is enabled, we simply treat all files as if they were new, and break them into chunks.
|
// If the quick mode is disable and there isn't an incomplete snapshot from last (failed) backup,
|
||||||
|
// we simply treat all files as if they were new, and break them into chunks.
|
||||||
// Otherwise, we need to find those that are new or recently modified
|
// Otherwise, we need to find those that are new or recently modified
|
||||||
|
|
||||||
if !quickMode {
|
if remoteSnapshot.Revision == 0 && incompleteSnapshot == nil {
|
||||||
modifiedEntries = localSnapshot.Files
|
modifiedEntries = localSnapshot.Files
|
||||||
for _, entry := range modifiedEntries {
|
for _, entry := range modifiedEntries {
|
||||||
totalModifiedFileSize += entry.Size
|
totalModifiedFileSize += entry.Size
|
||||||
@@ -267,7 +333,7 @@ func (manager *BackupManager) Backup(top string, quickMode bool, threads int, ta
|
|||||||
var preservedChunkHashes []string
|
var preservedChunkHashes []string
|
||||||
var preservedChunkLengths []int
|
var preservedChunkLengths []int
|
||||||
|
|
||||||
// For each preserved file, adjust the indices StartChunk and EndChunk. This is done by finding gaps
|
// For each preserved file, adjust the StartChunk and EndChunk pointers. This is done by finding gaps
|
||||||
// between these indices and subtracting the number of deleted chunks.
|
// between these indices and subtracting the number of deleted chunks.
|
||||||
last := -1
|
last := -1
|
||||||
deletedChunks := 0
|
deletedChunks := 0
|
||||||
@@ -294,6 +360,13 @@ func (manager *BackupManager) Backup(top string, quickMode bool, threads int, ta
|
|||||||
var uploadedEntries [] *Entry
|
var uploadedEntries [] *Entry
|
||||||
var uploadedChunkHashes []string
|
var uploadedChunkHashes []string
|
||||||
var uploadedChunkLengths []int
|
var uploadedChunkLengths []int
|
||||||
|
var uploadedChunkLock = &sync.Mutex{}
|
||||||
|
|
||||||
|
// Set all file sizes to -1 to indicate they haven't been processed. This must be done before creating the file
|
||||||
|
// reader because the file reader may skip inaccessible files on construction.
|
||||||
|
for _, entry := range modifiedEntries {
|
||||||
|
entry.Size = -1
|
||||||
|
}
|
||||||
|
|
||||||
// the file reader implements the Reader interface. When an EOF is encounter, it opens the next file unless it
|
// the file reader implements the Reader interface. When an EOF is encounter, it opens the next file unless it
|
||||||
// is the last file.
|
// is the last file.
|
||||||
@@ -314,9 +387,48 @@ func (manager *BackupManager) Backup(top string, quickMode bool, threads int, ta
|
|||||||
keepUploadAlive = int64(value)
|
keepUploadAlive = int64(value)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Fail at the chunk specified by DUPLICACY_FAIL_CHUNK to simulate a backup error
|
||||||
|
chunkToFail := -1
|
||||||
|
if value, found := os.LookupEnv("DUPLICACY_FAIL_CHUNK"); found {
|
||||||
|
chunkToFail, _ = strconv.Atoi(value)
|
||||||
|
LOG_INFO("SNAPSHOT_FAIL", "Will abort the backup on chunk %d", chunkToFail)
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
chunkMaker := CreateChunkMaker(manager.config, false)
|
chunkMaker := CreateChunkMaker(manager.config, false)
|
||||||
chunkUploader := CreateChunkUploader(manager.config, manager.storage, nil, threads, nil)
|
chunkUploader := CreateChunkUploader(manager.config, manager.storage, nil, threads, nil)
|
||||||
|
|
||||||
|
localSnapshotReady := false
|
||||||
|
var once sync.Once
|
||||||
|
|
||||||
|
if remoteSnapshot.Revision == 0 {
|
||||||
|
// In case an error occurs during the initial backup, save the incomplete snapshot
|
||||||
|
RunAtError = func() {
|
||||||
|
once.Do(
|
||||||
|
func() {
|
||||||
|
if !localSnapshotReady {
|
||||||
|
// Lock it to gain exclusive access to uploadedChunkHashes and uploadedChunkLengths
|
||||||
|
uploadedChunkLock.Lock()
|
||||||
|
setEntryContent(uploadedEntries, uploadedChunkLengths, len(preservedChunkHashes))
|
||||||
|
if len(preservedChunkHashes) > 0 {
|
||||||
|
//localSnapshot.Files = preservedEntries
|
||||||
|
//localSnapshot.Files = append(preservedEntries, uploadedEntries...)
|
||||||
|
localSnapshot.ChunkHashes = preservedChunkHashes
|
||||||
|
localSnapshot.ChunkHashes = append(localSnapshot.ChunkHashes, uploadedChunkHashes...)
|
||||||
|
localSnapshot.ChunkLengths = preservedChunkLengths
|
||||||
|
localSnapshot.ChunkLengths = append(localSnapshot.ChunkLengths, uploadedChunkLengths...)
|
||||||
|
} else {
|
||||||
|
//localSnapshot.Files = uploadedEntries
|
||||||
|
localSnapshot.ChunkHashes = uploadedChunkHashes
|
||||||
|
localSnapshot.ChunkLengths = uploadedChunkLengths
|
||||||
|
}
|
||||||
|
uploadedChunkLock.Unlock()
|
||||||
|
}
|
||||||
|
SaveIncompleteSnapshot(localSnapshot)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
if fileReader.CurrentFile != nil {
|
if fileReader.CurrentFile != nil {
|
||||||
|
|
||||||
LOG_TRACE("PACK_START", "Packing %s", fileReader.CurrentEntry.Path)
|
LOG_TRACE("PACK_START", "Packing %s", fileReader.CurrentEntry.Path)
|
||||||
@@ -337,16 +449,16 @@ func (manager *BackupManager) Backup(top string, quickMode bool, threads int, ta
|
|||||||
LOG_DEBUG("CHUNK_CACHE", "Skipped chunk %s in cache", chunk.GetID())
|
LOG_DEBUG("CHUNK_CACHE", "Skipped chunk %s in cache", chunk.GetID())
|
||||||
} else {
|
} else {
|
||||||
if uploadSize > 0 {
|
if uploadSize > 0 {
|
||||||
numberOfNewFileChunks++
|
atomic.AddInt64(&numberOfNewFileChunks, 1)
|
||||||
totalUploadedFileChunkLength += int64(chunkSize)
|
atomic.AddInt64(&totalUploadedFileChunkLength, int64(chunkSize))
|
||||||
totalUploadedFileChunkBytes += int64(uploadSize)
|
atomic.AddInt64(&totalUploadedFileChunkBytes, int64(uploadSize))
|
||||||
action = "Uploaded"
|
action = "Uploaded"
|
||||||
} else {
|
} else {
|
||||||
LOG_DEBUG("CHUNK_EXIST", "Skipped chunk %s in the storage", chunk.GetID())
|
LOG_DEBUG("CHUNK_EXIST", "Skipped chunk %s in the storage", chunk.GetID())
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
uploadedModifiedFileSize += int64(chunkSize)
|
uploadedModifiedFileSize := atomic.AddInt64(&uploadedModifiedFileSize, int64(chunkSize))
|
||||||
|
|
||||||
if IsTracing() || showStatistics {
|
if IsTracing() || showStatistics {
|
||||||
now := time.Now().Unix()
|
now := time.Now().Unix()
|
||||||
@@ -397,24 +509,33 @@ func (manager *BackupManager) Backup(top string, quickMode bool, threads int, ta
|
|||||||
chunkUploader.StartChunk(chunk, chunkIndex)
|
chunkUploader.StartChunk(chunk, chunkIndex)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Must lock it because the RunAtError function called by other threads may access these two slices
|
||||||
|
uploadedChunkLock.Lock()
|
||||||
uploadedChunkHashes = append(uploadedChunkHashes, hash)
|
uploadedChunkHashes = append(uploadedChunkHashes, hash)
|
||||||
uploadedChunkLengths = append(uploadedChunkLengths, chunkSize)
|
uploadedChunkLengths = append(uploadedChunkLengths, chunkSize)
|
||||||
|
uploadedChunkLock.Unlock()
|
||||||
|
|
||||||
|
if len(uploadedChunkHashes) == chunkToFail {
|
||||||
|
LOG_ERROR("SNAPSHOT_FAIL", "Artificially fail the chunk %d for testing purposes", chunkToFail)
|
||||||
|
}
|
||||||
|
|
||||||
},
|
},
|
||||||
func (fileSize int64, hash string) (io.Reader, bool) {
|
func (fileSize int64, hash string) (io.Reader, bool) {
|
||||||
|
|
||||||
|
// Must lock here because the RunAtError function called by other threads may access uploadedEntries
|
||||||
|
uploadedChunkLock.Lock()
|
||||||
|
defer uploadedChunkLock.Unlock()
|
||||||
|
|
||||||
// This function is called when a new file is needed
|
// This function is called when a new file is needed
|
||||||
entry := fileReader.CurrentEntry
|
entry := fileReader.CurrentEntry
|
||||||
entry.Hash = hash
|
entry.Hash = hash
|
||||||
if entry.Size != fileSize {
|
entry.Size = fileSize
|
||||||
totalModifiedFileSize += fileSize - entry.Size
|
|
||||||
entry.Size = fileSize
|
|
||||||
}
|
|
||||||
uploadedEntries = append(uploadedEntries, entry)
|
uploadedEntries = append(uploadedEntries, entry)
|
||||||
|
|
||||||
if !showStatistics || IsTracing() || RunInBackground {
|
if !showStatistics || IsTracing() || RunInBackground {
|
||||||
LOG_INFO("PACK_END", "Packed %s (%d)", entry.Path, entry.Size)
|
LOG_INFO("PACK_END", "Packed %s (%d)", entry.Path, entry.Size)
|
||||||
}
|
}
|
||||||
|
|
||||||
fileReader.NextFile()
|
fileReader.NextFile()
|
||||||
|
|
||||||
if fileReader.CurrentFile != nil {
|
if fileReader.CurrentFile != nil {
|
||||||
@@ -444,20 +565,28 @@ func (manager *BackupManager) Backup(top string, quickMode bool, threads int, ta
|
|||||||
localSnapshot.ChunkLengths = uploadedChunkLengths
|
localSnapshot.ChunkLengths = uploadedChunkLengths
|
||||||
}
|
}
|
||||||
|
|
||||||
|
localSnapshotReady = true
|
||||||
|
|
||||||
localSnapshot.EndTime = time.Now().Unix()
|
localSnapshot.EndTime = time.Now().Unix()
|
||||||
|
|
||||||
err = manager.SnapshotManager.CheckSnapshot(localSnapshot)
|
err = manager.SnapshotManager.CheckSnapshot(localSnapshot)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
RunAtError = func() {} // Don't save the incomplete snapshot
|
||||||
LOG_ERROR("SNAPSHOT_CHECK", "The snapshot contains an error: %v", err)
|
LOG_ERROR("SNAPSHOT_CHECK", "The snapshot contains an error: %v", err)
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
|
|
||||||
localSnapshot.Tag = tag
|
localSnapshot.Tag = tag
|
||||||
localSnapshot.Options = ""
|
localSnapshot.Options = ""
|
||||||
if !quickMode {
|
if !quickMode || remoteSnapshot.Revision == 0 {
|
||||||
localSnapshot.Options = "-hash"
|
localSnapshot.Options = "-hash"
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if _, found := os.LookupEnv("DUPLICACY_FAIL_SNAPSHOT"); found {
|
||||||
|
LOG_ERROR("SNAPSHOT_FAIL", "Artificially fail the backup for testing purposes")
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
if shadowCopy {
|
if shadowCopy {
|
||||||
if localSnapshot.Options == "" {
|
if localSnapshot.Options == "" {
|
||||||
localSnapshot.Options = "-vss"
|
localSnapshot.Options = "-vss"
|
||||||
@@ -493,7 +622,7 @@ func (manager *BackupManager) Backup(top string, quickMode bool, threads int, ta
|
|||||||
}
|
}
|
||||||
|
|
||||||
for _, dir := range skippedDirectories {
|
for _, dir := range skippedDirectories {
|
||||||
LOG_WARN("SKIP_DIRECTORY", "Subdirecotry %s cannot be listed", dir)
|
LOG_WARN("SKIP_DIRECTORY", "Subdirectory %s cannot be listed", dir)
|
||||||
}
|
}
|
||||||
|
|
||||||
for _, file := range fileReader.SkippedFiles {
|
for _, file := range fileReader.SkippedFiles {
|
||||||
@@ -504,6 +633,8 @@ func (manager *BackupManager) Backup(top string, quickMode bool, threads int, ta
|
|||||||
manager.SnapshotManager.CleanSnapshotCache(localSnapshot, nil)
|
manager.SnapshotManager.CleanSnapshotCache(localSnapshot, nil)
|
||||||
LOG_INFO("BACKUP_END", "Backup for %s at revision %d completed", top, localSnapshot.Revision)
|
LOG_INFO("BACKUP_END", "Backup for %s at revision %d completed", top, localSnapshot.Revision)
|
||||||
|
|
||||||
|
RunAtError = func() {}
|
||||||
|
RemoveIncompleteSnapshot()
|
||||||
|
|
||||||
totalSnapshotChunks := len(localSnapshot.FileSequence) + len(localSnapshot.ChunkSequence) +
|
totalSnapshotChunks := len(localSnapshot.FileSequence) + len(localSnapshot.ChunkSequence) +
|
||||||
len(localSnapshot.LengthSequence)
|
len(localSnapshot.LengthSequence)
|
||||||
@@ -527,7 +658,7 @@ func (manager *BackupManager) Backup(top string, quickMode bool, threads int, ta
|
|||||||
LOG_INFO("BACKUP_STATS", "All chunks: %d total, %s bytes; %d new, %s bytes, %s bytes uploaded",
|
LOG_INFO("BACKUP_STATS", "All chunks: %d total, %s bytes; %d new, %s bytes, %s bytes uploaded",
|
||||||
len(localSnapshot.ChunkHashes) + totalSnapshotChunks,
|
len(localSnapshot.ChunkHashes) + totalSnapshotChunks,
|
||||||
PrettyNumber(totalFileChunkLength + totalSnapshotChunkLength),
|
PrettyNumber(totalFileChunkLength + totalSnapshotChunkLength),
|
||||||
numberOfNewFileChunks + numberOfNewSnapshotChunks,
|
int(numberOfNewFileChunks) + numberOfNewSnapshotChunks,
|
||||||
PrettyNumber(totalUploadedFileChunkLength + totalUploadedSnapshotChunkLength),
|
PrettyNumber(totalUploadedFileChunkLength + totalUploadedSnapshotChunkLength),
|
||||||
PrettyNumber(totalUploadedFileChunkBytes + totalUploadedSnapshotChunkBytes))
|
PrettyNumber(totalUploadedFileChunkBytes + totalUploadedSnapshotChunkBytes))
|
||||||
|
|
||||||
@@ -585,6 +716,11 @@ func (manager *BackupManager) Restore(top string, revision int, inPlace bool, qu
|
|||||||
LOG_DEBUG("RESTORE_PARAMETERS", "top: %s, revision: %d, in-place: %t, quick: %t, delete: %t",
|
LOG_DEBUG("RESTORE_PARAMETERS", "top: %s, revision: %d, in-place: %t, quick: %t, delete: %t",
|
||||||
top, revision, inPlace, quickMode, deleteMode)
|
top, revision, inPlace, quickMode, deleteMode)
|
||||||
|
|
||||||
|
if !strings.HasPrefix(GetDuplicacyPreferencePath(), top) {
|
||||||
|
LOG_INFO("RESTORE_INPLACE", "Forcing in-place mode with a non-default preference path")
|
||||||
|
inPlace = true
|
||||||
|
}
|
||||||
|
|
||||||
if len(patterns) > 0 {
|
if len(patterns) > 0 {
|
||||||
for _, pattern := range patterns {
|
for _, pattern := range patterns {
|
||||||
LOG_TRACE("RESTORE_PATTERN", "%s", pattern)
|
LOG_TRACE("RESTORE_PATTERN", "%s", pattern)
|
||||||
@@ -600,6 +736,7 @@ func (manager *BackupManager) Restore(top string, revision int, inPlace bool, qu
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// How will behave restore when repo created using -repo-dir ,??
|
||||||
err = os.Mkdir(path.Join(top, DUPLICACY_DIRECTORY), 0744)
|
err = os.Mkdir(path.Join(top, DUPLICACY_DIRECTORY), 0744)
|
||||||
if err != nil && !os.IsExist(err) {
|
if err != nil && !os.IsExist(err) {
|
||||||
LOG_ERROR("RESTORE_MKDIR", "Failed to create the preference directory: %v", err)
|
LOG_ERROR("RESTORE_MKDIR", "Failed to create the preference directory: %v", err)
|
||||||
@@ -645,6 +782,7 @@ func (manager *BackupManager) Restore(top string, revision int, inPlace bool, qu
|
|||||||
i := 0
|
i := 0
|
||||||
for _, entry := range remoteSnapshot.Files {
|
for _, entry := range remoteSnapshot.Files {
|
||||||
|
|
||||||
|
skipped := false
|
||||||
// Find local files that don't exist in the remote snapshot
|
// Find local files that don't exist in the remote snapshot
|
||||||
for i < len(localSnapshot.Files) {
|
for i < len(localSnapshot.Files) {
|
||||||
local := localSnapshot.Files[i]
|
local := localSnapshot.Files[i]
|
||||||
@@ -656,11 +794,18 @@ func (manager *BackupManager) Restore(top string, revision int, inPlace bool, qu
|
|||||||
} else {
|
} else {
|
||||||
if compare == 0 {
|
if compare == 0 {
|
||||||
i++
|
i++
|
||||||
|
if quickMode && local.IsSameAs(entry) {
|
||||||
|
skipped = true
|
||||||
|
}
|
||||||
}
|
}
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if skipped {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
fullPath := joinPath(top, entry.Path)
|
fullPath := joinPath(top, entry.Path)
|
||||||
if entry.IsLink() {
|
if entry.IsLink() {
|
||||||
stat, err := os.Lstat(fullPath)
|
stat, err := os.Lstat(fullPath)
|
||||||
@@ -774,7 +919,9 @@ func (manager *BackupManager) Restore(top string, revision int, inPlace bool, qu
|
|||||||
|
|
||||||
|
|
||||||
if deleteMode && len(patterns) == 0 {
|
if deleteMode && len(patterns) == 0 {
|
||||||
for _, file := range extraFiles {
|
// Reverse the order to make sure directories are empty before being deleted
|
||||||
|
for i := range extraFiles {
|
||||||
|
file := extraFiles[len(extraFiles) - 1 - i]
|
||||||
fullPath := joinPath(top, file)
|
fullPath := joinPath(top, file)
|
||||||
os.Remove(fullPath)
|
os.Remove(fullPath)
|
||||||
LOG_INFO("RESTORE_DELETE", "Deleted %s", file)
|
LOG_INFO("RESTORE_DELETE", "Deleted %s", file)
|
||||||
@@ -788,8 +935,6 @@ func (manager *BackupManager) Restore(top string, revision int, inPlace bool, qu
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
RemoveEmptyDirectories(top)
|
|
||||||
|
|
||||||
if showStatistics {
|
if showStatistics {
|
||||||
for _, file := range downloadedFiles {
|
for _, file := range downloadedFiles {
|
||||||
LOG_INFO("DOWNLOAD_DONE", "Downloaded %s (%d)", file.Path, file.Size)
|
LOG_INFO("DOWNLOAD_DONE", "Downloaded %s (%d)", file.Path, file.Size)
|
||||||
@@ -882,7 +1027,7 @@ func (manager *BackupManager) UploadSnapshot(chunkMaker *ChunkMaker, uploader *C
|
|||||||
totalUploadedSnapshotChunkSize += int64(chunkSize)
|
totalUploadedSnapshotChunkSize += int64(chunkSize)
|
||||||
totalUploadedSnapshotChunkBytes += int64(uploadSize)
|
totalUploadedSnapshotChunkBytes += int64(uploadSize)
|
||||||
} else {
|
} else {
|
||||||
LOG_DEBUG("CHUNK_EXIST", "Skipped snpashot chunk %s in the storage", chunk.GetID())
|
LOG_DEBUG("CHUNK_EXIST", "Skipped snapshot chunk %s in the storage", chunk.GetID())
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -978,8 +1123,9 @@ func (manager *BackupManager) RestoreFile(chunkDownloader *ChunkDownloader, chun
|
|||||||
|
|
||||||
var existingFile, newFile *os.File
|
var existingFile, newFile *os.File
|
||||||
var err error
|
var err error
|
||||||
|
|
||||||
temporaryPath := path.Join(top, DUPLICACY_DIRECTORY, "temporary")
|
preferencePath := GetDuplicacyPreferencePath()
|
||||||
|
temporaryPath := path.Join(preferencePath, "temporary")
|
||||||
fullPath := joinPath(top, entry.Path)
|
fullPath := joinPath(top, entry.Path)
|
||||||
|
|
||||||
defer func() {
|
defer func() {
|
||||||
@@ -1005,33 +1151,44 @@ func (manager *BackupManager) RestoreFile(chunkDownloader *ChunkDownloader, chun
|
|||||||
var offset int64
|
var offset int64
|
||||||
|
|
||||||
existingFile, err = os.Open(fullPath)
|
existingFile, err = os.Open(fullPath)
|
||||||
if err != nil && !os.IsNotExist(err) {
|
if err != nil {
|
||||||
LOG_TRACE("DOWNLOAD_OPEN", "Can't open the existing file: %v", err)
|
if os.IsNotExist(err) {
|
||||||
}
|
// macOS has no sparse file support
|
||||||
|
if inPlace && entry.Size > 100 * 1024 * 1024 && runtime.GOOS != "darwin" {
|
||||||
|
// Create an empty sparse file
|
||||||
|
existingFile, err = os.OpenFile(fullPath, os.O_WRONLY | os.O_CREATE | os.O_TRUNC, 0600)
|
||||||
|
if err != nil {
|
||||||
|
LOG_ERROR("DOWNLOAD_CREATE", "Failed to create the file %s for in-place writing: %v", fullPath, err)
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
fileHash := ""
|
n := int64(1)
|
||||||
if existingFile != nil {
|
// There is a go bug on Windows (https://github.com/golang/go/issues/21681) that causes Seek to fail
|
||||||
// Break existing file into chunks.
|
// if the lower 32 bit of the offset argument is 0xffffffff. Therefore we need to avoid that value by increasing n.
|
||||||
chunkMaker.ForEachChunk(
|
if uint32(entry.Size) == 0 && (entry.Size >> 32) > 0 {
|
||||||
existingFile,
|
n = int64(2)
|
||||||
func (chunk *Chunk, final bool) {
|
}
|
||||||
hash := chunk.GetHash()
|
_, err = existingFile.Seek(entry.Size - n, 0)
|
||||||
chunkSize := chunk.GetLength()
|
if err != nil {
|
||||||
existingChunks = append(existingChunks, hash)
|
LOG_ERROR("DOWNLOAD_CREATE", "Failed to resize the initial file %s for in-place writing: %v", fullPath, err)
|
||||||
existingLengths = append(existingLengths, chunkSize)
|
return false
|
||||||
offsetMap[hash] = offset
|
}
|
||||||
lengthMap[hash] = chunkSize
|
_, err = existingFile.Write([]byte("\x00\x00")[:n])
|
||||||
offset += int64(chunkSize)
|
if err != nil {
|
||||||
},
|
LOG_ERROR("DOWNLOAD_CREATE", "Failed to initialize the sparse file %s for in-place writing: %v", fullPath, err)
|
||||||
func (fileSize int64, hash string) (io.Reader, bool) {
|
return false
|
||||||
fileHash = hash
|
}
|
||||||
return nil, false
|
existingFile.Close()
|
||||||
})
|
existingFile, err = os.Open(fullPath)
|
||||||
if fileHash == entry.Hash {
|
if err != nil {
|
||||||
LOG_TRACE("DOWNLOAD_SKIP", "File %s unchanged (by hash)", entry.Path)
|
LOG_ERROR("DOWNLOAD_OPEN", "Can't reopen the initial file just created: %v", err)
|
||||||
return false
|
return false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
LOG_TRACE("DOWNLOAD_OPEN", "Can't open the existing file: %v", err)
|
||||||
}
|
}
|
||||||
|
} else {
|
||||||
if !overwrite {
|
if !overwrite {
|
||||||
LOG_ERROR("DOWNLOAD_OVERWRITE",
|
LOG_ERROR("DOWNLOAD_OVERWRITE",
|
||||||
"File %s already exists. Please specify the -overwrite option to continue", entry.Path)
|
"File %s already exists. Please specify the -overwrite option to continue", entry.Path)
|
||||||
@@ -1039,9 +1196,83 @@ func (manager *BackupManager) RestoreFile(chunkDownloader *ChunkDownloader, chun
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
if inPlace {
|
fileHash := ""
|
||||||
if existingFile == nil {
|
if existingFile != nil {
|
||||||
inPlace = false
|
|
||||||
|
if inPlace {
|
||||||
|
// In inplace mode, we only consider chunks in the existing file with the same offsets, so we
|
||||||
|
// break the original file at offsets retrieved from the backup
|
||||||
|
fileHasher := manager.config.NewFileHasher()
|
||||||
|
buffer := make([]byte, 64 * 1024)
|
||||||
|
err = nil
|
||||||
|
// We set to read one more byte so the file hash will be different if the file to be restored is a
|
||||||
|
// truncated portion of the existing file
|
||||||
|
for i := entry.StartChunk; i <= entry.EndChunk + 1; i++ {
|
||||||
|
hasher := manager.config.NewKeyedHasher(manager.config.HashKey)
|
||||||
|
chunkSize := 1 // the size of extra chunk beyond EndChunk
|
||||||
|
if i == entry.StartChunk {
|
||||||
|
chunkSize -= entry.StartOffset
|
||||||
|
} else if i == entry.EndChunk {
|
||||||
|
chunkSize = entry.EndOffset
|
||||||
|
} else if i > entry.StartChunk && i < entry.EndChunk {
|
||||||
|
chunkSize = chunkDownloader.taskList[i].chunkLength
|
||||||
|
}
|
||||||
|
count := 0
|
||||||
|
for count < chunkSize {
|
||||||
|
n := chunkSize - count
|
||||||
|
if n > cap(buffer) {
|
||||||
|
n = cap(buffer)
|
||||||
|
}
|
||||||
|
n, err := existingFile.Read(buffer[:n])
|
||||||
|
if n > 0 {
|
||||||
|
hasher.Write(buffer[:n])
|
||||||
|
fileHasher.Write(buffer[:n])
|
||||||
|
count += n
|
||||||
|
}
|
||||||
|
if err == io.EOF {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
if err != nil {
|
||||||
|
LOG_ERROR("DOWNLOAD_SPLIT", "Failed to read existing file: %v", err)
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if count > 0 {
|
||||||
|
hash := string(hasher.Sum(nil))
|
||||||
|
existingChunks = append(existingChunks, hash)
|
||||||
|
existingLengths = append(existingLengths, chunkSize)
|
||||||
|
offsetMap[hash] = offset
|
||||||
|
lengthMap[hash] = chunkSize
|
||||||
|
offset += int64(chunkSize)
|
||||||
|
}
|
||||||
|
|
||||||
|
if err == io.EOF {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
fileHash = hex.EncodeToString(fileHasher.Sum(nil))
|
||||||
|
} else {
|
||||||
|
// If it is not inplace, we want to reuse any chunks in the existing file regardless their offets, so
|
||||||
|
// we run the chunk maker to split the original file.
|
||||||
|
chunkMaker.ForEachChunk(
|
||||||
|
existingFile,
|
||||||
|
func (chunk *Chunk, final bool) {
|
||||||
|
hash := chunk.GetHash()
|
||||||
|
chunkSize := chunk.GetLength()
|
||||||
|
existingChunks = append(existingChunks, hash)
|
||||||
|
existingLengths = append(existingLengths, chunkSize)
|
||||||
|
offsetMap[hash] = offset
|
||||||
|
lengthMap[hash] = chunkSize
|
||||||
|
offset += int64(chunkSize)
|
||||||
|
},
|
||||||
|
func (fileSize int64, hash string) (io.Reader, bool) {
|
||||||
|
fileHash = hash
|
||||||
|
return nil, false
|
||||||
|
})
|
||||||
|
}
|
||||||
|
if fileHash == entry.Hash && fileHash != "" {
|
||||||
|
LOG_TRACE("DOWNLOAD_SKIP", "File %s unchanged (by hash)", entry.Path)
|
||||||
|
return false
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -1057,11 +1288,20 @@ func (manager *BackupManager) RestoreFile(chunkDownloader *ChunkDownloader, chun
|
|||||||
|
|
||||||
LOG_TRACE("DOWNLOAD_INPLACE", "Updating %s in place", fullPath)
|
LOG_TRACE("DOWNLOAD_INPLACE", "Updating %s in place", fullPath)
|
||||||
|
|
||||||
existingFile.Close()
|
if existingFile == nil {
|
||||||
existingFile, err = os.OpenFile(fullPath, os.O_RDWR, 0)
|
// Create an empty file
|
||||||
if err != nil {
|
existingFile, err = os.OpenFile(fullPath, os.O_WRONLY | os.O_CREATE | os.O_TRUNC, 0600)
|
||||||
LOG_ERROR("DOWNLOAD_OPEN", "Failed to open the file %s for in-place writing", fullPath)
|
if err != nil {
|
||||||
return false
|
LOG_ERROR("DOWNLOAD_CREATE", "Failed to create the file %s for in-place writing", fullPath)
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
// Close and reopen in a different mode
|
||||||
|
existingFile.Close()
|
||||||
|
existingFile, err = os.OpenFile(fullPath, os.O_RDWR, 0)
|
||||||
|
if err != nil {
|
||||||
|
LOG_ERROR("DOWNLOAD_OPEN", "Failed to open the file %s for in-place writing", fullPath)
|
||||||
|
return false
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
existingFile.Seek(0, 0)
|
existingFile.Seek(0, 0)
|
||||||
@@ -1128,7 +1368,7 @@ func (manager *BackupManager) RestoreFile(chunkDownloader *ChunkDownloader, chun
|
|||||||
|
|
||||||
// Verify the download by hash
|
// Verify the download by hash
|
||||||
hash := hex.EncodeToString(hasher.Sum(nil))
|
hash := hex.EncodeToString(hasher.Sum(nil))
|
||||||
if hash != entry.Hash {
|
if hash != entry.Hash && hash != "" && entry.Hash != "" && !strings.HasPrefix(entry.Hash, "#") {
|
||||||
LOG_ERROR("DOWNLOAD_HASH", "File %s has a mismatched hash: %s instead of %s (in-place)",
|
LOG_ERROR("DOWNLOAD_HASH", "File %s has a mismatched hash: %s instead of %s (in-place)",
|
||||||
fullPath, "", entry.Hash)
|
fullPath, "", entry.Hash)
|
||||||
return false
|
return false
|
||||||
@@ -1201,7 +1441,7 @@ func (manager *BackupManager) RestoreFile(chunkDownloader *ChunkDownloader, chun
|
|||||||
}
|
}
|
||||||
|
|
||||||
hash := hex.EncodeToString(hasher.Sum(nil))
|
hash := hex.EncodeToString(hasher.Sum(nil))
|
||||||
if hash != entry.Hash {
|
if hash != entry.Hash && hash != "" && entry.Hash != "" && !strings.HasPrefix(entry.Hash, "#") {
|
||||||
LOG_ERROR("DOWNLOAD_HASH", "File %s has a mismatched hash: %s instead of %s",
|
LOG_ERROR("DOWNLOAD_HASH", "File %s has a mismatched hash: %s instead of %s",
|
||||||
entry.Path, hash, entry.Hash)
|
entry.Path, hash, entry.Hash)
|
||||||
return false
|
return false
|
||||||
@@ -1244,14 +1484,27 @@ func (manager *BackupManager) CopySnapshots(otherManager *BackupManager, snapsho
|
|||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
|
|
||||||
revisionMap := make(map[int]bool)
|
if snapshotID == "" && len(revisionsToBeCopied) > 0 {
|
||||||
|
LOG_ERROR("SNAPSHOT_ERROR", "You must specify the snapshot id when one or more revisions are specified.")
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
revisionMap := make(map[string]map[int]bool)
|
||||||
|
|
||||||
|
_, found := revisionMap[snapshotID]
|
||||||
|
if !found {
|
||||||
|
revisionMap[snapshotID] = make(map[int]bool)
|
||||||
|
}
|
||||||
|
|
||||||
for _, revision := range revisionsToBeCopied {
|
for _, revision := range revisionsToBeCopied {
|
||||||
revisionMap[revision] = true
|
revisionMap[snapshotID][revision] = true
|
||||||
}
|
}
|
||||||
|
|
||||||
var snapshots [] *Snapshot
|
var snapshots [] *Snapshot
|
||||||
|
var otherSnapshots [] *Snapshot
|
||||||
var snapshotIDs [] string
|
var snapshotIDs [] string
|
||||||
var err error
|
var err error
|
||||||
|
|
||||||
if snapshotID == "" {
|
if snapshotID == "" {
|
||||||
snapshotIDs, err = manager.SnapshotManager.ListSnapshotIDs()
|
snapshotIDs, err = manager.SnapshotManager.ListSnapshotIDs()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@@ -1263,6 +1516,10 @@ func (manager *BackupManager) CopySnapshots(otherManager *BackupManager, snapsho
|
|||||||
}
|
}
|
||||||
|
|
||||||
for _, id := range snapshotIDs {
|
for _, id := range snapshotIDs {
|
||||||
|
_, found := revisionMap[id]
|
||||||
|
if !found {
|
||||||
|
revisionMap[id] = make(map[int]bool)
|
||||||
|
}
|
||||||
revisions, err := manager.SnapshotManager.ListSnapshotRevisions(id)
|
revisions, err := manager.SnapshotManager.ListSnapshotRevisions(id)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
LOG_ERROR("SNAPSHOT_LIST", "Failed to list all revisions for snapshot %s: %v", id, err)
|
LOG_ERROR("SNAPSHOT_LIST", "Failed to list all revisions for snapshot %s: %v", id, err)
|
||||||
@@ -1271,9 +1528,14 @@ func (manager *BackupManager) CopySnapshots(otherManager *BackupManager, snapsho
|
|||||||
|
|
||||||
for _, revision := range revisions {
|
for _, revision := range revisions {
|
||||||
if len(revisionsToBeCopied) > 0 {
|
if len(revisionsToBeCopied) > 0 {
|
||||||
if _, found := revisionMap[revision]; !found {
|
if _, found := revisionMap[id][revision]; found {
|
||||||
|
revisionMap[id][revision] = true
|
||||||
|
} else {
|
||||||
|
revisionMap[id][revision] = false
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
} else {
|
||||||
|
revisionMap[id][revision] = true
|
||||||
}
|
}
|
||||||
|
|
||||||
snapshotPath := fmt.Sprintf("snapshots/%s/%d", id, revision)
|
snapshotPath := fmt.Sprintf("snapshots/%s/%d", id, revision)
|
||||||
@@ -1285,21 +1547,44 @@ func (manager *BackupManager) CopySnapshots(otherManager *BackupManager, snapsho
|
|||||||
}
|
}
|
||||||
|
|
||||||
if exist {
|
if exist {
|
||||||
LOG_INFO("SNAPSHOT_EXIST", "Snapshot %s at revision %d already exists in the destination storage",
|
LOG_INFO("SNAPSHOT_EXIST", "Snapshot %s at revision %d already exists at the destination storage",
|
||||||
id, revision)
|
id, revision)
|
||||||
|
revisionMap[id][revision] = false
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
snapshot := manager.SnapshotManager.DownloadSnapshot(id, revision)
|
snapshot := manager.SnapshotManager.DownloadSnapshot(id, revision)
|
||||||
snapshots = append(snapshots, snapshot)
|
snapshots = append(snapshots, snapshot)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
otherRevisions, err := otherManager.SnapshotManager.ListSnapshotRevisions(id)
|
||||||
|
if err != nil {
|
||||||
|
LOG_ERROR("SNAPSHOT_LIST", "Failed to list all revisions at the destination for snapshot %s: %v", id, err)
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, otherRevision := range otherRevisions {
|
||||||
|
otherSnapshot := otherManager.SnapshotManager.DownloadSnapshot(id, otherRevision)
|
||||||
|
otherSnapshots = append(otherSnapshots, otherSnapshot)
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(snapshots) == 0 {
|
||||||
|
LOG_INFO("SNAPSHOT_COPY", "Nothing to copy, all snapshot revisions exist at the destination.")
|
||||||
|
return true
|
||||||
}
|
}
|
||||||
|
|
||||||
chunks := make(map[string]bool)
|
chunks := make(map[string]bool)
|
||||||
|
|
||||||
for _, snapshot := range snapshots {
|
for _, snapshot := range snapshots {
|
||||||
|
|
||||||
|
if revisionMap[snapshot.ID][snapshot.Revision] == false {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
LOG_TRACE("SNAPSHOT_COPY", "Copying snapshot %s at revision %d", snapshot.ID, snapshot.Revision)
|
LOG_TRACE("SNAPSHOT_COPY", "Copying snapshot %s at revision %d", snapshot.ID, snapshot.Revision)
|
||||||
|
|
||||||
for _, chunkHash := range snapshot.FileSequence {
|
for _, chunkHash := range snapshot.FileSequence {
|
||||||
chunks[chunkHash] = true
|
chunks[chunkHash] = true
|
||||||
}
|
}
|
||||||
@@ -1325,38 +1610,90 @@ func (manager *BackupManager) CopySnapshots(otherManager *BackupManager, snapsho
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
for _, otherSnapshot := range otherSnapshots {
|
||||||
|
|
||||||
|
for _, chunkHash := range otherSnapshot.FileSequence {
|
||||||
|
if _, found := chunks[chunkHash]; found {
|
||||||
|
chunks[chunkHash] = false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, chunkHash := range otherSnapshot.ChunkSequence {
|
||||||
|
if _, found := chunks[chunkHash]; found {
|
||||||
|
chunks[chunkHash] = false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, chunkHash := range otherSnapshot.LengthSequence {
|
||||||
|
if _, found := chunks[chunkHash]; found {
|
||||||
|
chunks[chunkHash] = false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
description := otherManager.SnapshotManager.DownloadSequence(otherSnapshot.ChunkSequence)
|
||||||
|
err := otherSnapshot.LoadChunks(description)
|
||||||
|
if err != nil {
|
||||||
|
LOG_ERROR("SNAPSHOT_CHUNK", "Failed to load chunks for destination snapshot %s at revision %d: %v",
|
||||||
|
otherSnapshot.ID, otherSnapshot.Revision, err)
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, chunkHash := range otherSnapshot.ChunkHashes {
|
||||||
|
if _, found := chunks[chunkHash]; found {
|
||||||
|
chunks[chunkHash] = false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
chunkDownloader := CreateChunkDownloader(manager.config, manager.storage, nil, false, threads)
|
chunkDownloader := CreateChunkDownloader(manager.config, manager.storage, nil, false, threads)
|
||||||
|
|
||||||
chunkUploader := CreateChunkUploader(otherManager.config, otherManager.storage, nil, threads,
|
chunkUploader := CreateChunkUploader(otherManager.config, otherManager.storage, nil, threads,
|
||||||
func(chunk *Chunk, chunkIndex int, skipped bool, chunkSize int, uploadSize int) {
|
func(chunk *Chunk, chunkIndex int, skipped bool, chunkSize int, uploadSize int) {
|
||||||
if skipped {
|
if skipped {
|
||||||
LOG_INFO("SNAPSHOT_COPY", "Chunk %s (%d/%d) exists in the destination", chunk.GetID(), chunkIndex, len(chunks))
|
LOG_INFO("SNAPSHOT_COPY", "Chunk %s (%d/%d) exists at the destination", chunk.GetID(), chunkIndex, len(chunks))
|
||||||
} else {
|
} else {
|
||||||
LOG_INFO("SNAPSHOT_COPY", "Copied chunk %s (%d/%d)", chunk.GetID(), chunkIndex, len(chunks))
|
LOG_INFO("SNAPSHOT_COPY", "Chunk %s (%d/%d) copied to the destination", chunk.GetID(), chunkIndex, len(chunks))
|
||||||
}
|
}
|
||||||
|
otherManager.config.PutChunk(chunk)
|
||||||
})
|
})
|
||||||
|
|
||||||
chunkUploader.Start()
|
chunkUploader.Start()
|
||||||
|
|
||||||
|
totalCopied := 0
|
||||||
|
totalSkipped := 0
|
||||||
chunkIndex := 0
|
chunkIndex := 0
|
||||||
for chunkHash, _ := range chunks {
|
|
||||||
|
for chunkHash, needsCopy := range chunks {
|
||||||
chunkIndex++
|
chunkIndex++
|
||||||
chunkID := manager.config.GetChunkIDFromHash(chunkHash)
|
chunkID := manager.config.GetChunkIDFromHash(chunkHash)
|
||||||
newChunkID := otherManager.config.GetChunkIDFromHash(chunkHash)
|
if needsCopy {
|
||||||
|
newChunkID := otherManager.config.GetChunkIDFromHash(chunkHash)
|
||||||
LOG_DEBUG("SNAPSHOT_COPY", "Copying chunk %s to %s", chunkID, newChunkID)
|
LOG_DEBUG("SNAPSHOT_COPY", "Copying chunk %s to %s", chunkID, newChunkID)
|
||||||
|
i := chunkDownloader.AddChunk(chunkHash)
|
||||||
i := chunkDownloader.AddChunk(chunkHash)
|
chunk := chunkDownloader.WaitForChunk(i)
|
||||||
chunk := chunkDownloader.WaitForChunk(i)
|
newChunk := otherManager.config.GetChunk()
|
||||||
chunkUploader.StartChunk(chunk, chunkIndex)
|
newChunk.Reset(true)
|
||||||
|
newChunk.Write(chunk.GetBytes())
|
||||||
|
chunkUploader.StartChunk(newChunk, chunkIndex)
|
||||||
|
totalCopied++
|
||||||
|
} else {
|
||||||
|
LOG_INFO("SNAPSHOT_COPY", "Chunk %s (%d/%d) skipped at the destination", chunkID, chunkIndex, len(chunks))
|
||||||
|
totalSkipped++
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
chunkDownloader.Stop()
|
chunkDownloader.Stop()
|
||||||
chunkUploader.Stop()
|
chunkUploader.Stop()
|
||||||
|
|
||||||
|
LOG_INFO("SNAPSHOT_COPY", "Total chunks copied = %d, skipped = %d.", totalCopied, totalSkipped)
|
||||||
|
|
||||||
for _, snapshot := range snapshots {
|
for _, snapshot := range snapshots {
|
||||||
otherManager.storage.CreateDirectory(0, fmt.Sprintf("snapshots/%s", manager.snapshotID))
|
if revisionMap[snapshot.ID][snapshot.Revision] == false {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
otherManager.storage.CreateDirectory(0, fmt.Sprintf("snapshots/%s", snapshot.ID))
|
||||||
description, _ := snapshot.MarshalJSON()
|
description, _ := snapshot.MarshalJSON()
|
||||||
path := fmt.Sprintf("snapshots/%s/%d", manager.snapshotID, snapshot.Revision)
|
path := fmt.Sprintf("snapshots/%s/%d", snapshot.ID, snapshot.Revision)
|
||||||
otherManager.SnapshotManager.UploadFile(path, path, description)
|
otherManager.SnapshotManager.UploadFile(path, path, description)
|
||||||
LOG_INFO("SNAPSHOT_COPY", "Copied snapshot %s at revision %d", snapshot.ID, snapshot.Revision)
|
LOG_INFO("SNAPSHOT_COPY", "Copied snapshot %s at revision %d", snapshot.ID, snapshot.Revision)
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1,6 +1,6 @@
|
|||||||
// Copyright (c) Acrosync LLC. All rights reserved.
|
// Copyright (c) Acrosync LLC. All rights reserved.
|
||||||
// Licensed under the Fair Source License 0.9 (https://fair.io/)
|
// Free for personal use and commercial trial
|
||||||
// User Limitation: 5 users
|
// Commercial use requires per-user licenses available from https://duplicacy.com
|
||||||
|
|
||||||
package duplicacy
|
package duplicacy
|
||||||
|
|
||||||
@@ -104,6 +104,27 @@ func modifyFile(path string, portion float32) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func checkExistence(t *testing.T, path string, exists bool, isDir bool) {
|
||||||
|
stat, err := os.Stat(path)
|
||||||
|
if exists {
|
||||||
|
if err != nil {
|
||||||
|
t.Errorf("%s does not exist: %v", path, err)
|
||||||
|
} else if isDir {
|
||||||
|
if !stat.Mode().IsDir() {
|
||||||
|
t.Errorf("%s is not a directory", path)
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
if stat.Mode().IsDir() {
|
||||||
|
t.Errorf("%s is not a file", path)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
if err == nil || !os.IsNotExist(err) {
|
||||||
|
t.Errorf("%s may exist: %v", path, err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
func truncateFile(path string) {
|
func truncateFile(path string) {
|
||||||
file, err := os.OpenFile(path, os.O_WRONLY, 0644)
|
file, err := os.OpenFile(path, os.O_WRONLY, 0644)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@@ -173,6 +194,9 @@ func TestBackupManager(t *testing.T) {
|
|||||||
|
|
||||||
os.Mkdir(testDir + "/repository1", 0700)
|
os.Mkdir(testDir + "/repository1", 0700)
|
||||||
os.Mkdir(testDir + "/repository1/dir1", 0700)
|
os.Mkdir(testDir + "/repository1/dir1", 0700)
|
||||||
|
os.Mkdir(testDir + "/repository1/.duplicacy", 0700)
|
||||||
|
os.Mkdir(testDir + "/repository2", 0700)
|
||||||
|
os.Mkdir(testDir + "/repository2/.duplicacy", 0700)
|
||||||
|
|
||||||
maxFileSize := 1000000
|
maxFileSize := 1000000
|
||||||
//maxFileSize := 200000
|
//maxFileSize := 200000
|
||||||
@@ -215,11 +239,14 @@ func TestBackupManager(t *testing.T) {
|
|||||||
|
|
||||||
time.Sleep(time.Duration(delay) * time.Second)
|
time.Sleep(time.Duration(delay) * time.Second)
|
||||||
|
|
||||||
|
SetDuplicacyPreferencePath(testDir + "/repository1/.duplicacy")
|
||||||
backupManager := CreateBackupManager("host1", storage, testDir, password)
|
backupManager := CreateBackupManager("host1", storage, testDir, password)
|
||||||
backupManager.SetupSnapshotCache(testDir + "/repository1", "default")
|
backupManager.SetupSnapshotCache("default")
|
||||||
|
|
||||||
|
SetDuplicacyPreferencePath(testDir + "/repository1/.duplicacy")
|
||||||
backupManager.Backup(testDir + "/repository1", /*quickMode=*/true, threads, "first", false, false)
|
backupManager.Backup(testDir + "/repository1", /*quickMode=*/true, threads, "first", false, false)
|
||||||
time.Sleep(time.Duration(delay) * time.Second)
|
time.Sleep(time.Duration(delay) * time.Second)
|
||||||
|
SetDuplicacyPreferencePath(testDir + "/repository2/.duplicacy")
|
||||||
backupManager.Restore(testDir + "/repository2", threads, /*inPlace=*/false, /*quickMode=*/false, threads, /*overwrite=*/true,
|
backupManager.Restore(testDir + "/repository2", threads, /*inPlace=*/false, /*quickMode=*/false, threads, /*overwrite=*/true,
|
||||||
/*deleteMode=*/false, /*showStatistics=*/false, /*patterns=*/nil)
|
/*deleteMode=*/false, /*showStatistics=*/false, /*patterns=*/nil)
|
||||||
|
|
||||||
@@ -240,8 +267,10 @@ func TestBackupManager(t *testing.T) {
|
|||||||
modifyFile(testDir + "/repository1/file2", 0.2)
|
modifyFile(testDir + "/repository1/file2", 0.2)
|
||||||
modifyFile(testDir + "/repository1/dir1/file3", 0.3)
|
modifyFile(testDir + "/repository1/dir1/file3", 0.3)
|
||||||
|
|
||||||
|
SetDuplicacyPreferencePath(testDir + "/repository1/.duplicacy")
|
||||||
backupManager.Backup(testDir + "/repository1", /*quickMode=*/true, threads, "second", false, false)
|
backupManager.Backup(testDir + "/repository1", /*quickMode=*/true, threads, "second", false, false)
|
||||||
time.Sleep(time.Duration(delay) * time.Second)
|
time.Sleep(time.Duration(delay) * time.Second)
|
||||||
|
SetDuplicacyPreferencePath(testDir + "/repository2/.duplicacy")
|
||||||
backupManager.Restore(testDir + "/repository2", 2, /*inPlace=*/true, /*quickMode=*/true, threads, /*overwrite=*/true,
|
backupManager.Restore(testDir + "/repository2", 2, /*inPlace=*/true, /*quickMode=*/true, threads, /*overwrite=*/true,
|
||||||
/*deleteMode=*/false, /*showStatistics=*/false, /*patterns=*/nil)
|
/*deleteMode=*/false, /*showStatistics=*/false, /*patterns=*/nil)
|
||||||
|
|
||||||
@@ -253,11 +282,25 @@ func TestBackupManager(t *testing.T) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Truncate file2 and add a few empty directories
|
||||||
truncateFile(testDir + "/repository1/file2")
|
truncateFile(testDir + "/repository1/file2")
|
||||||
|
os.Mkdir(testDir + "/repository1/dir2", 0700)
|
||||||
|
os.Mkdir(testDir + "/repository1/dir2/dir3", 0700)
|
||||||
|
os.Mkdir(testDir + "/repository1/dir4", 0700)
|
||||||
|
SetDuplicacyPreferencePath(testDir + "/repository1/.duplicacy")
|
||||||
backupManager.Backup(testDir + "/repository1", /*quickMode=*/false, threads, "third", false, false)
|
backupManager.Backup(testDir + "/repository1", /*quickMode=*/false, threads, "third", false, false)
|
||||||
time.Sleep(time.Duration(delay) * time.Second)
|
time.Sleep(time.Duration(delay) * time.Second)
|
||||||
|
|
||||||
|
// Create some directories and files under repository2 that will be deleted during restore
|
||||||
|
os.Mkdir(testDir + "/repository2/dir5", 0700)
|
||||||
|
os.Mkdir(testDir + "/repository2/dir5/dir6", 0700)
|
||||||
|
os.Mkdir(testDir + "/repository2/dir7", 0700)
|
||||||
|
createRandomFile(testDir + "/repository2/file4", 100)
|
||||||
|
createRandomFile(testDir + "/repository2/dir5/file5", 100)
|
||||||
|
|
||||||
|
SetDuplicacyPreferencePath(testDir + "/repository2/.duplicacy")
|
||||||
backupManager.Restore(testDir + "/repository2", 3, /*inPlace=*/true, /*quickMode=*/false, threads, /*overwrite=*/true,
|
backupManager.Restore(testDir + "/repository2", 3, /*inPlace=*/true, /*quickMode=*/false, threads, /*overwrite=*/true,
|
||||||
/*deleteMode=*/false, /*showStatistics=*/false, /*patterns=*/nil)
|
/*deleteMode=*/true, /*showStatistics=*/false, /*patterns=*/nil)
|
||||||
|
|
||||||
for _, f := range []string{ "file1", "file2", "dir1/file3" } {
|
for _, f := range []string{ "file1", "file2", "dir1/file3" } {
|
||||||
hash1 := getFileHash(testDir + "/repository1/" + f)
|
hash1 := getFileHash(testDir + "/repository1/" + f)
|
||||||
@@ -267,9 +310,22 @@ func TestBackupManager(t *testing.T) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// These files/dirs should not exist because deleteMode == true
|
||||||
|
checkExistence(t, testDir + "/repository2/dir5", false, false);
|
||||||
|
checkExistence(t, testDir + "/repository2/dir5/dir6", false, false);
|
||||||
|
checkExistence(t, testDir + "/repository2/dir7", false, false);
|
||||||
|
checkExistence(t, testDir + "/repository2/file4", false, false);
|
||||||
|
checkExistence(t, testDir + "/repository2/dir5/file5", false, false);
|
||||||
|
|
||||||
|
// These empty dirs should exist
|
||||||
|
checkExistence(t, testDir + "/repository2/dir2", true, true);
|
||||||
|
checkExistence(t, testDir + "/repository2/dir2/dir3", true, true);
|
||||||
|
checkExistence(t, testDir + "/repository2/dir4", true, true);
|
||||||
|
|
||||||
// Remove file2 and dir1/file3 and restore them from revision 3
|
// Remove file2 and dir1/file3 and restore them from revision 3
|
||||||
os.Remove(testDir + "/repository1/file2")
|
os.Remove(testDir + "/repository1/file2")
|
||||||
os.Remove(testDir + "/repository1/dir1/file3")
|
os.Remove(testDir + "/repository1/dir1/file3")
|
||||||
|
SetDuplicacyPreferencePath(testDir + "/repository1/.duplicacy")
|
||||||
backupManager.Restore(testDir + "/repository1", 3, /*inPlace=*/true, /*quickMode=*/false, threads, /*overwrite=*/true,
|
backupManager.Restore(testDir + "/repository1", 3, /*inPlace=*/true, /*quickMode=*/false, threads, /*overwrite=*/true,
|
||||||
/*deleteMode=*/false, /*showStatistics=*/false, /*patterns=*/[]string{"+file2", "+dir1/file3", "-*"})
|
/*deleteMode=*/false, /*showStatistics=*/false, /*patterns=*/[]string{"+file2", "+dir1/file3", "-*"})
|
||||||
|
|
||||||
|
|||||||
@@ -1,6 +1,6 @@
|
|||||||
// Copyright (c) Acrosync LLC. All rights reserved.
|
// Copyright (c) Acrosync LLC. All rights reserved.
|
||||||
// Licensed under the Fair Source License 0.9 (https://fair.io/)
|
// Free for personal use and commercial trial
|
||||||
// User Limitation: 5 users
|
// Commercial use requires per-user licenses available from https://duplicacy.com
|
||||||
|
|
||||||
package duplicacy
|
package duplicacy
|
||||||
|
|
||||||
@@ -154,6 +154,18 @@ func (chunk *Chunk) GetID() string {
|
|||||||
return chunk.id
|
return chunk.id
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (chunk *Chunk) VerifyID() {
|
||||||
|
hasher := chunk.config.NewKeyedHasher(chunk.config.HashKey)
|
||||||
|
hasher.Write(chunk.buffer.Bytes())
|
||||||
|
hash := hasher.Sum(nil)
|
||||||
|
hasher = chunk.config.NewKeyedHasher(chunk.config.IDKey)
|
||||||
|
hasher.Write([]byte(hash))
|
||||||
|
chunkID := hex.EncodeToString(hasher.Sum(nil))
|
||||||
|
if chunkID != chunk.GetID() {
|
||||||
|
LOG_ERROR("CHUNK_ID", "The chunk id should be %s instead of %s, length: %d", chunkID, chunk.GetID(), len(chunk.buffer.Bytes()))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
// Encrypt encrypts the plain data stored in the chunk buffer. If derivationKey is not nil, the actual
|
// Encrypt encrypts the plain data stored in the chunk buffer. If derivationKey is not nil, the actual
|
||||||
// encryption key will be HMAC-SHA256(encryptionKey, derivationKey).
|
// encryption key will be HMAC-SHA256(encryptionKey, derivationKey).
|
||||||
func (chunk *Chunk) Encrypt(encryptionKey []byte, derivationKey string) (err error) {
|
func (chunk *Chunk) Encrypt(encryptionKey []byte, derivationKey string) (err error) {
|
||||||
|
|||||||
@@ -1,6 +1,6 @@
|
|||||||
// Copyright (c) Acrosync LLC. All rights reserved.
|
// Copyright (c) Acrosync LLC. All rights reserved.
|
||||||
// Licensed under the Fair Source License 0.9 (https://fair.io/)
|
// Free for personal use and commercial trial
|
||||||
// User Limitation: 5 users
|
// Commercial use requires per-user licenses available from https://duplicacy.com
|
||||||
|
|
||||||
package duplicacy
|
package duplicacy
|
||||||
|
|
||||||
|
|||||||
@@ -1,6 +1,6 @@
|
|||||||
// Copyright (c) Acrosync LLC. All rights reserved.
|
// Copyright (c) Acrosync LLC. All rights reserved.
|
||||||
// Licensed under the Fair Source License 0.9 (https://fair.io/)
|
// Free for personal use and commercial trial
|
||||||
// User Limitation: 5 users
|
// Commercial use requires per-user licenses available from https://duplicacy.com
|
||||||
|
|
||||||
package duplicacy
|
package duplicacy
|
||||||
|
|
||||||
@@ -45,8 +45,8 @@ type ChunkDownloader struct {
|
|||||||
completionChannel chan ChunkDownloadCompletion // A downloading goroutine sends back the chunk via this channel after downloading
|
completionChannel chan ChunkDownloadCompletion // A downloading goroutine sends back the chunk via this channel after downloading
|
||||||
|
|
||||||
startTime int64 // The time it starts downloading
|
startTime int64 // The time it starts downloading
|
||||||
totalFileSize int64 // Total file size
|
totalChunkSize int64 // Total chunk size
|
||||||
downloadedFileSize int64 // Downloaded file size
|
downloadedChunkSize int64 // Downloaded chunk size
|
||||||
numberOfDownloadedChunks int // The number of chunks that have been downloaded
|
numberOfDownloadedChunks int // The number of chunks that have been downloaded
|
||||||
numberOfDownloadingChunks int // The number of chunks still being downloaded
|
numberOfDownloadingChunks int // The number of chunks still being downloaded
|
||||||
numberOfActiveChunks int // The number of chunks that is being downloaded or has been downloaded but not reclaimed
|
numberOfActiveChunks int // The number of chunks that is being downloaded or has been downloaded but not reclaimed
|
||||||
@@ -95,7 +95,7 @@ func (downloader *ChunkDownloader) AddFiles(snapshot *Snapshot, files [] *Entry)
|
|||||||
downloader.taskList = nil
|
downloader.taskList = nil
|
||||||
lastChunkIndex := -1
|
lastChunkIndex := -1
|
||||||
maximumChunks := 0
|
maximumChunks := 0
|
||||||
downloader.totalFileSize = 0
|
downloader.totalChunkSize = 0
|
||||||
for _, file := range files {
|
for _, file := range files {
|
||||||
if file.Size == 0 {
|
if file.Size == 0 {
|
||||||
continue
|
continue
|
||||||
@@ -109,6 +109,7 @@ func (downloader *ChunkDownloader) AddFiles(snapshot *Snapshot, files [] *Entry)
|
|||||||
needed: false,
|
needed: false,
|
||||||
}
|
}
|
||||||
downloader.taskList = append(downloader.taskList, task)
|
downloader.taskList = append(downloader.taskList, task)
|
||||||
|
downloader.totalChunkSize += int64(snapshot.ChunkLengths[i])
|
||||||
} else {
|
} else {
|
||||||
downloader.taskList[len(downloader.taskList) - 1].needed = true
|
downloader.taskList[len(downloader.taskList) - 1].needed = true
|
||||||
}
|
}
|
||||||
@@ -119,7 +120,6 @@ func (downloader *ChunkDownloader) AddFiles(snapshot *Snapshot, files [] *Entry)
|
|||||||
if file.EndChunk - file.StartChunk > maximumChunks {
|
if file.EndChunk - file.StartChunk > maximumChunks {
|
||||||
maximumChunks = file.EndChunk - file.StartChunk
|
maximumChunks = file.EndChunk - file.StartChunk
|
||||||
}
|
}
|
||||||
downloader.totalFileSize += file.Size
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -177,12 +177,6 @@ func (downloader *ChunkDownloader) Reclaim(chunkIndex int) {
|
|||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
for i := downloader.lastChunkIndex; i < chunkIndex; i++ {
|
|
||||||
if !downloader.taskList[i].isDownloading {
|
|
||||||
atomic.AddInt64(&downloader.downloadedFileSize, int64(downloader.taskList[i].chunkLength))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
for i, _ := range downloader.completedTasks {
|
for i, _ := range downloader.completedTasks {
|
||||||
if i < chunkIndex && downloader.taskList[i].chunk != nil {
|
if i < chunkIndex && downloader.taskList[i].chunk != nil {
|
||||||
downloader.config.PutChunk(downloader.taskList[i].chunk)
|
downloader.config.PutChunk(downloader.taskList[i].chunk)
|
||||||
@@ -320,7 +314,11 @@ func (downloader *ChunkDownloader) Download(threadIndex int, task ChunkDownloadT
|
|||||||
|
|
||||||
if !exist {
|
if !exist {
|
||||||
// A chunk is not found. This is a serious error and hopefully it will never happen.
|
// A chunk is not found. This is a serious error and hopefully it will never happen.
|
||||||
LOG_FATAL("DOWNLOAD_CHUNK", "Chunk %s can't be found", chunkID)
|
if err != nil {
|
||||||
|
LOG_FATAL("DOWNLOAD_CHUNK", "Chunk %s can't be found: %v", chunkID, err)
|
||||||
|
} else {
|
||||||
|
LOG_FATAL("DOWNLOAD_CHUNK", "Chunk %s can't be found", chunkID)
|
||||||
|
}
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
LOG_DEBUG("CHUNK_FOSSIL", "Chunk %s has been marked as a fossil", chunkID)
|
LOG_DEBUG("CHUNK_FOSSIL", "Chunk %s has been marked as a fossil", chunkID)
|
||||||
@@ -353,21 +351,20 @@ func (downloader *ChunkDownloader) Download(threadIndex int, task ChunkDownloadT
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
if (downloader.showStatistics || IsTracing()) && downloader.totalFileSize > 0 {
|
downloadedChunkSize := atomic.AddInt64(&downloader.downloadedChunkSize, int64(chunk.GetLength()))
|
||||||
|
|
||||||
atomic.AddInt64(&downloader.downloadedFileSize, int64(chunk.GetLength()))
|
if (downloader.showStatistics || IsTracing()) && downloader.totalChunkSize > 0 {
|
||||||
downloadFileSize := atomic.LoadInt64(&downloader.downloadedFileSize)
|
|
||||||
|
|
||||||
now := time.Now().Unix()
|
now := time.Now().Unix()
|
||||||
if now <= downloader.startTime {
|
if now <= downloader.startTime {
|
||||||
now = downloader.startTime + 1
|
now = downloader.startTime + 1
|
||||||
}
|
}
|
||||||
speed := downloadFileSize / (now - downloader.startTime)
|
speed := downloadedChunkSize / (now - downloader.startTime)
|
||||||
remainingTime := int64(0)
|
remainingTime := int64(0)
|
||||||
if speed > 0 {
|
if speed > 0 {
|
||||||
remainingTime = (downloader.totalFileSize - downloadFileSize) / speed + 1
|
remainingTime = (downloader.totalChunkSize - downloadedChunkSize) / speed + 1
|
||||||
}
|
}
|
||||||
percentage := float32(downloadFileSize * 1000 / downloader.totalFileSize)
|
percentage := float32(downloadedChunkSize * 1000 / downloader.totalChunkSize)
|
||||||
LOG_INFO("DOWNLOAD_PROGRESS", "Downloaded chunk %d size %d, %sB/s %s %.1f%%",
|
LOG_INFO("DOWNLOAD_PROGRESS", "Downloaded chunk %d size %d, %sB/s %s %.1f%%",
|
||||||
task.chunkIndex + 1, chunk.GetLength(),
|
task.chunkIndex + 1, chunk.GetLength(),
|
||||||
PrettySize(speed), PrettyTime(remainingTime), percentage / 10)
|
PrettySize(speed), PrettyTime(remainingTime), percentage / 10)
|
||||||
|
|||||||
@@ -1,6 +1,6 @@
|
|||||||
// Copyright (c) Acrosync LLC. All rights reserved.
|
// Copyright (c) Acrosync LLC. All rights reserved.
|
||||||
// Licensed under the Fair Source License 0.9 (https://fair.io/)
|
// Free for personal use and commercial trial
|
||||||
// User Limitation: 5 users
|
// Commercial use requires per-user licenses available from https://duplicacy.com
|
||||||
|
|
||||||
package duplicacy
|
package duplicacy
|
||||||
|
|
||||||
@@ -146,7 +146,6 @@ func (maker *ChunkMaker) ForEachChunk(reader io.Reader, endOfChunk func(chunk *C
|
|||||||
}
|
}
|
||||||
|
|
||||||
for {
|
for {
|
||||||
startNewChunk()
|
|
||||||
maker.bufferStart = 0
|
maker.bufferStart = 0
|
||||||
for maker.bufferStart < maker.minimumChunkSize && !isEOF {
|
for maker.bufferStart < maker.minimumChunkSize && !isEOF {
|
||||||
count, err := reader.Read(maker.buffer[maker.bufferStart : maker.minimumChunkSize])
|
count, err := reader.Read(maker.buffer[maker.bufferStart : maker.minimumChunkSize])
|
||||||
@@ -174,10 +173,14 @@ func (maker *ChunkMaker) ForEachChunk(reader io.Reader, endOfChunk func(chunk *C
|
|||||||
return
|
return
|
||||||
} else {
|
} else {
|
||||||
endOfChunk(chunk, false)
|
endOfChunk(chunk, false)
|
||||||
|
startNewChunk()
|
||||||
fileSize = 0
|
fileSize = 0
|
||||||
fileHasher = maker.config.NewFileHasher()
|
fileHasher = maker.config.NewFileHasher()
|
||||||
isEOF = false
|
isEOF = false
|
||||||
}
|
}
|
||||||
|
} else {
|
||||||
|
endOfChunk(chunk, false)
|
||||||
|
startNewChunk()
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -1,6 +1,6 @@
|
|||||||
// Copyright (c) Acrosync LLC. All rights reserved.
|
// Copyright (c) Acrosync LLC. All rights reserved.
|
||||||
// Licensed under the Fair Source License 0.9 (https://fair.io/)
|
// Free for personal use and commercial trial
|
||||||
// User Limitation: 5 users
|
// Commercial use requires per-user licenses available from https://duplicacy.com
|
||||||
|
|
||||||
package duplicacy
|
package duplicacy
|
||||||
|
|
||||||
|
|||||||
@@ -1,6 +1,6 @@
|
|||||||
// Copyright (c) Acrosync LLC. All rights reserved.
|
// Copyright (c) Acrosync LLC. All rights reserved.
|
||||||
// Licensed under the Fair Source License 0.9 (https://fair.io/)
|
// Free for personal use and commercial trial
|
||||||
// User Limitation: 5 users
|
// Commercial use requires per-user licenses available from https://duplicacy.com
|
||||||
|
|
||||||
package duplicacy
|
package duplicacy
|
||||||
|
|
||||||
@@ -92,6 +92,11 @@ func (uploader *ChunkUploader) Upload(threadIndex int, task ChunkUploadTask) boo
|
|||||||
chunkSize := chunk.GetLength()
|
chunkSize := chunk.GetLength()
|
||||||
chunkID := chunk.GetID()
|
chunkID := chunk.GetID()
|
||||||
|
|
||||||
|
// For a snapshot chunk, verify that its chunk id is correct
|
||||||
|
if uploader.snapshotCache != nil {
|
||||||
|
chunk.VerifyID()
|
||||||
|
}
|
||||||
|
|
||||||
if uploader.snapshotCache != nil && uploader.storage.IsCacheNeeded() {
|
if uploader.snapshotCache != nil && uploader.storage.IsCacheNeeded() {
|
||||||
// Save a copy to the local snapshot.
|
// Save a copy to the local snapshot.
|
||||||
chunkPath, exist, _, err := uploader.snapshotCache.FindChunk(threadIndex, chunkID, false)
|
chunkPath, exist, _, err := uploader.snapshotCache.FindChunk(threadIndex, chunkID, false)
|
||||||
@@ -117,7 +122,7 @@ func (uploader *ChunkUploader) Upload(threadIndex int, task ChunkUploadTask) boo
|
|||||||
// Chunk deduplication by name in effect here.
|
// Chunk deduplication by name in effect here.
|
||||||
LOG_DEBUG("CHUNK_DUPLICATE", "Chunk %s already exists", chunkID)
|
LOG_DEBUG("CHUNK_DUPLICATE", "Chunk %s already exists", chunkID)
|
||||||
|
|
||||||
uploader.completionFunc(chunk, task.chunkIndex, false, chunkSize, 0)
|
uploader.completionFunc(chunk, task.chunkIndex, true, chunkSize, 0)
|
||||||
atomic.AddInt32(&uploader.numberOfUploadingTasks, -1)
|
atomic.AddInt32(&uploader.numberOfUploadingTasks, -1)
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1,6 +1,6 @@
|
|||||||
// Copyright (c) Acrosync LLC. All rights reserved.
|
// Copyright (c) Acrosync LLC. All rights reserved.
|
||||||
// Licensed under the Fair Source License 0.9 (https://fair.io/)
|
// Free for personal use and commercial trial
|
||||||
// User Limitation: 5 users
|
// Commercial use requires per-user licenses available from https://duplicacy.com
|
||||||
|
|
||||||
package duplicacy
|
package duplicacy
|
||||||
|
|
||||||
@@ -104,7 +104,7 @@ func TestUploaderAndDownloader(t *testing.T) {
|
|||||||
|
|
||||||
|
|
||||||
chunkDownloader := CreateChunkDownloader(config, storage, nil, true, testThreads)
|
chunkDownloader := CreateChunkDownloader(config, storage, nil, true, testThreads)
|
||||||
chunkDownloader.totalFileSize = int64(totalFileSize)
|
chunkDownloader.totalChunkSize = int64(totalFileSize)
|
||||||
|
|
||||||
for _, chunk := range chunks {
|
for _, chunk := range chunks {
|
||||||
chunkDownloader.AddChunk(chunk.GetHash())
|
chunkDownloader.AddChunk(chunk.GetHash())
|
||||||
|
|||||||
@@ -1,6 +1,6 @@
|
|||||||
// Copyright (c) Acrosync LLC. All rights reserved.
|
// Copyright (c) Acrosync LLC. All rights reserved.
|
||||||
// Licensed under the Fair Source License 0.9 (https://fair.io/)
|
// Free for personal use and commercial trial
|
||||||
// User Limitation: 5 users
|
// Commercial use requires per-user licenses available from https://duplicacy.com
|
||||||
|
|
||||||
package duplicacy
|
package duplicacy
|
||||||
|
|
||||||
@@ -225,8 +225,41 @@ func (config *Config) NewKeyedHasher(key []byte) hash.Hash {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
var SkipFileHash = false
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
if value, found := os.LookupEnv("DUPLICACY_SKIP_FILE_HASH"); found && value != "" && value != "0" {
|
||||||
|
SkipFileHash = true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Implement a dummy hasher to be used when SkipFileHash is true.
|
||||||
|
type DummyHasher struct {
|
||||||
|
}
|
||||||
|
|
||||||
|
func (hasher *DummyHasher) Write(p []byte) (int, error) {
|
||||||
|
return len(p), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (hasher *DummyHasher) Sum(b []byte) []byte {
|
||||||
|
return []byte("")
|
||||||
|
}
|
||||||
|
|
||||||
|
func (hasher *DummyHasher) Reset() {
|
||||||
|
}
|
||||||
|
|
||||||
|
func (hasher *DummyHasher) Size() int {
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
|
||||||
|
func (hasher *DummyHasher) BlockSize() int {
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
|
||||||
func (config *Config) NewFileHasher() hash.Hash {
|
func (config *Config) NewFileHasher() hash.Hash {
|
||||||
if config.CompressionLevel == DEFAULT_COMPRESSION_LEVEL {
|
if SkipFileHash {
|
||||||
|
return &DummyHasher {}
|
||||||
|
} else if config.CompressionLevel == DEFAULT_COMPRESSION_LEVEL {
|
||||||
hasher, _ := blake2.New(&blake2.Config{ Size: 32 })
|
hasher, _ := blake2.New(&blake2.Config{ Size: 32 })
|
||||||
return hasher
|
return hasher
|
||||||
} else {
|
} else {
|
||||||
|
|||||||
@@ -1,6 +1,6 @@
|
|||||||
// Copyright (c) Acrosync LLC. All rights reserved.
|
// Copyright (c) Acrosync LLC. All rights reserved.
|
||||||
// Licensed under the Fair Source License 0.9 (https://fair.io/)
|
// Free for personal use and commercial trial
|
||||||
// User Limitation: 5 users
|
// Commercial use requires per-user licenses available from https://duplicacy.com
|
||||||
|
|
||||||
package duplicacy
|
package duplicacy
|
||||||
|
|
||||||
|
|||||||
@@ -1,7 +1,6 @@
|
|||||||
// Copyright (c) Acrosync LLC. All rights reserved.
|
// Copyright (c) Acrosync LLC. All rights reserved.
|
||||||
// Licensed under the Fair Source License 0.9 (https://fair.io/)
|
// Free for personal use and commercial trial
|
||||||
// User Limitation: 5 users
|
// Commercial use requires per-user licenses available from https://duplicacy.com
|
||||||
|
|
||||||
package duplicacy
|
package duplicacy
|
||||||
|
|
||||||
import (
|
import (
|
||||||
@@ -16,12 +15,14 @@ import (
|
|||||||
"encoding/json"
|
"encoding/json"
|
||||||
"encoding/base64"
|
"encoding/base64"
|
||||||
"strings"
|
"strings"
|
||||||
|
"runtime"
|
||||||
|
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|
||||||
// This is the hidden directory in the repository for storing various files.
|
// This is the hidden directory in the repository for storing various files.
|
||||||
var DUPLICACY_DIRECTORY = ".duplicacy"
|
var DUPLICACY_DIRECTORY = ".duplicacy"
|
||||||
|
var DUPLICACY_FILE = ".duplicacy"
|
||||||
|
|
||||||
// Regex for matching 'StartChunk:StartOffset:EndChunk:EndOffset'
|
// Regex for matching 'StartChunk:StartOffset:EndChunk:EndOffset'
|
||||||
var contentRegex = regexp.MustCompile(`^([0-9]+):([0-9]+):([0-9]+):([0-9]+)`)
|
var contentRegex = regexp.MustCompile(`^([0-9]+):([0-9]+):([0-9]+):([0-9]+)`)
|
||||||
@@ -488,7 +489,14 @@ func ListEntries(top string, path string, fileList *[]*Entry, patterns [] string
|
|||||||
skippedFiles = append(skippedFiles, entry.Path)
|
skippedFiles = append(skippedFiles, entry.Path)
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
entry = CreateEntryFromFileInfo(stat, "")
|
|
||||||
|
newEntry := CreateEntryFromFileInfo(stat, "")
|
||||||
|
if runtime.GOOS == "windows" {
|
||||||
|
// On Windows, stat.Name() is the last component of the target, so we need to construct the correct
|
||||||
|
// path from f.Name(); note that a "/" is append assuming a symbolic link is always a directory
|
||||||
|
newEntry.Path = filepath.Join(normalizedPath, f.Name()) + "/"
|
||||||
|
}
|
||||||
|
entry = newEntry
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -1,6 +1,6 @@
|
|||||||
// Copyright (c) Acrosync LLC. All rights reserved.
|
// Copyright (c) Acrosync LLC. All rights reserved.
|
||||||
// Licensed under the Fair Source License 0.9 (https://fair.io/)
|
// Free for personal use and commercial trial
|
||||||
// User Limitation: 5 users
|
// Commercial use requires per-user licenses available from https://duplicacy.com
|
||||||
|
|
||||||
package duplicacy
|
package duplicacy
|
||||||
|
|
||||||
|
|||||||
@@ -1,6 +1,6 @@
|
|||||||
// Copyright (c) Acrosync LLC. All rights reserved.
|
// Copyright (c) Acrosync LLC. All rights reserved.
|
||||||
// Licensed under the Fair Source License 0.9 (https://fair.io/)
|
// Free for personal use and commercial trial
|
||||||
// User Limitation: 5 users
|
// Commercial use requires per-user licenses available from https://duplicacy.com
|
||||||
|
|
||||||
package duplicacy
|
package duplicacy
|
||||||
|
|
||||||
|
|||||||
@@ -1,6 +1,6 @@
|
|||||||
// Copyright (c) Acrosync LLC. All rights reserved.
|
// Copyright (c) Acrosync LLC. All rights reserved.
|
||||||
// Licensed under the Fair Source License 0.9 (https://fair.io/)
|
// Free for personal use and commercial trial
|
||||||
// User Limitation: 5 users
|
// Commercial use requires per-user licenses available from https://duplicacy.com
|
||||||
|
|
||||||
package duplicacy
|
package duplicacy
|
||||||
|
|
||||||
@@ -18,19 +18,25 @@ import (
|
|||||||
type FileStorage struct {
|
type FileStorage struct {
|
||||||
RateLimitedStorage
|
RateLimitedStorage
|
||||||
|
|
||||||
|
minimumLevel int // The minimum level of directories to dive into before searching for the chunk file.
|
||||||
|
isCacheNeeded bool // Network storages require caching
|
||||||
storageDir string
|
storageDir string
|
||||||
numberOfThreads int
|
numberOfThreads int
|
||||||
}
|
}
|
||||||
|
|
||||||
// CreateFileStorage creates a file storage.
|
// CreateFileStorage creates a file storage.
|
||||||
func CreateFileStorage(storageDir string, threads int) (storage *FileStorage, err error) {
|
func CreateFileStorage(storageDir string, minimumLevel int, isCacheNeeded bool, threads int) (storage *FileStorage, err error) {
|
||||||
|
|
||||||
var stat os.FileInfo
|
var stat os.FileInfo
|
||||||
|
|
||||||
stat, err = os.Stat(storageDir)
|
stat, err = os.Stat(storageDir)
|
||||||
if os.IsNotExist(err) {
|
if err != nil {
|
||||||
err = os.MkdirAll(storageDir, 0744)
|
if os.IsNotExist(err) {
|
||||||
if err != nil {
|
err = os.MkdirAll(storageDir, 0744)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
} else {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
@@ -45,6 +51,8 @@ func CreateFileStorage(storageDir string, threads int) (storage *FileStorage, er
|
|||||||
|
|
||||||
storage = &FileStorage {
|
storage = &FileStorage {
|
||||||
storageDir : storageDir,
|
storageDir : storageDir,
|
||||||
|
minimumLevel: minimumLevel,
|
||||||
|
isCacheNeeded: isCacheNeeded,
|
||||||
numberOfThreads: threads,
|
numberOfThreads: threads,
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -128,16 +136,18 @@ func (storage *FileStorage) FindChunk(threadIndex int, chunkID string, isFossil
|
|||||||
suffix = ".fsl"
|
suffix = ".fsl"
|
||||||
}
|
}
|
||||||
|
|
||||||
// The minimum level of directories to dive into before searching for the chunk file.
|
|
||||||
minimumLevel := 2
|
|
||||||
|
|
||||||
for level := 0; level * 2 < len(chunkID); level ++ {
|
for level := 0; level * 2 < len(chunkID); level ++ {
|
||||||
if level >= minimumLevel {
|
if level >= storage.minimumLevel {
|
||||||
filePath = path.Join(dir, chunkID[2 * level:]) + suffix
|
filePath = path.Join(dir, chunkID[2 * level:]) + suffix
|
||||||
if stat, err := os.Stat(filePath); err == nil && !stat.IsDir() {
|
// Use Lstat() instead of Stat() since 1) Stat() doesn't work for deduplicated disks on Windows and 2) there isn't
|
||||||
|
// really a need to follow the link if filePath is a link.
|
||||||
|
stat, err := os.Lstat(filePath)
|
||||||
|
if err != nil {
|
||||||
|
LOG_DEBUG("FS_FIND", "File %s can't be found: %v", filePath, err)
|
||||||
|
} else if stat.IsDir() {
|
||||||
|
return filePath[len(storage.storageDir) + 1:], false, 0, fmt.Errorf("The path %s is a directory", filePath)
|
||||||
|
} else {
|
||||||
return filePath[len(storage.storageDir) + 1:], true, stat.Size(), nil
|
return filePath[len(storage.storageDir) + 1:], true, stat.Size(), nil
|
||||||
} else if err == nil && stat.IsDir() {
|
|
||||||
return filePath[len(storage.storageDir) + 1:], true, 0, fmt.Errorf("The path %s is a directory", filePath)
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -149,7 +159,7 @@ func (storage *FileStorage) FindChunk(threadIndex int, chunkID string, isFossil
|
|||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
if level < minimumLevel {
|
if level < storage.minimumLevel {
|
||||||
// Create the subdirectory if it doesn't exist.
|
// Create the subdirectory if it doesn't exist.
|
||||||
|
|
||||||
if err == nil && !stat.IsDir() {
|
if err == nil && !stat.IsDir() {
|
||||||
@@ -158,9 +168,12 @@ func (storage *FileStorage) FindChunk(threadIndex int, chunkID string, isFossil
|
|||||||
|
|
||||||
err = os.Mkdir(subDir, 0744)
|
err = os.Mkdir(subDir, 0744)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return "", false, 0, err
|
// The directory may have been created by other threads so check it again.
|
||||||
|
stat, _ := os.Stat(subDir)
|
||||||
|
if stat == nil || !stat.IsDir() {
|
||||||
|
return "", false, 0, err
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
dir = subDir
|
dir = subDir
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
@@ -170,9 +183,7 @@ func (storage *FileStorage) FindChunk(threadIndex int, chunkID string, isFossil
|
|||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
LOG_FATAL("CHUNK_FIND", "Chunk %s is still not found after having searched a maximum level of directories",
|
return "", false, 0, fmt.Errorf("The maximum level of directories searched")
|
||||||
chunkID)
|
|
||||||
return "", false, 0, nil
|
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -237,7 +248,7 @@ func (storage *FileStorage) UploadFile(threadIndex int, filePath string, content
|
|||||||
|
|
||||||
// If a local snapshot cache is needed for the storage to avoid downloading/uploading chunks too often when
|
// If a local snapshot cache is needed for the storage to avoid downloading/uploading chunks too often when
|
||||||
// managing snapshots.
|
// managing snapshots.
|
||||||
func (storage *FileStorage) IsCacheNeeded () (bool) { return false }
|
func (storage *FileStorage) IsCacheNeeded () (bool) { return storage.isCacheNeeded }
|
||||||
|
|
||||||
// If the 'MoveFile' method is implemented.
|
// If the 'MoveFile' method is implemented.
|
||||||
func (storage *FileStorage) IsMoveFileImplemented() (bool) { return true }
|
func (storage *FileStorage) IsMoveFileImplemented() (bool) { return true }
|
||||||
|
|||||||
@@ -1,6 +1,6 @@
|
|||||||
// Copyright (c) Acrosync LLC. All rights reserved.
|
// Copyright (c) Acrosync LLC. All rights reserved.
|
||||||
// Licensed under the Fair Source License 0.9 (https://fair.io/)
|
// Free for personal use and commercial trial
|
||||||
// User Limitation: 5 users
|
// Commercial use requires per-user licenses available from https://duplicacy.com
|
||||||
|
|
||||||
package duplicacy
|
package duplicacy
|
||||||
|
|
||||||
@@ -30,8 +30,9 @@ type GCDStorage struct {
|
|||||||
service *drive.Service
|
service *drive.Service
|
||||||
idCache map[string]string
|
idCache map[string]string
|
||||||
idCacheLock *sync.Mutex
|
idCacheLock *sync.Mutex
|
||||||
backoff int
|
backoffs []int
|
||||||
|
|
||||||
|
isConnected bool
|
||||||
numberOfThreads int
|
numberOfThreads int
|
||||||
TestMode bool
|
TestMode bool
|
||||||
|
|
||||||
@@ -44,12 +45,12 @@ type GCDConfig struct {
|
|||||||
Token oauth2.Token `json:"token"`
|
Token oauth2.Token `json:"token"`
|
||||||
}
|
}
|
||||||
|
|
||||||
func (storage *GCDStorage) shouldRetry(err error) (bool, error) {
|
func (storage *GCDStorage) shouldRetry(threadIndex int, err error) (bool, error) {
|
||||||
|
|
||||||
retry := false
|
retry := false
|
||||||
message := ""
|
message := ""
|
||||||
if err == nil {
|
if err == nil {
|
||||||
storage.backoff = 1
|
storage.backoffs[threadIndex] = 1
|
||||||
return false, nil
|
return false, nil
|
||||||
} else if e, ok := err.(*googleapi.Error); ok {
|
} else if e, ok := err.(*googleapi.Error); ok {
|
||||||
if 500 <= e.Code && e.Code < 600 {
|
if 500 <= e.Code && e.Code < 600 {
|
||||||
@@ -64,6 +65,12 @@ func (storage *GCDStorage) shouldRetry(err error) (bool, error) {
|
|||||||
// User Rate Limit Exceeded
|
// User Rate Limit Exceeded
|
||||||
message = "User Rate Limit Exceeded"
|
message = "User Rate Limit Exceeded"
|
||||||
retry = true
|
retry = true
|
||||||
|
} else if e.Code == 401 {
|
||||||
|
// Only retry on authorization error when storage has been connected before
|
||||||
|
if storage.isConnected {
|
||||||
|
message = "Authorization Error"
|
||||||
|
retry = true
|
||||||
|
}
|
||||||
}
|
}
|
||||||
} else if e, ok := err.(*url.Error); ok {
|
} else if e, ok := err.(*url.Error); ok {
|
||||||
message = e.Error()
|
message = e.Error()
|
||||||
@@ -77,15 +84,15 @@ func (storage *GCDStorage) shouldRetry(err error) (bool, error) {
|
|||||||
retry = err.Temporary()
|
retry = err.Temporary()
|
||||||
}
|
}
|
||||||
|
|
||||||
if !retry || storage.backoff >= 256{
|
if !retry || storage.backoffs[threadIndex] >= 256 {
|
||||||
storage.backoff = 1
|
storage.backoffs[threadIndex] = 1
|
||||||
return false, err
|
return false, err
|
||||||
}
|
}
|
||||||
|
|
||||||
delay := float32(storage.backoff) * rand.Float32()
|
delay := float32(storage.backoffs[threadIndex]) * rand.Float32()
|
||||||
LOG_DEBUG("GCD_RETRY", "%s; retrying after %.2f seconds", message, delay)
|
LOG_DEBUG("GCD_RETRY", "%s; retrying after %.2f seconds", message, delay)
|
||||||
time.Sleep(time.Duration(float32(storage.backoff) * float32(time.Second)))
|
time.Sleep(time.Duration(float32(storage.backoffs[threadIndex]) * float32(time.Second)))
|
||||||
storage.backoff *= 2
|
storage.backoffs[threadIndex] *= 2
|
||||||
return true, nil
|
return true, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -122,7 +129,7 @@ func (storage *GCDStorage) deletePathID(path string) {
|
|||||||
storage.idCacheLock.Unlock()
|
storage.idCacheLock.Unlock()
|
||||||
}
|
}
|
||||||
|
|
||||||
func (storage *GCDStorage) listFiles(parentID string, listFiles bool) ([]*drive.File, error) {
|
func (storage *GCDStorage) listFiles(threadIndex int, parentID string, listFiles bool) ([]*drive.File, error) {
|
||||||
|
|
||||||
if parentID == "" {
|
if parentID == "" {
|
||||||
return nil, fmt.Errorf("No parent ID provided")
|
return nil, fmt.Errorf("No parent ID provided")
|
||||||
@@ -150,7 +157,7 @@ func (storage *GCDStorage) listFiles(parentID string, listFiles bool) ([]*drive.
|
|||||||
|
|
||||||
for {
|
for {
|
||||||
fileList, err = storage.service.Files.List().Q(query).Fields("nextPageToken", "files(name, mimeType, id, size)").PageToken(startToken).PageSize(maxCount).Do()
|
fileList, err = storage.service.Files.List().Q(query).Fields("nextPageToken", "files(name, mimeType, id, size)").PageToken(startToken).PageSize(maxCount).Do()
|
||||||
if retry, e := storage.shouldRetry(err); e == nil && !retry {
|
if retry, e := storage.shouldRetry(threadIndex, err); e == nil && !retry {
|
||||||
break
|
break
|
||||||
} else if retry {
|
} else if retry {
|
||||||
continue
|
continue
|
||||||
@@ -171,7 +178,7 @@ func (storage *GCDStorage) listFiles(parentID string, listFiles bool) ([]*drive.
|
|||||||
return files, nil
|
return files, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (storage *GCDStorage) listByName(parentID string, name string) (string, bool, int64, error) {
|
func (storage *GCDStorage) listByName(threadIndex int, parentID string, name string) (string, bool, int64, error) {
|
||||||
|
|
||||||
var fileList *drive.FileList
|
var fileList *drive.FileList
|
||||||
var err error
|
var err error
|
||||||
@@ -180,7 +187,7 @@ func (storage *GCDStorage) listByName(parentID string, name string) (string, boo
|
|||||||
query := "name = '" + name + "' and '" + parentID + "' in parents"
|
query := "name = '" + name + "' and '" + parentID + "' in parents"
|
||||||
fileList, err = storage.service.Files.List().Q(query).Fields("files(name, mimeType, id, size)").Do()
|
fileList, err = storage.service.Files.List().Q(query).Fields("files(name, mimeType, id, size)").Do()
|
||||||
|
|
||||||
if retry, e := storage.shouldRetry(err); e == nil && !retry {
|
if retry, e := storage.shouldRetry(threadIndex, err); e == nil && !retry {
|
||||||
break
|
break
|
||||||
} else if retry {
|
} else if retry {
|
||||||
continue
|
continue
|
||||||
@@ -198,7 +205,7 @@ func (storage *GCDStorage) listByName(parentID string, name string) (string, boo
|
|||||||
return file.Id, file.MimeType == "application/vnd.google-apps.folder", file.Size, nil
|
return file.Id, file.MimeType == "application/vnd.google-apps.folder", file.Size, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (storage *GCDStorage) getIDFromPath(path string) (string, error) {
|
func (storage *GCDStorage) getIDFromPath(threadIndex int, path string) (string, error) {
|
||||||
|
|
||||||
fileID := "root"
|
fileID := "root"
|
||||||
|
|
||||||
@@ -224,7 +231,7 @@ func (storage *GCDStorage) getIDFromPath(path string) (string, error) {
|
|||||||
|
|
||||||
var err error
|
var err error
|
||||||
var isDir bool
|
var isDir bool
|
||||||
fileID, isDir, _, err = storage.listByName(fileID, name)
|
fileID, isDir, _, err = storage.listByName(threadIndex, fileID, name)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return "", err
|
return "", err
|
||||||
}
|
}
|
||||||
@@ -269,9 +276,10 @@ func CreateGCDStorage(tokenFile string, storagePath string, threads int) (storag
|
|||||||
numberOfThreads: threads,
|
numberOfThreads: threads,
|
||||||
idCache: make(map[string]string),
|
idCache: make(map[string]string),
|
||||||
idCacheLock: &sync.Mutex{},
|
idCacheLock: &sync.Mutex{},
|
||||||
|
backoffs: make([]int, threads),
|
||||||
}
|
}
|
||||||
|
|
||||||
storagePathID, err := storage.getIDFromPath(storagePath)
|
storagePathID, err := storage.getIDFromPath(0, storagePath)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
@@ -279,7 +287,7 @@ func CreateGCDStorage(tokenFile string, storagePath string, threads int) (storag
|
|||||||
storage.idCache[""] = storagePathID
|
storage.idCache[""] = storagePathID
|
||||||
|
|
||||||
for _, dir := range []string { "chunks", "snapshots", "fossils" } {
|
for _, dir := range []string { "chunks", "snapshots", "fossils" } {
|
||||||
dirID, isDir, _, err := storage.listByName(storagePathID, dir)
|
dirID, isDir, _, err := storage.listByName(0, storagePathID, dir)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
@@ -295,6 +303,8 @@ func CreateGCDStorage(tokenFile string, storagePath string, threads int) (storag
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
storage.isConnected = true
|
||||||
|
|
||||||
return storage, nil
|
return storage, nil
|
||||||
|
|
||||||
}
|
}
|
||||||
@@ -307,7 +317,7 @@ func (storage *GCDStorage) ListFiles(threadIndex int, dir string) ([]string, []i
|
|||||||
|
|
||||||
if dir == "snapshots" {
|
if dir == "snapshots" {
|
||||||
|
|
||||||
files, err := storage.listFiles(storage.getPathID(dir), false)
|
files, err := storage.listFiles(threadIndex, storage.getPathID(dir), false)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, nil, err
|
return nil, nil, err
|
||||||
}
|
}
|
||||||
@@ -320,12 +330,12 @@ func (storage *GCDStorage) ListFiles(threadIndex int, dir string) ([]string, []i
|
|||||||
}
|
}
|
||||||
return subDirs, nil, nil
|
return subDirs, nil, nil
|
||||||
} else if strings.HasPrefix(dir, "snapshots/") {
|
} else if strings.HasPrefix(dir, "snapshots/") {
|
||||||
pathID, err := storage.getIDFromPath(dir)
|
pathID, err := storage.getIDFromPath(threadIndex, dir)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, nil, err
|
return nil, nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
entries, err := storage.listFiles(pathID, true)
|
entries, err := storage.listFiles(threadIndex, pathID, true)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, nil, err
|
return nil, nil, err
|
||||||
}
|
}
|
||||||
@@ -342,7 +352,7 @@ func (storage *GCDStorage) ListFiles(threadIndex int, dir string) ([]string, []i
|
|||||||
sizes := []int64{}
|
sizes := []int64{}
|
||||||
|
|
||||||
for _, parent := range []string { "chunks", "fossils" } {
|
for _, parent := range []string { "chunks", "fossils" } {
|
||||||
entries, err := storage.listFiles(storage.getPathID(parent), true)
|
entries, err := storage.listFiles(threadIndex, storage.getPathID(parent), true)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, nil, err
|
return nil, nil, err
|
||||||
}
|
}
|
||||||
@@ -367,7 +377,7 @@ func (storage *GCDStorage) DeleteFile(threadIndex int, filePath string) (err err
|
|||||||
filePath = storage.convertFilePath(filePath)
|
filePath = storage.convertFilePath(filePath)
|
||||||
fileID, ok := storage.findPathID(filePath)
|
fileID, ok := storage.findPathID(filePath)
|
||||||
if !ok {
|
if !ok {
|
||||||
fileID, err = storage.getIDFromPath(filePath)
|
fileID, err = storage.getIDFromPath(threadIndex, filePath)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
LOG_TRACE("GCD_STORAGE", "Ignored file deletion error: %v", err)
|
LOG_TRACE("GCD_STORAGE", "Ignored file deletion error: %v", err)
|
||||||
return nil
|
return nil
|
||||||
@@ -376,7 +386,7 @@ func (storage *GCDStorage) DeleteFile(threadIndex int, filePath string) (err err
|
|||||||
|
|
||||||
for {
|
for {
|
||||||
err = storage.service.Files.Delete(fileID).Fields("id").Do()
|
err = storage.service.Files.Delete(fileID).Fields("id").Do()
|
||||||
if retry, err := storage.shouldRetry(err); err == nil && !retry {
|
if retry, err := storage.shouldRetry(threadIndex, err); err == nil && !retry {
|
||||||
storage.deletePathID(filePath)
|
storage.deletePathID(filePath)
|
||||||
return nil
|
return nil
|
||||||
} else if retry {
|
} else if retry {
|
||||||
@@ -411,7 +421,7 @@ func (storage *GCDStorage) MoveFile(threadIndex int, from string, to string) (er
|
|||||||
|
|
||||||
for {
|
for {
|
||||||
_, err = storage.service.Files.Update(fileID, nil).AddParents(toParentID).RemoveParents(fromParentID).Do()
|
_, err = storage.service.Files.Update(fileID, nil).AddParents(toParentID).RemoveParents(fromParentID).Do()
|
||||||
if retry, err := storage.shouldRetry(err); err == nil && !retry {
|
if retry, err := storage.shouldRetry(threadIndex, err); err == nil && !retry {
|
||||||
break
|
break
|
||||||
} else if retry {
|
} else if retry {
|
||||||
continue
|
continue
|
||||||
@@ -460,7 +470,7 @@ func (storage *GCDStorage) CreateDirectory(threadIndex int, dir string) (err err
|
|||||||
|
|
||||||
for {
|
for {
|
||||||
file, err = storage.service.Files.Create(file).Fields("id").Do()
|
file, err = storage.service.Files.Create(file).Fields("id").Do()
|
||||||
if retry, err := storage.shouldRetry(err); err == nil && !retry {
|
if retry, err := storage.shouldRetry(threadIndex, err); err == nil && !retry {
|
||||||
break
|
break
|
||||||
} else if retry {
|
} else if retry {
|
||||||
continue
|
continue
|
||||||
@@ -486,12 +496,12 @@ func (storage *GCDStorage) GetFileInfo(threadIndex int, filePath string) (exist
|
|||||||
if dir == "." {
|
if dir == "." {
|
||||||
dir = ""
|
dir = ""
|
||||||
}
|
}
|
||||||
dirID, err := storage.getIDFromPath(dir)
|
dirID, err := storage.getIDFromPath(threadIndex, dir)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return false, false, 0, err
|
return false, false, 0, err
|
||||||
}
|
}
|
||||||
|
|
||||||
fileID, isDir, size, err = storage.listByName(dirID, path.Base(filePath))
|
fileID, isDir, size, err = storage.listByName(threadIndex, dirID, path.Base(filePath))
|
||||||
if fileID != "" {
|
if fileID != "" {
|
||||||
storage.savePathID(filePath, fileID)
|
storage.savePathID(filePath, fileID)
|
||||||
}
|
}
|
||||||
@@ -500,7 +510,7 @@ func (storage *GCDStorage) GetFileInfo(threadIndex int, filePath string) (exist
|
|||||||
|
|
||||||
for {
|
for {
|
||||||
file, err := storage.service.Files.Get(fileID).Fields("id, mimeType").Do()
|
file, err := storage.service.Files.Get(fileID).Fields("id, mimeType").Do()
|
||||||
if retry, err := storage.shouldRetry(err); err == nil && !retry {
|
if retry, err := storage.shouldRetry(threadIndex, err); err == nil && !retry {
|
||||||
return true, file.MimeType == "application/vnd.google-apps.folder", file.Size, nil
|
return true, file.MimeType == "application/vnd.google-apps.folder", file.Size, nil
|
||||||
} else if retry {
|
} else if retry {
|
||||||
continue
|
continue
|
||||||
@@ -524,7 +534,7 @@ func (storage *GCDStorage) FindChunk(threadIndex int, chunkID string, isFossil b
|
|||||||
}
|
}
|
||||||
|
|
||||||
fileID := ""
|
fileID := ""
|
||||||
fileID, _, size, err = storage.listByName(parentID, chunkID)
|
fileID, _, size, err = storage.listByName(threadIndex, parentID, chunkID)
|
||||||
if fileID != "" {
|
if fileID != "" {
|
||||||
storage.savePathID(realPath, fileID)
|
storage.savePathID(realPath, fileID)
|
||||||
}
|
}
|
||||||
@@ -536,7 +546,7 @@ func (storage *GCDStorage) DownloadFile(threadIndex int, filePath string, chunk
|
|||||||
// We never download the fossil so there is no need to convert the path
|
// We never download the fossil so there is no need to convert the path
|
||||||
fileID, ok := storage.findPathID(filePath)
|
fileID, ok := storage.findPathID(filePath)
|
||||||
if !ok {
|
if !ok {
|
||||||
fileID, err = storage.getIDFromPath(filePath)
|
fileID, err = storage.getIDFromPath(threadIndex, filePath)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@@ -547,7 +557,7 @@ func (storage *GCDStorage) DownloadFile(threadIndex int, filePath string, chunk
|
|||||||
|
|
||||||
for {
|
for {
|
||||||
response, err = storage.service.Files.Get(fileID).Download()
|
response, err = storage.service.Files.Get(fileID).Download()
|
||||||
if retry, err := storage.shouldRetry(err); err == nil && !retry {
|
if retry, err := storage.shouldRetry(threadIndex, err); err == nil && !retry {
|
||||||
break
|
break
|
||||||
} else if retry {
|
} else if retry {
|
||||||
continue
|
continue
|
||||||
@@ -574,7 +584,7 @@ func (storage *GCDStorage) UploadFile(threadIndex int, filePath string, content
|
|||||||
|
|
||||||
parentID, ok := storage.findPathID(parent)
|
parentID, ok := storage.findPathID(parent)
|
||||||
if !ok {
|
if !ok {
|
||||||
parentID, err = storage.getIDFromPath(parent)
|
parentID, err = storage.getIDFromPath(threadIndex, parent)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@@ -590,7 +600,7 @@ func (storage *GCDStorage) UploadFile(threadIndex int, filePath string, content
|
|||||||
for {
|
for {
|
||||||
reader := CreateRateLimitedReader(content, storage.UploadRateLimit / storage.numberOfThreads)
|
reader := CreateRateLimitedReader(content, storage.UploadRateLimit / storage.numberOfThreads)
|
||||||
_, err = storage.service.Files.Create(file).Media(reader).Fields("id").Do()
|
_, err = storage.service.Files.Create(file).Media(reader).Fields("id").Do()
|
||||||
if retry, err := storage.shouldRetry(err); err == nil && !retry {
|
if retry, err := storage.shouldRetry(threadIndex, err); err == nil && !retry {
|
||||||
break
|
break
|
||||||
} else if retry {
|
} else if retry {
|
||||||
continue
|
continue
|
||||||
|
|||||||
@@ -1,6 +1,6 @@
|
|||||||
// Copyright (c) Acrosync LLC. All rights reserved.
|
// Copyright (c) Acrosync LLC. All rights reserved.
|
||||||
// Licensed under the Fair Source License 0.9 (https://fair.io/)
|
// Free for personal use and commercial trial
|
||||||
// User Limitation: 5 users
|
// Commercial use requires per-user licenses available from https://duplicacy.com
|
||||||
|
|
||||||
package duplicacy
|
package duplicacy
|
||||||
|
|
||||||
|
|||||||
@@ -1,11 +1,12 @@
|
|||||||
// Copyright (c) Acrosync LLC. All rights reserved.
|
// Copyright (c) Acrosync LLC. All rights reserved.
|
||||||
// Licensed under the Fair Source License 0.9 (https://fair.io/)
|
// Free for personal use and commercial trial
|
||||||
// User Limitation: 5 users
|
// Commercial use requires per-user licenses available from https://duplicacy.com
|
||||||
|
|
||||||
package duplicacy
|
package duplicacy
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"fmt"
|
"fmt"
|
||||||
|
"net"
|
||||||
"time"
|
"time"
|
||||||
"sync"
|
"sync"
|
||||||
"bytes"
|
"bytes"
|
||||||
@@ -64,7 +65,17 @@ func NewHubicClient(tokenFile string) (*HubicClient, error) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
client := &HubicClient{
|
client := &HubicClient{
|
||||||
HTTPClient: http.DefaultClient,
|
HTTPClient: &http.Client {
|
||||||
|
Transport: &http.Transport {
|
||||||
|
Dial: (&net.Dialer{
|
||||||
|
Timeout: 30 * time.Second,
|
||||||
|
KeepAlive: 30 * time.Second,
|
||||||
|
}).Dial,
|
||||||
|
TLSHandshakeTimeout: 60 * time.Second,
|
||||||
|
ResponseHeaderTimeout: 30 * time.Second,
|
||||||
|
ExpectContinueTimeout: 10 * time.Second,
|
||||||
|
},
|
||||||
|
},
|
||||||
TokenFile: tokenFile,
|
TokenFile: tokenFile,
|
||||||
Token: token,
|
Token: token,
|
||||||
TokenLock: &sync.Mutex{},
|
TokenLock: &sync.Mutex{},
|
||||||
|
|||||||
@@ -1,6 +1,6 @@
|
|||||||
// Copyright (c) Acrosync LLC. All rights reserved.
|
// Copyright (c) Acrosync LLC. All rights reserved.
|
||||||
// Licensed under the Fair Source License 0.9 (https://fair.io/)
|
// Free for personal use and commercial trial
|
||||||
// User Limitation: 5 users
|
// Commercial use requires per-user licenses available from https://duplicacy.com
|
||||||
|
|
||||||
package duplicacy
|
package duplicacy
|
||||||
|
|
||||||
|
|||||||
@@ -1,6 +1,6 @@
|
|||||||
// Copyright (c) Acrosync LLC. All rights reserved.
|
// Copyright (c) Acrosync LLC. All rights reserved.
|
||||||
// Licensed under the Fair Source License 0.9 (https://fair.io/)
|
// Free for personal use and commercial trial
|
||||||
// User Limitation: 5 users
|
// Commercial use requires per-user licenses available from https://duplicacy.com
|
||||||
|
|
||||||
package duplicacy
|
package duplicacy
|
||||||
|
|
||||||
|
|||||||
@@ -1,6 +1,6 @@
|
|||||||
// Copyright (c) Acrosync LLC. All rights reserved.
|
// Copyright (c) Acrosync LLC. All rights reserved.
|
||||||
// Licensed under the Fair Source License 0.9 (https://fair.io/)
|
// Free for personal use and commercial trial
|
||||||
// User Limitation: 5 users
|
// Commercial use requires per-user licenses available from https://duplicacy.com
|
||||||
|
|
||||||
// +build !windows
|
// +build !windows
|
||||||
|
|
||||||
|
|||||||
@@ -1,6 +1,6 @@
|
|||||||
// Copyright (c) Acrosync LLC. All rights reserved.
|
// Copyright (c) Acrosync LLC. All rights reserved.
|
||||||
// Licensed under the Fair Source License 0.9 (https://fair.io/)
|
// Free for personal use and commercial trial
|
||||||
// User Limitation: 5 users
|
// Commercial use requires per-user licenses available from https://duplicacy.com
|
||||||
|
|
||||||
package duplicacy
|
package duplicacy
|
||||||
|
|
||||||
|
|||||||
@@ -1,6 +1,6 @@
|
|||||||
// Copyright (c) Acrosync LLC. All rights reserved.
|
// Copyright (c) Acrosync LLC. All rights reserved.
|
||||||
// Licensed under the Fair Source License 0.9 (https://fair.io/)
|
// Free for personal use and commercial trial
|
||||||
// User Limitation: 5 users
|
// Commercial use requires per-user licenses available from https://duplicacy.com
|
||||||
|
|
||||||
package duplicacy
|
package duplicacy
|
||||||
|
|
||||||
@@ -23,6 +23,7 @@ const (
|
|||||||
ASSERT = 4
|
ASSERT = 4
|
||||||
)
|
)
|
||||||
|
|
||||||
|
var LogFunction func(level int, logID string, message string)
|
||||||
|
|
||||||
var printLogHeader = false
|
var printLogHeader = false
|
||||||
|
|
||||||
@@ -117,6 +118,11 @@ func logf(level int, logID string, format string, v ...interface{}) {
|
|||||||
|
|
||||||
message := fmt.Sprintf(format, v...)
|
message := fmt.Sprintf(format, v...)
|
||||||
|
|
||||||
|
if LogFunction != nil {
|
||||||
|
LogFunction(level, logID, message)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
now := time.Now()
|
now := time.Now()
|
||||||
|
|
||||||
// Uncomment this line to enable unbufferred logging for tests
|
// Uncomment this line to enable unbufferred logging for tests
|
||||||
@@ -160,6 +166,9 @@ const (
|
|||||||
otherExitCode = 101
|
otherExitCode = 101
|
||||||
)
|
)
|
||||||
|
|
||||||
|
// This is the function to be called before exiting when an error occurs.
|
||||||
|
var RunAtError func() = func() {}
|
||||||
|
|
||||||
func CatchLogException() {
|
func CatchLogException() {
|
||||||
if r := recover(); r != nil {
|
if r := recover(); r != nil {
|
||||||
switch e := r.(type) {
|
switch e := r.(type) {
|
||||||
@@ -167,10 +176,12 @@ func CatchLogException() {
|
|||||||
if printStackTrace {
|
if printStackTrace {
|
||||||
debug.PrintStack()
|
debug.PrintStack()
|
||||||
}
|
}
|
||||||
|
RunAtError()
|
||||||
os.Exit(duplicacyExitCode)
|
os.Exit(duplicacyExitCode)
|
||||||
default:
|
default:
|
||||||
fmt.Fprintf(os.Stderr, "%v\n", e)
|
fmt.Fprintf(os.Stderr, "%v\n", e)
|
||||||
debug.PrintStack()
|
debug.PrintStack()
|
||||||
|
RunAtError()
|
||||||
os.Exit(otherExitCode)
|
os.Exit(otherExitCode)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1,6 +1,6 @@
|
|||||||
// Copyright (c) Acrosync LLC. All rights reserved.
|
// Copyright (c) Acrosync LLC. All rights reserved.
|
||||||
// Licensed under the Fair Source License 0.9 (https://fair.io/)
|
// Free for personal use and commercial trial
|
||||||
// User Limitation: 5 users
|
// Commercial use requires per-user licenses available from https://duplicacy.com
|
||||||
|
|
||||||
package duplicacy
|
package duplicacy
|
||||||
|
|
||||||
@@ -9,6 +9,7 @@ import (
|
|||||||
"time"
|
"time"
|
||||||
"sync"
|
"sync"
|
||||||
"bytes"
|
"bytes"
|
||||||
|
"strings"
|
||||||
"io/ioutil"
|
"io/ioutil"
|
||||||
"encoding/json"
|
"encoding/json"
|
||||||
"io"
|
"io"
|
||||||
@@ -41,6 +42,7 @@ type OneDriveClient struct {
|
|||||||
Token *oauth2.Token
|
Token *oauth2.Token
|
||||||
TokenLock *sync.Mutex
|
TokenLock *sync.Mutex
|
||||||
|
|
||||||
|
IsConnected bool
|
||||||
TestMode bool
|
TestMode bool
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -87,7 +89,7 @@ func (client *OneDriveClient) call(url string, method string, input interface{},
|
|||||||
case []byte:
|
case []byte:
|
||||||
inputReader = bytes.NewReader(input.([]byte))
|
inputReader = bytes.NewReader(input.([]byte))
|
||||||
case int:
|
case int:
|
||||||
inputReader = bytes.NewReader([]byte(""))
|
inputReader = nil
|
||||||
case *bytes.Buffer:
|
case *bytes.Buffer:
|
||||||
inputReader = bytes.NewReader(input.(*bytes.Buffer).Bytes())
|
inputReader = bytes.NewReader(input.(*bytes.Buffer).Bytes())
|
||||||
case *RateLimitedReader:
|
case *RateLimitedReader:
|
||||||
@@ -115,9 +117,27 @@ func (client *OneDriveClient) call(url string, method string, input interface{},
|
|||||||
|
|
||||||
response, err = client.HTTPClient.Do(request)
|
response, err = client.HTTPClient.Do(request)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
if client.IsConnected {
|
||||||
|
if strings.Contains(err.Error(), "TLS handshake timeout") {
|
||||||
|
// Give a long timeout regardless of backoff when a TLS timeout happens, hoping that
|
||||||
|
// idle connections are not to be reused on reconnect.
|
||||||
|
retryAfter := time.Duration(rand.Float32() * 60000 + 180000)
|
||||||
|
LOG_INFO("ONEDRIVE_RETRY", "TLS handshake timeout; retry after %d milliseconds", retryAfter)
|
||||||
|
time.Sleep(retryAfter * time.Millisecond)
|
||||||
|
} else {
|
||||||
|
// For all other errors just blindly retry until the maximum is reached
|
||||||
|
retryAfter := time.Duration(rand.Float32() * 1000.0 * float32(backoff))
|
||||||
|
LOG_INFO("ONEDRIVE_RETRY", "%v; retry after %d milliseconds", err, retryAfter)
|
||||||
|
time.Sleep(retryAfter * time.Millisecond)
|
||||||
|
}
|
||||||
|
backoff *= 2
|
||||||
|
continue
|
||||||
|
}
|
||||||
return nil, 0, err
|
return nil, 0, err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
client.IsConnected = true
|
||||||
|
|
||||||
if response.StatusCode < 400 {
|
if response.StatusCode < 400 {
|
||||||
return response.Body, response.ContentLength, nil
|
return response.Body, response.ContentLength, nil
|
||||||
}
|
}
|
||||||
@@ -128,12 +148,6 @@ func (client *OneDriveClient) call(url string, method string, input interface{},
|
|||||||
Error: OneDriveError { Status: response.StatusCode },
|
Error: OneDriveError { Status: response.StatusCode },
|
||||||
}
|
}
|
||||||
|
|
||||||
if err := json.NewDecoder(response.Body).Decode(errorResponse); err != nil {
|
|
||||||
return nil, 0, OneDriveError { Status: response.StatusCode, Message: fmt.Sprintf("Unexpected response"), }
|
|
||||||
}
|
|
||||||
|
|
||||||
errorResponse.Error.Status = response.StatusCode
|
|
||||||
|
|
||||||
if response.StatusCode == 401 {
|
if response.StatusCode == 401 {
|
||||||
|
|
||||||
if url == OneDriveRefreshTokenURL {
|
if url == OneDriveRefreshTokenURL {
|
||||||
@@ -145,13 +159,18 @@ func (client *OneDriveClient) call(url string, method string, input interface{},
|
|||||||
return nil, 0, err
|
return nil, 0, err
|
||||||
}
|
}
|
||||||
continue
|
continue
|
||||||
} else if response.StatusCode == 500 || response.StatusCode == 503 || response.StatusCode == 509 {
|
} else if response.StatusCode > 401 && response.StatusCode != 404 {
|
||||||
retryAfter := time.Duration(rand.Float32() * 1000.0 * float32(backoff))
|
retryAfter := time.Duration(rand.Float32() * 1000.0 * float32(backoff))
|
||||||
LOG_INFO("ONEDRIVE_RETRY", "Response status: %d; retry after %d milliseconds", response.StatusCode, retryAfter)
|
LOG_INFO("ONEDRIVE_RETRY", "Response code: %d; retry after %d milliseconds", response.StatusCode, retryAfter)
|
||||||
time.Sleep(retryAfter * time.Millisecond)
|
time.Sleep(retryAfter * time.Millisecond)
|
||||||
backoff *= 2
|
backoff *= 2
|
||||||
continue
|
continue
|
||||||
} else {
|
} else {
|
||||||
|
if err := json.NewDecoder(response.Body).Decode(errorResponse); err != nil {
|
||||||
|
return nil, 0, OneDriveError { Status: response.StatusCode, Message: fmt.Sprintf("Unexpected response"), }
|
||||||
|
}
|
||||||
|
|
||||||
|
errorResponse.Error.Status = response.StatusCode
|
||||||
return nil, 0, errorResponse.Error
|
return nil, 0, errorResponse.Error
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -169,7 +188,7 @@ func (client *OneDriveClient) RefreshToken() (err error) {
|
|||||||
|
|
||||||
readCloser, _, err := client.call(OneDriveRefreshTokenURL, "POST", client.Token, "")
|
readCloser, _, err := client.call(OneDriveRefreshTokenURL, "POST", client.Token, "")
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return fmt.Errorf("failed to refresh the access token: %v", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
defer readCloser.Close()
|
defer readCloser.Close()
|
||||||
|
|||||||
@@ -1,6 +1,6 @@
|
|||||||
// Copyright (c) Acrosync LLC. All rights reserved.
|
// Copyright (c) Acrosync LLC. All rights reserved.
|
||||||
// Licensed under the Fair Source License 0.9 (https://fair.io/)
|
// Free for personal use and commercial trial
|
||||||
// User Limitation: 5 users
|
// Commercial use requires per-user licenses available from https://duplicacy.com
|
||||||
|
|
||||||
package duplicacy
|
package duplicacy
|
||||||
|
|
||||||
|
|||||||
@@ -1,6 +1,6 @@
|
|||||||
// Copyright (c) Acrosync LLC. All rights reserved.
|
// Copyright (c) Acrosync LLC. All rights reserved.
|
||||||
// Licensed under the Fair Source License 0.9 (https://fair.io/)
|
// Free for personal use and commercial trial
|
||||||
// User Limitation: 5 users
|
// Commercial use requires per-user licenses available from https://duplicacy.com
|
||||||
|
|
||||||
package duplicacy
|
package duplicacy
|
||||||
|
|
||||||
|
|||||||
@@ -1,6 +1,6 @@
|
|||||||
// Copyright (c) Acrosync LLC. All rights reserved.
|
// Copyright (c) Acrosync LLC. All rights reserved.
|
||||||
// Licensed under the Fair Source License 0.9 (https://fair.io/)
|
// Free for personal use and commercial trial
|
||||||
// User Limitation: 5 users
|
// Commercial use requires per-user licenses available from https://duplicacy.com
|
||||||
|
|
||||||
package duplicacy
|
package duplicacy
|
||||||
|
|
||||||
@@ -9,6 +9,7 @@ import (
|
|||||||
"path"
|
"path"
|
||||||
"io/ioutil"
|
"io/ioutil"
|
||||||
"reflect"
|
"reflect"
|
||||||
|
"os"
|
||||||
)
|
)
|
||||||
|
|
||||||
// Preference stores options for each storage.
|
// Preference stores options for each storage.
|
||||||
@@ -23,11 +24,39 @@ type Preference struct {
|
|||||||
Keys map[string]string `json:"keys"`
|
Keys map[string]string `json:"keys"`
|
||||||
}
|
}
|
||||||
|
|
||||||
|
var preferencePath string
|
||||||
var Preferences [] Preference
|
var Preferences [] Preference
|
||||||
|
|
||||||
func LoadPreferences(repository string) (bool) {
|
func LoadPreferences(repository string) bool {
|
||||||
|
|
||||||
|
preferencePath = path.Join(repository, DUPLICACY_DIRECTORY)
|
||||||
|
|
||||||
|
stat, err := os.Stat(preferencePath)
|
||||||
|
if err != nil {
|
||||||
|
LOG_ERROR("PREFERENCE_PATH", "Failed to retrieve the information about the directory %s: %v", repository, err)
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
if !stat.IsDir() {
|
||||||
|
content, err := ioutil.ReadFile(preferencePath)
|
||||||
|
if err != nil {
|
||||||
|
LOG_ERROR("DOT_DUPLICACY_PATH", "Failed to locate the preference path: %v", err)
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
realPreferencePath := string(content)
|
||||||
|
stat, err := os.Stat(realPreferencePath)
|
||||||
|
if err != nil {
|
||||||
|
LOG_ERROR("PREFERENCE_PATH", "Failed to retrieve the information about the directory %s: %v", content, err)
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
if !stat.IsDir() {
|
||||||
|
LOG_ERROR("PREFERENCE_PATH", "The preference path %s is not a directory", realPreferencePath)
|
||||||
|
}
|
||||||
|
|
||||||
description, err := ioutil.ReadFile(path.Join(repository, DUPLICACY_DIRECTORY, "preferences"))
|
preferencePath = realPreferencePath
|
||||||
|
}
|
||||||
|
|
||||||
|
description, err := ioutil.ReadFile(path.Join(preferencePath, "preferences"))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
LOG_ERROR("PREFERENCE_OPEN", "Failed to read the preference file from repository %s: %v", repository, err)
|
LOG_ERROR("PREFERENCE_OPEN", "Failed to read the preference file from repository %s: %v", repository, err)
|
||||||
return false
|
return false
|
||||||
@@ -47,15 +76,29 @@ func LoadPreferences(repository string) (bool) {
|
|||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
|
|
||||||
func SavePreferences(repository string) (bool) {
|
func GetDuplicacyPreferencePath() string {
|
||||||
|
if preferencePath == "" {
|
||||||
|
LOG_ERROR("PREFERENCE_PATH", "The preference path has not been set")
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
return preferencePath
|
||||||
|
}
|
||||||
|
|
||||||
|
// Normally 'preferencePath' is set in LoadPreferences; however, if LoadPreferences is not called, this function
|
||||||
|
// provide another change to set 'preferencePath'
|
||||||
|
func SetDuplicacyPreferencePath(p string) {
|
||||||
|
preferencePath = p
|
||||||
|
}
|
||||||
|
|
||||||
|
func SavePreferences() (bool) {
|
||||||
description, err := json.MarshalIndent(Preferences, "", " ")
|
description, err := json.MarshalIndent(Preferences, "", " ")
|
||||||
if err != nil {
|
if err != nil {
|
||||||
LOG_ERROR("PREFERENCE_MARSHAL", "Failed to marshal the repository preferences: %v", err)
|
LOG_ERROR("PREFERENCE_MARSHAL", "Failed to marshal the repository preferences: %v", err)
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
|
preferenceFile := path.Join(GetDuplicacyPreferencePath(), "preferences")
|
||||||
preferenceFile := path.Join(repository, DUPLICACY_DIRECTORY, "/preferences")
|
|
||||||
err = ioutil.WriteFile(preferenceFile, description, 0644)
|
err = ioutil.WriteFile(preferenceFile, description, 0600)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
LOG_ERROR("PREFERENCE_WRITE", "Failed to save the preference file %s: %v", preferenceFile, err)
|
LOG_ERROR("PREFERENCE_WRITE", "Failed to save the preference file %s: %v", preferenceFile, err)
|
||||||
return false
|
return false
|
||||||
@@ -65,9 +108,9 @@ func SavePreferences(repository string) (bool) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func FindPreference(name string) (*Preference) {
|
func FindPreference(name string) (*Preference) {
|
||||||
for _, preference := range Preferences {
|
for i, preference := range Preferences {
|
||||||
if preference.Name == name || preference.StorageURL == name {
|
if preference.Name == name || preference.StorageURL == name {
|
||||||
return &preference
|
return &Preferences[i]
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
212
src/duplicacy_s3cstorage.go
Normal file
212
src/duplicacy_s3cstorage.go
Normal file
@@ -0,0 +1,212 @@
|
|||||||
|
// Copyright (c) Acrosync LLC. All rights reserved.
|
||||||
|
// Free for personal use and commercial trial
|
||||||
|
// Commercial use requires per-user licenses available from https://duplicacy.com
|
||||||
|
|
||||||
|
package duplicacy
|
||||||
|
|
||||||
|
import (
|
||||||
|
"time"
|
||||||
|
"github.com/gilbertchen/goamz/aws"
|
||||||
|
"github.com/gilbertchen/goamz/s3"
|
||||||
|
)
|
||||||
|
|
||||||
|
// S3CStorage is a storage backend for s3 compatible storages that require V2 Signing.
|
||||||
|
type S3CStorage struct {
|
||||||
|
RateLimitedStorage
|
||||||
|
|
||||||
|
buckets []*s3.Bucket
|
||||||
|
storageDir string
|
||||||
|
}
|
||||||
|
|
||||||
|
// CreateS3CStorage creates a amazon s3 storage object.
|
||||||
|
func CreateS3CStorage(regionName string, endpoint string, bucketName string, storageDir string,
|
||||||
|
accessKey string, secretKey string, threads int) (storage *S3CStorage, err error) {
|
||||||
|
|
||||||
|
var region aws.Region
|
||||||
|
|
||||||
|
if endpoint == "" {
|
||||||
|
if regionName == "" {
|
||||||
|
regionName = "us-east-1"
|
||||||
|
}
|
||||||
|
region = aws.Regions[regionName]
|
||||||
|
} else {
|
||||||
|
region = aws.Region { Name: regionName, S3Endpoint:"https://" + endpoint }
|
||||||
|
}
|
||||||
|
|
||||||
|
auth := aws.Auth{ AccessKey: accessKey, SecretKey: secretKey }
|
||||||
|
|
||||||
|
var buckets []*s3.Bucket
|
||||||
|
for i := 0; i < threads; i++ {
|
||||||
|
s3Client := s3.New(auth, region)
|
||||||
|
s3Client.AttemptStrategy = aws.AttemptStrategy{
|
||||||
|
Min: 8,
|
||||||
|
Total: 300 * time.Second,
|
||||||
|
Delay: 1000 * time.Millisecond,
|
||||||
|
}
|
||||||
|
|
||||||
|
bucket := s3Client.Bucket(bucketName)
|
||||||
|
buckets = append(buckets, bucket)
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(storageDir) > 0 && storageDir[len(storageDir) - 1] != '/' {
|
||||||
|
storageDir += "/"
|
||||||
|
}
|
||||||
|
|
||||||
|
storage = &S3CStorage {
|
||||||
|
buckets: buckets,
|
||||||
|
storageDir: storageDir,
|
||||||
|
}
|
||||||
|
|
||||||
|
return storage, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// ListFiles return the list of files and subdirectories under 'dir' (non-recursively)
|
||||||
|
func (storage *S3CStorage) ListFiles(threadIndex int, dir string) (files []string, sizes []int64, err error) {
|
||||||
|
if len(dir) > 0 && dir[len(dir) - 1] != '/' {
|
||||||
|
dir += "/"
|
||||||
|
}
|
||||||
|
|
||||||
|
dirLength := len(storage.storageDir + dir)
|
||||||
|
if dir == "snapshots/" {
|
||||||
|
results, err := storage.buckets[threadIndex].List(storage.storageDir + dir, "/", "", 100)
|
||||||
|
if err != nil {
|
||||||
|
return nil, nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, subDir := range results.CommonPrefixes {
|
||||||
|
files = append(files, subDir[dirLength:])
|
||||||
|
}
|
||||||
|
return files, nil, nil
|
||||||
|
} else if dir == "chunks/" {
|
||||||
|
marker := ""
|
||||||
|
for {
|
||||||
|
results, err := storage.buckets[threadIndex].List(storage.storageDir + dir, "", marker, 1000)
|
||||||
|
if err != nil {
|
||||||
|
return nil, nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, object := range results.Contents {
|
||||||
|
files = append(files, object.Key[dirLength:])
|
||||||
|
sizes = append(sizes, object.Size)
|
||||||
|
}
|
||||||
|
|
||||||
|
if !results.IsTruncated {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
|
||||||
|
marker = results.Contents[len(results.Contents) - 1].Key
|
||||||
|
}
|
||||||
|
return files, sizes, nil
|
||||||
|
|
||||||
|
} else {
|
||||||
|
|
||||||
|
results, err := storage.buckets[threadIndex].List(storage.storageDir + dir, "", "", 1000)
|
||||||
|
if err != nil {
|
||||||
|
return nil, nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, object := range results.Contents {
|
||||||
|
files = append(files, object.Key[dirLength:])
|
||||||
|
}
|
||||||
|
return files, nil, nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeleteFile deletes the file or directory at 'filePath'.
|
||||||
|
func (storage *S3CStorage) DeleteFile(threadIndex int, filePath string) (err error) {
|
||||||
|
return storage.buckets[threadIndex].Del(storage.storageDir + filePath)
|
||||||
|
}
|
||||||
|
|
||||||
|
// MoveFile renames the file.
|
||||||
|
func (storage *S3CStorage) MoveFile(threadIndex int, from string, to string) (err error) {
|
||||||
|
|
||||||
|
options := s3.CopyOptions { ContentType: "application/duplicacy" }
|
||||||
|
_, err = storage.buckets[threadIndex].PutCopy(storage.storageDir + to, s3.Private, options, storage.buckets[threadIndex].Name + "/" + storage.storageDir + from)
|
||||||
|
if err != nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
return storage.DeleteFile(threadIndex, from)
|
||||||
|
}
|
||||||
|
|
||||||
|
// CreateDirectory creates a new directory.
|
||||||
|
func (storage *S3CStorage) CreateDirectory(threadIndex int, dir string) (err error) {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetFileInfo returns the information about the file or directory at 'filePath'.
|
||||||
|
func (storage *S3CStorage) GetFileInfo(threadIndex int, filePath string) (exist bool, isDir bool, size int64, err error) {
|
||||||
|
|
||||||
|
response, err := storage.buckets[threadIndex].Head(storage.storageDir + filePath, nil)
|
||||||
|
if err != nil {
|
||||||
|
if e, ok := err.(*s3.Error); ok && (e.StatusCode == 403 || e.StatusCode == 404) {
|
||||||
|
return false, false, 0, nil
|
||||||
|
} else {
|
||||||
|
return false, false, 0, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if response.StatusCode == 403 || response.StatusCode == 404 {
|
||||||
|
return false, false, 0, nil
|
||||||
|
} else {
|
||||||
|
return true, false, response.ContentLength, nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// FindChunk finds the chunk with the specified id. If 'isFossil' is true, it will search for chunk files with
|
||||||
|
// the suffix '.fsl'.
|
||||||
|
func (storage *S3CStorage) FindChunk(threadIndex int, chunkID string, isFossil bool) (filePath string, exist bool, size int64, err error) {
|
||||||
|
|
||||||
|
filePath = "chunks/" + chunkID
|
||||||
|
if isFossil {
|
||||||
|
filePath += ".fsl"
|
||||||
|
}
|
||||||
|
|
||||||
|
exist, _, size, err = storage.GetFileInfo(threadIndex, filePath)
|
||||||
|
|
||||||
|
if err != nil {
|
||||||
|
return "", false, 0, err
|
||||||
|
} else {
|
||||||
|
return filePath, exist, size, err
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
// DownloadFile reads the file at 'filePath' into the chunk.
|
||||||
|
func (storage *S3CStorage) DownloadFile(threadIndex int, filePath string, chunk *Chunk) (err error) {
|
||||||
|
|
||||||
|
readCloser, err := storage.buckets[threadIndex].GetReader(storage.storageDir + filePath)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
defer readCloser.Close()
|
||||||
|
|
||||||
|
_, err = RateLimitedCopy(chunk, readCloser, storage.DownloadRateLimit / len(storage.buckets))
|
||||||
|
return err
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
// UploadFile writes 'content' to the file at 'filePath'.
|
||||||
|
func (storage *S3CStorage) UploadFile(threadIndex int, filePath string, content []byte) (err error) {
|
||||||
|
|
||||||
|
options := s3.Options { }
|
||||||
|
reader := CreateRateLimitedReader(content, storage.UploadRateLimit / len(storage.buckets))
|
||||||
|
return storage.buckets[threadIndex].PutReader(storage.storageDir + filePath, reader, int64(len(content)), "application/duplicacy", s3.Private, options)
|
||||||
|
}
|
||||||
|
|
||||||
|
// If a local snapshot cache is needed for the storage to avoid downloading/uploading chunks too often when
|
||||||
|
// managing snapshots.
|
||||||
|
func (storage *S3CStorage) IsCacheNeeded () (bool) { return true }
|
||||||
|
|
||||||
|
// If the 'MoveFile' method is implemented.
|
||||||
|
func (storage *S3CStorage) IsMoveFileImplemented() (bool) { return true }
|
||||||
|
|
||||||
|
// If the storage can guarantee strong consistency.
|
||||||
|
func (storage *S3CStorage) IsStrongConsistent() (bool) { return false }
|
||||||
|
|
||||||
|
// If the storage supports fast listing of files names.
|
||||||
|
func (storage *S3CStorage) IsFastListing() (bool) { return true }
|
||||||
|
|
||||||
|
// Enable the test mode.
|
||||||
|
func (storage *S3CStorage) EnableTestMode() {}
|
||||||
@@ -1,61 +1,77 @@
|
|||||||
// Copyright (c) Acrosync LLC. All rights reserved.
|
// Copyright (c) Acrosync LLC. All rights reserved.
|
||||||
// Licensed under the Fair Source License 0.9 (https://fair.io/)
|
// Free for personal use and commercial trial
|
||||||
// User Limitation: 5 users
|
// Commercial use requires per-user licenses available from https://duplicacy.com
|
||||||
|
|
||||||
package duplicacy
|
package duplicacy
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"time"
|
"strings"
|
||||||
"github.com/gilbertchen/goamz/aws"
|
"reflect"
|
||||||
"github.com/gilbertchen/goamz/s3"
|
|
||||||
|
"github.com/aws/aws-sdk-go/aws"
|
||||||
|
"github.com/aws/aws-sdk-go/aws/awserr"
|
||||||
|
"github.com/aws/aws-sdk-go/aws/credentials"
|
||||||
|
"github.com/aws/aws-sdk-go/aws/session"
|
||||||
|
"github.com/aws/aws-sdk-go/service/s3"
|
||||||
)
|
)
|
||||||
|
|
||||||
type S3Storage struct {
|
type S3Storage struct {
|
||||||
RateLimitedStorage
|
RateLimitedStorage
|
||||||
|
|
||||||
buckets []*s3.Bucket
|
client *s3.S3
|
||||||
|
bucket string
|
||||||
storageDir string
|
storageDir string
|
||||||
|
numberOfThreads int
|
||||||
}
|
}
|
||||||
|
|
||||||
// CreateS3Storage creates a amazon s3 storage object.
|
// CreateS3Storage creates a amazon s3 storage object.
|
||||||
func CreateS3Storage(regionName string, endpoint string, bucketName string, storageDir string,
|
func CreateS3Storage(regionName string, endpoint string, bucketName string, storageDir string,
|
||||||
accessKey string, secretKey string, threads int) (storage *S3Storage, err error) {
|
accessKey string, secretKey string, threads int,
|
||||||
|
isSSLSupported bool, isMinioCompatible bool) (storage *S3Storage, err error) {
|
||||||
|
|
||||||
var region aws.Region
|
token := ""
|
||||||
|
|
||||||
|
auth := credentials.NewStaticCredentials(accessKey, secretKey, token)
|
||||||
|
|
||||||
if endpoint == "" {
|
if regionName == "" && endpoint == "" {
|
||||||
if regionName == "" {
|
defaultRegionConfig := &aws.Config {
|
||||||
regionName = "us-east-1"
|
Region: aws.String("us-east-1"),
|
||||||
|
Credentials: auth,
|
||||||
}
|
}
|
||||||
region = aws.Regions[regionName]
|
|
||||||
} else {
|
s3Client := s3.New(session.New(defaultRegionConfig))
|
||||||
region = aws.Region { Name: regionName, S3Endpoint:"https://" + endpoint }
|
|
||||||
}
|
|
||||||
|
|
||||||
auth := aws.Auth{ AccessKey: accessKey, SecretKey: secretKey }
|
response, err := s3Client.GetBucketLocation(&s3.GetBucketLocationInput{Bucket: aws.String(bucketName)})
|
||||||
|
|
||||||
var buckets []*s3.Bucket
|
if err != nil {
|
||||||
for i := 0; i < threads; i++ {
|
return nil, err
|
||||||
s3Client := s3.New(auth, region)
|
}
|
||||||
s3Client.AttemptStrategy = aws.AttemptStrategy{
|
|
||||||
Min: 8,
|
regionName = "us-east-1"
|
||||||
Total: 300 * time.Second,
|
if response.LocationConstraint != nil {
|
||||||
Delay: 1000 * time.Millisecond,
|
regionName = *response.LocationConstraint
|
||||||
}
|
}
|
||||||
|
|
||||||
bucket := s3Client.Bucket(bucketName)
|
|
||||||
buckets = append(buckets, bucket)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
config := &aws.Config {
|
||||||
|
Region: aws.String(regionName),
|
||||||
|
Credentials: auth,
|
||||||
|
Endpoint: aws.String(endpoint),
|
||||||
|
S3ForcePathStyle: aws.Bool(isMinioCompatible),
|
||||||
|
DisableSSL: aws.Bool(!isSSLSupported),
|
||||||
|
}
|
||||||
|
|
||||||
if len(storageDir) > 0 && storageDir[len(storageDir) - 1] != '/' {
|
if len(storageDir) > 0 && storageDir[len(storageDir) - 1] != '/' {
|
||||||
storageDir += "/"
|
storageDir += "/"
|
||||||
}
|
}
|
||||||
|
|
||||||
storage = &S3Storage {
|
storage = &S3Storage {
|
||||||
buckets: buckets,
|
client: s3.New(session.New(config)),
|
||||||
|
bucket: bucketName,
|
||||||
storageDir: storageDir,
|
storageDir: storageDir,
|
||||||
|
numberOfThreads: threads,
|
||||||
}
|
}
|
||||||
|
|
||||||
return storage, nil
|
return storage, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -65,67 +81,82 @@ func (storage *S3Storage) ListFiles(threadIndex int, dir string) (files []string
|
|||||||
dir += "/"
|
dir += "/"
|
||||||
}
|
}
|
||||||
|
|
||||||
dirLength := len(storage.storageDir + dir)
|
|
||||||
if dir == "snapshots/" {
|
if dir == "snapshots/" {
|
||||||
results, err := storage.buckets[threadIndex].List(storage.storageDir + dir, "/", "", 100)
|
dir = storage.storageDir + dir
|
||||||
|
input := s3.ListObjectsInput {
|
||||||
|
Bucket: aws.String(storage.bucket),
|
||||||
|
Prefix: aws.String(dir),
|
||||||
|
Delimiter: aws.String("/"),
|
||||||
|
MaxKeys: aws.Int64(1000),
|
||||||
|
}
|
||||||
|
|
||||||
|
output, err := storage.client.ListObjects(&input)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, nil, err
|
return nil, nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
for _, subDir := range results.CommonPrefixes {
|
for _, subDir := range output.CommonPrefixes {
|
||||||
files = append(files, subDir[dirLength:])
|
files = append(files, (*subDir.Prefix)[len(dir):])
|
||||||
}
|
}
|
||||||
return files, nil, nil
|
return files, nil, nil
|
||||||
} else if dir == "chunks/" {
|
} else {
|
||||||
|
dir = storage.storageDir + dir
|
||||||
marker := ""
|
marker := ""
|
||||||
for {
|
for {
|
||||||
results, err := storage.buckets[threadIndex].List(storage.storageDir + dir, "", marker, 1000)
|
input := s3.ListObjectsInput {
|
||||||
|
Bucket: aws.String(storage.bucket),
|
||||||
|
Prefix: aws.String(dir),
|
||||||
|
MaxKeys: aws.Int64(1000),
|
||||||
|
Marker: aws.String(marker),
|
||||||
|
}
|
||||||
|
|
||||||
|
output, err := storage.client.ListObjects(&input)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, nil, err
|
return nil, nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
for _, object := range results.Contents {
|
for _, object := range output.Contents {
|
||||||
files = append(files, object.Key[dirLength:])
|
files = append(files, (*object.Key)[len(dir):])
|
||||||
sizes = append(sizes, object.Size)
|
sizes = append(sizes, *object.Size)
|
||||||
}
|
}
|
||||||
|
|
||||||
if !results.IsTruncated {
|
if !*output.IsTruncated {
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
|
|
||||||
marker = results.Contents[len(results.Contents) - 1].Key
|
marker = *output.Contents[len(output.Contents) - 1].Key
|
||||||
}
|
}
|
||||||
return files, sizes, nil
|
return files, sizes, nil
|
||||||
|
}
|
||||||
|
|
||||||
} else {
|
|
||||||
|
|
||||||
results, err := storage.buckets[threadIndex].List(storage.storageDir + dir, "", "", 1000)
|
|
||||||
if err != nil {
|
|
||||||
return nil, nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, object := range results.Contents {
|
|
||||||
files = append(files, object.Key[dirLength:])
|
|
||||||
}
|
|
||||||
return files, nil, nil
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// DeleteFile deletes the file or directory at 'filePath'.
|
// DeleteFile deletes the file or directory at 'filePath'.
|
||||||
func (storage *S3Storage) DeleteFile(threadIndex int, filePath string) (err error) {
|
func (storage *S3Storage) DeleteFile(threadIndex int, filePath string) (err error) {
|
||||||
return storage.buckets[threadIndex].Del(storage.storageDir + filePath)
|
input := &s3.DeleteObjectInput {
|
||||||
|
Bucket: aws.String(storage.bucket),
|
||||||
|
Key: aws.String(storage.storageDir + filePath),
|
||||||
|
}
|
||||||
|
_, err = storage.client.DeleteObject(input)
|
||||||
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
// MoveFile renames the file.
|
// MoveFile renames the file.
|
||||||
func (storage *S3Storage) MoveFile(threadIndex int, from string, to string) (err error) {
|
func (storage *S3Storage) MoveFile(threadIndex int, from string, to string) (err error) {
|
||||||
|
|
||||||
options := s3.CopyOptions { ContentType: "application/duplicacy" }
|
input := &s3.CopyObjectInput {
|
||||||
_, err = storage.buckets[threadIndex].PutCopy(storage.storageDir + to, s3.Private, options, storage.buckets[threadIndex].Name + "/" + storage.storageDir + from)
|
Bucket: aws.String(storage.bucket),
|
||||||
if err != nil {
|
CopySource: aws.String(storage.bucket + "/" + storage.storageDir + from),
|
||||||
return nil
|
Key: aws.String(storage.storageDir + to),
|
||||||
}
|
}
|
||||||
|
|
||||||
|
_, err = storage.client.CopyObject(input)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
return storage.DeleteFile(threadIndex, from)
|
return storage.DeleteFile(threadIndex, from)
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// CreateDirectory creates a new directory.
|
// CreateDirectory creates a new directory.
|
||||||
@@ -136,19 +167,24 @@ func (storage *S3Storage) CreateDirectory(threadIndex int, dir string) (err erro
|
|||||||
// GetFileInfo returns the information about the file or directory at 'filePath'.
|
// GetFileInfo returns the information about the file or directory at 'filePath'.
|
||||||
func (storage *S3Storage) GetFileInfo(threadIndex int, filePath string) (exist bool, isDir bool, size int64, err error) {
|
func (storage *S3Storage) GetFileInfo(threadIndex int, filePath string) (exist bool, isDir bool, size int64, err error) {
|
||||||
|
|
||||||
response, err := storage.buckets[threadIndex].Head(storage.storageDir + filePath, nil)
|
input := &s3.HeadObjectInput {
|
||||||
|
Bucket: aws.String(storage.bucket),
|
||||||
|
Key: aws.String(storage.storageDir + filePath),
|
||||||
|
}
|
||||||
|
|
||||||
|
output, err := storage.client.HeadObject(input)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
if e, ok := err.(*s3.Error); ok && (e.StatusCode == 403 || e.StatusCode == 404) {
|
if e, ok := err.(awserr.RequestFailure); ok && (e.StatusCode() == 403 || e.StatusCode() == 404) {
|
||||||
return false, false, 0, nil
|
return false, false, 0, nil
|
||||||
} else {
|
} else {
|
||||||
return false, false, 0, err
|
return false, false, 0, err
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
if response.StatusCode == 403 || response.StatusCode == 404 {
|
if output == nil || output.ContentLength == nil {
|
||||||
return false, false, 0, nil
|
return false, false, 0, nil
|
||||||
} else {
|
} else {
|
||||||
return true, false, response.ContentLength, nil
|
return true, false, *output.ContentLength, nil
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -174,14 +210,19 @@ func (storage *S3Storage) FindChunk(threadIndex int, chunkID string, isFossil bo
|
|||||||
// DownloadFile reads the file at 'filePath' into the chunk.
|
// DownloadFile reads the file at 'filePath' into the chunk.
|
||||||
func (storage *S3Storage) DownloadFile(threadIndex int, filePath string, chunk *Chunk) (err error) {
|
func (storage *S3Storage) DownloadFile(threadIndex int, filePath string, chunk *Chunk) (err error) {
|
||||||
|
|
||||||
readCloser, err := storage.buckets[threadIndex].GetReader(storage.storageDir + filePath)
|
input := &s3.GetObjectInput {
|
||||||
|
Bucket: aws.String(storage.bucket),
|
||||||
|
Key: aws.String(storage.storageDir + filePath),
|
||||||
|
}
|
||||||
|
|
||||||
|
output, err := storage.client.GetObject(input)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
defer readCloser.Close()
|
defer output.Body.Close()
|
||||||
|
|
||||||
_, err = RateLimitedCopy(chunk, readCloser, storage.DownloadRateLimit / len(storage.buckets))
|
_, err = RateLimitedCopy(chunk, output.Body, storage.DownloadRateLimit / len(storage.bucket))
|
||||||
return err
|
return err
|
||||||
|
|
||||||
}
|
}
|
||||||
@@ -189,9 +230,27 @@ func (storage *S3Storage) DownloadFile(threadIndex int, filePath string, chunk *
|
|||||||
// UploadFile writes 'content' to the file at 'filePath'.
|
// UploadFile writes 'content' to the file at 'filePath'.
|
||||||
func (storage *S3Storage) UploadFile(threadIndex int, filePath string, content []byte) (err error) {
|
func (storage *S3Storage) UploadFile(threadIndex int, filePath string, content []byte) (err error) {
|
||||||
|
|
||||||
options := s3.Options { }
|
attempts := 0
|
||||||
reader := CreateRateLimitedReader(content, storage.UploadRateLimit / len(storage.buckets))
|
|
||||||
return storage.buckets[threadIndex].PutReader(storage.storageDir + filePath, reader, int64(len(content)), "application/duplicacy", s3.Private, options)
|
for {
|
||||||
|
input := &s3.PutObjectInput {
|
||||||
|
Bucket: aws.String(storage.bucket),
|
||||||
|
Key: aws.String(storage.storageDir + filePath),
|
||||||
|
ACL: aws.String(s3.ObjectCannedACLPrivate),
|
||||||
|
Body: CreateRateLimitedReader(content, storage.UploadRateLimit / len(storage.bucket)),
|
||||||
|
ContentType: aws.String("application/duplicacy"),
|
||||||
|
}
|
||||||
|
|
||||||
|
_, err = storage.client.PutObject(input)
|
||||||
|
if err == nil || attempts >= 3 || !strings.Contains(err.Error(), "XAmzContentSHA256Mismatch") {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
LOG_INFO("S3_RETRY", "Retrying on %s: %v", reflect.TypeOf(err), err)
|
||||||
|
attempts += 1
|
||||||
|
}
|
||||||
|
|
||||||
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
// If a local snapshot cache is needed for the storage to avoid downloading/uploading chunks too often when
|
// If a local snapshot cache is needed for the storage to avoid downloading/uploading chunks too often when
|
||||||
|
|||||||
@@ -1,6 +1,6 @@
|
|||||||
// Copyright (c) Acrosync LLC. All rights reserved.
|
// Copyright (c) Acrosync LLC. All rights reserved.
|
||||||
// Licensed under the Fair Source License 0.9 (https://fair.io/)
|
// Free for personal use and commercial trial
|
||||||
// User Limitation: 5 users
|
// Commercial use requires per-user licenses available from https://duplicacy.com
|
||||||
|
|
||||||
package duplicacy
|
package duplicacy
|
||||||
|
|
||||||
@@ -215,7 +215,11 @@ func (storage *SFTPStorage) FindChunk(threadIndex int, chunkID string, isFossil
|
|||||||
|
|
||||||
err = storage.client.Mkdir(subDir)
|
err = storage.client.Mkdir(subDir)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return "", false, 0, fmt.Errorf("Failed to create the directory %s: %v", subDir, err)
|
// The directory may have been created by other threads so check it again.
|
||||||
|
stat, _ := storage.client.Stat(subDir)
|
||||||
|
if stat == nil || !stat.IsDir() {
|
||||||
|
return "", false, 0, fmt.Errorf("Failed to create the directory %s: %v", subDir, err)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
dir = subDir
|
dir = subDir
|
||||||
@@ -281,7 +285,7 @@ func (storage *SFTPStorage) UploadFile(threadIndex int, filePath string, content
|
|||||||
storage.client.Remove(temporaryFile)
|
storage.client.Remove(temporaryFile)
|
||||||
return nil
|
return nil
|
||||||
} else {
|
} else {
|
||||||
return fmt.Errorf("Uploaded file but failed to store it at %s", fullPath)
|
return fmt.Errorf("Uploaded file but failed to store it at %s: %v", fullPath, err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -1,6 +1,6 @@
|
|||||||
// Copyright (c) Acrosync LLC. All rights reserved.
|
// Copyright (c) Acrosync LLC. All rights reserved.
|
||||||
// Licensed under the Fair Source License 0.9 (https://fair.io/)
|
// Free for personal use and commercial trial
|
||||||
// User Limitation: 5 users
|
// Commercial use requires per-user licenses available from https://duplicacy.com
|
||||||
|
|
||||||
// +build !windows
|
// +build !windows
|
||||||
|
|
||||||
|
|||||||
@@ -1,6 +1,6 @@
|
|||||||
// Copyright (c) Acrosync LLC. All rights reserved.
|
// Copyright (c) Acrosync LLC. All rights reserved.
|
||||||
// Licensed under the Fair Source License 0.9 (https://fair.io/)
|
// Free for personal use and commercial trial
|
||||||
// User Limitation: 5 users
|
// Commercial use requires per-user licenses available from https://duplicacy.com
|
||||||
|
|
||||||
package duplicacy
|
package duplicacy
|
||||||
|
|
||||||
@@ -9,7 +9,6 @@ import (
|
|||||||
"unsafe"
|
"unsafe"
|
||||||
"time"
|
"time"
|
||||||
"os"
|
"os"
|
||||||
"path"
|
|
||||||
"runtime"
|
"runtime"
|
||||||
|
|
||||||
ole "github.com/gilbertchen/go-ole"
|
ole "github.com/gilbertchen/go-ole"
|
||||||
@@ -509,8 +508,9 @@ func CreateShadowCopy(top string, shadowCopy bool) (shadowTop string) {
|
|||||||
LOG_INFO("VSS_DONE", "Shadow copy %s created", SnapshotIDString)
|
LOG_INFO("VSS_DONE", "Shadow copy %s created", SnapshotIDString)
|
||||||
|
|
||||||
snapshotPath := uint16ArrayToString(properties.SnapshotDeviceObject)
|
snapshotPath := uint16ArrayToString(properties.SnapshotDeviceObject)
|
||||||
|
|
||||||
shadowLink = path.Join(top, DUPLICACY_DIRECTORY) + "\\shadow"
|
preferencePath := GetDuplicacyPreferencePath()
|
||||||
|
shadowLink = preferencePath + "\\shadow"
|
||||||
os.Remove(shadowLink)
|
os.Remove(shadowLink)
|
||||||
err = os.Symlink(snapshotPath + "\\", shadowLink)
|
err = os.Symlink(snapshotPath + "\\", shadowLink)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
|||||||
@@ -1,6 +1,6 @@
|
|||||||
// Copyright (c) Acrosync LLC. All rights reserved.
|
// Copyright (c) Acrosync LLC. All rights reserved.
|
||||||
// Licensed under the Fair Source License 0.9 (https://fair.io/)
|
// Free for personal use and commercial trial
|
||||||
// User Limitation: 5 users
|
// Commercial use requires per-user licenses available from https://duplicacy.com
|
||||||
|
|
||||||
package duplicacy
|
package duplicacy
|
||||||
|
|
||||||
@@ -67,7 +67,8 @@ func CreateSnapshotFromDirectory(id string, top string) (snapshot *Snapshot, ski
|
|||||||
}
|
}
|
||||||
|
|
||||||
var patterns []string
|
var patterns []string
|
||||||
patternFile, err := ioutil.ReadFile(path.Join(top, DUPLICACY_DIRECTORY, "filters"))
|
|
||||||
|
patternFile, err := ioutil.ReadFile(path.Join(GetDuplicacyPreferencePath(), "filters"))
|
||||||
if err == nil {
|
if err == nil {
|
||||||
for _, pattern := range strings.Split(string(patternFile), "\n") {
|
for _, pattern := range strings.Split(string(patternFile), "\n") {
|
||||||
pattern = strings.TrimSpace(pattern)
|
pattern = strings.TrimSpace(pattern)
|
||||||
@@ -136,6 +137,100 @@ func CreateSnapshotFromDirectory(id string, top string) (snapshot *Snapshot, ski
|
|||||||
return snapshot, skippedDirectories, skippedFiles, nil
|
return snapshot, skippedDirectories, skippedFiles, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// This is the struct used to save/load incomplete snapshots
|
||||||
|
type IncompleteSnapshot struct {
|
||||||
|
Files [] *Entry
|
||||||
|
ChunkHashes []string
|
||||||
|
ChunkLengths [] int
|
||||||
|
}
|
||||||
|
|
||||||
|
// LoadIncompleteSnapshot loads the incomplete snapshot if it exists
|
||||||
|
func LoadIncompleteSnapshot() (snapshot *Snapshot) {
|
||||||
|
snapshotFile := path.Join(GetDuplicacyPreferencePath(), "incomplete")
|
||||||
|
description, err := ioutil.ReadFile(snapshotFile)
|
||||||
|
if err != nil {
|
||||||
|
LOG_DEBUG("INCOMPLETE_LOCATE", "Failed to locate incomplete snapshot: %v", err)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
var incompleteSnapshot IncompleteSnapshot
|
||||||
|
|
||||||
|
err = json.Unmarshal(description, &incompleteSnapshot)
|
||||||
|
if err != nil {
|
||||||
|
LOG_DEBUG("INCOMPLETE_PARSE", "Failed to parse incomplete snapshot: %v", err)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
var chunkHashes []string
|
||||||
|
for _, chunkHash := range incompleteSnapshot.ChunkHashes {
|
||||||
|
hash, err := hex.DecodeString(chunkHash)
|
||||||
|
if err != nil {
|
||||||
|
LOG_DEBUG("INCOMPLETE_DECODE", "Failed to decode incomplete snapshot: %v", err)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
chunkHashes = append(chunkHashes, string(hash))
|
||||||
|
}
|
||||||
|
|
||||||
|
snapshot = &Snapshot {
|
||||||
|
Files: incompleteSnapshot.Files,
|
||||||
|
ChunkHashes: chunkHashes,
|
||||||
|
ChunkLengths: incompleteSnapshot.ChunkLengths,
|
||||||
|
}
|
||||||
|
LOG_INFO("INCOMPLETE_LOAD", "Incomplete snapshot loaded from %s", snapshotFile)
|
||||||
|
return snapshot
|
||||||
|
}
|
||||||
|
|
||||||
|
// SaveIncompleteSnapshot saves the incomplete snapshot under the preference directory
|
||||||
|
func SaveIncompleteSnapshot(snapshot *Snapshot) {
|
||||||
|
var files []*Entry
|
||||||
|
for _, file := range snapshot.Files {
|
||||||
|
// All unprocessed files will have a size of -1
|
||||||
|
if file.Size >= 0 {
|
||||||
|
file.Attributes = nil
|
||||||
|
files = append(files, file)
|
||||||
|
} else {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
var chunkHashes []string
|
||||||
|
for _, chunkHash := range snapshot.ChunkHashes {
|
||||||
|
chunkHashes = append(chunkHashes, hex.EncodeToString([]byte(chunkHash)))
|
||||||
|
}
|
||||||
|
|
||||||
|
incompleteSnapshot := IncompleteSnapshot {
|
||||||
|
Files: files,
|
||||||
|
ChunkHashes: chunkHashes,
|
||||||
|
ChunkLengths: snapshot.ChunkLengths,
|
||||||
|
}
|
||||||
|
|
||||||
|
description, err := json.MarshalIndent(incompleteSnapshot, "", " ")
|
||||||
|
if err != nil {
|
||||||
|
LOG_WARN("INCOMPLETE_ENCODE", "Failed to encode the incomplete snapshot: %v", err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
snapshotFile := path.Join(GetDuplicacyPreferencePath(), "incomplete")
|
||||||
|
err = ioutil.WriteFile(snapshotFile, description, 0644)
|
||||||
|
if err != nil {
|
||||||
|
LOG_WARN("INCOMPLETE_WRITE", "Failed to save the incomplete snapshot: %v", err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
LOG_INFO("INCOMPLETE_SAVE", "Incomplete snapshot saved to %s", snapshotFile)
|
||||||
|
}
|
||||||
|
|
||||||
|
func RemoveIncompleteSnapshot() {
|
||||||
|
snapshotFile := path.Join(GetDuplicacyPreferencePath(), "incomplete")
|
||||||
|
if stat, err := os.Stat(snapshotFile); err == nil && !stat.IsDir() {
|
||||||
|
err = os.Remove(snapshotFile)
|
||||||
|
if err != nil {
|
||||||
|
LOG_INFO("INCOMPLETE_SAVE", "Failed to remove ncomplete snapshot: %v", err)
|
||||||
|
} else {
|
||||||
|
LOG_INFO("INCOMPLETE_SAVE", "Removed incomplete snapshot %s", snapshotFile)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
// CreateSnapshotFromDescription creates a snapshot from json decription.
|
// CreateSnapshotFromDescription creates a snapshot from json decription.
|
||||||
func CreateSnapshotFromDescription(description []byte) (snapshot *Snapshot, err error) {
|
func CreateSnapshotFromDescription(description []byte) (snapshot *Snapshot, err error) {
|
||||||
|
|
||||||
|
|||||||
@@ -1,6 +1,6 @@
|
|||||||
// Copyright (c) Acrosync LLC. All rights reserved.
|
// Copyright (c) Acrosync LLC. All rights reserved.
|
||||||
// Licensed under the Fair Source License 0.9 (https://fair.io/)
|
// Free for personal use and commercial trial
|
||||||
// User Limitation: 5 users
|
// Commercial use requires per-user licenses available from https://duplicacy.com
|
||||||
|
|
||||||
package duplicacy
|
package duplicacy
|
||||||
|
|
||||||
@@ -303,12 +303,8 @@ func (manager *SnapshotManager) DownloadSnapshotFileSequence(snapshot *Snapshot,
|
|||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
|
|
||||||
if patterns == nil {
|
if len(patterns) != 0 && !MatchPath(entry.Path, patterns) {
|
||||||
entry.Attributes = nil
|
entry.Attributes = nil
|
||||||
} else if len(patterns) != 0 {
|
|
||||||
if !MatchPath(entry.Path, patterns) {
|
|
||||||
entry.Attributes = nil
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
files = append(files, &entry)
|
files = append(files, &entry)
|
||||||
@@ -664,7 +660,7 @@ func (manager *SnapshotManager) ListSnapshots(snapshotID string, revisionsToList
|
|||||||
if snapshotID == "" {
|
if snapshotID == "" {
|
||||||
snapshotIDs, err = manager.ListSnapshotIDs()
|
snapshotIDs, err = manager.ListSnapshotIDs()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
LOG_ERROR("SNAPSHOT_LIST", "Failed to list all snpashots: %v", err)
|
LOG_ERROR("SNAPSHOT_LIST", "Failed to list all snapshots: %v", err)
|
||||||
return 0
|
return 0
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
@@ -787,7 +783,7 @@ func (manager *SnapshotManager) CheckSnapshots(snapshotID string, revisionsToChe
|
|||||||
if snapshotID == "" || showStatistics {
|
if snapshotID == "" || showStatistics {
|
||||||
snapshotIDs, err := manager.ListSnapshotIDs()
|
snapshotIDs, err := manager.ListSnapshotIDs()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
LOG_ERROR("SNAPSHOT_LIST", "Failed to list all snpashots: %v", err)
|
LOG_ERROR("SNAPSHOT_LIST", "Failed to list all snapshots: %v", err)
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -1084,7 +1080,7 @@ func (manager *SnapshotManager) RetrieveFile(snapshot *Snapshot, file *Entry, ou
|
|||||||
if alternateHash {
|
if alternateHash {
|
||||||
fileHash = "#" + fileHash
|
fileHash = "#" + fileHash
|
||||||
}
|
}
|
||||||
if strings.ToLower(fileHash) != strings.ToLower(file.Hash) {
|
if strings.ToLower(fileHash) != strings.ToLower(file.Hash) && !SkipFileHash {
|
||||||
LOG_WARN("SNAPSHOT_HASH", "File %s has mismatched hashes: %s vs %s", file.Path, file.Hash, fileHash)
|
LOG_WARN("SNAPSHOT_HASH", "File %s has mismatched hashes: %s vs %s", file.Path, file.Hash, fileHash)
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
@@ -1092,15 +1088,18 @@ func (manager *SnapshotManager) RetrieveFile(snapshot *Snapshot, file *Entry, ou
|
|||||||
}
|
}
|
||||||
|
|
||||||
// FindFile returns the file entry that has the given file name.
|
// FindFile returns the file entry that has the given file name.
|
||||||
func (manager *SnapshotManager) FindFile(snapshot *Snapshot, filePath string) (*Entry) {
|
func (manager *SnapshotManager) FindFile(snapshot *Snapshot, filePath string, suppressError bool) (*Entry) {
|
||||||
for _, entry := range snapshot.Files {
|
for _, entry := range snapshot.Files {
|
||||||
if entry.Path == filePath {
|
if entry.Path == filePath {
|
||||||
return entry
|
return entry
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
LOG_ERROR("SNAPSHOT_FIND", "No file %s found in snapshot %s at revision %d",
|
if !suppressError {
|
||||||
filePath, snapshot.ID, snapshot.Revision)
|
LOG_ERROR("SNAPSHOT_FIND", "No file %s found in snapshot %s at revision %d",
|
||||||
|
filePath, snapshot.ID, snapshot.Revision)
|
||||||
|
}
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -1139,7 +1138,7 @@ func (manager *SnapshotManager) PrintFile(snapshotID string, revision int, path
|
|||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
|
|
||||||
file := manager.FindFile(snapshot, path)
|
file := manager.FindFile(snapshot, path, false)
|
||||||
var content [] byte
|
var content [] byte
|
||||||
if !manager.RetrieveFile(snapshot, file, func(chunk []byte) { content = append(content, chunk...) }) {
|
if !manager.RetrieveFile(snapshot, file, func(chunk []byte) { content = append(content, chunk...) }) {
|
||||||
LOG_ERROR("SNAPSHOT_RETRIEVE", "File %s is corrupted in snapshot %s at revision %d",
|
LOG_ERROR("SNAPSHOT_RETRIEVE", "File %s is corrupted in snapshot %s at revision %d",
|
||||||
@@ -1197,7 +1196,7 @@ func (manager *SnapshotManager) Diff(top string, snapshotID string, revisions []
|
|||||||
}
|
}
|
||||||
|
|
||||||
var leftFile []byte
|
var leftFile []byte
|
||||||
if !manager.RetrieveFile(leftSnapshot, manager.FindFile(leftSnapshot, filePath), func(content []byte) {
|
if !manager.RetrieveFile(leftSnapshot, manager.FindFile(leftSnapshot, filePath, false), func(content []byte) {
|
||||||
leftFile = append(leftFile, content...)
|
leftFile = append(leftFile, content...)
|
||||||
}) {
|
}) {
|
||||||
LOG_ERROR("SNAPSHOT_DIFF", "File %s is corrupted in snapshot %s at revision %d",
|
LOG_ERROR("SNAPSHOT_DIFF", "File %s is corrupted in snapshot %s at revision %d",
|
||||||
@@ -1207,7 +1206,7 @@ func (manager *SnapshotManager) Diff(top string, snapshotID string, revisions []
|
|||||||
|
|
||||||
var rightFile []byte
|
var rightFile []byte
|
||||||
if rightSnapshot != nil {
|
if rightSnapshot != nil {
|
||||||
if !manager.RetrieveFile(rightSnapshot, manager.FindFile(rightSnapshot, filePath), func(content []byte) {
|
if !manager.RetrieveFile(rightSnapshot, manager.FindFile(rightSnapshot, filePath, false), func(content []byte) {
|
||||||
rightFile = append(rightFile, content...)
|
rightFile = append(rightFile, content...)
|
||||||
}) {
|
}) {
|
||||||
LOG_ERROR("SNAPSHOT_DIFF", "File %s is corrupted in snapshot %s at revision %d",
|
LOG_ERROR("SNAPSHOT_DIFF", "File %s is corrupted in snapshot %s at revision %d",
|
||||||
@@ -1376,7 +1375,7 @@ func (manager *SnapshotManager) ShowHistory(top string, snapshotID string, revis
|
|||||||
for _, revision := range revisions {
|
for _, revision := range revisions {
|
||||||
snapshot := manager.DownloadSnapshot(snapshotID, revision)
|
snapshot := manager.DownloadSnapshot(snapshotID, revision)
|
||||||
manager.DownloadSnapshotFileSequence(snapshot, nil)
|
manager.DownloadSnapshotFileSequence(snapshot, nil)
|
||||||
file := manager.FindFile(snapshot, filePath)
|
file := manager.FindFile(snapshot, filePath, true)
|
||||||
|
|
||||||
if file != nil {
|
if file != nil {
|
||||||
|
|
||||||
@@ -1496,7 +1495,7 @@ func (manager *SnapshotManager) resurrectChunk(fossilPath string, chunkID string
|
|||||||
// Note that a snapshot being created when step 2 is in progress may reference a fossil. To avoid this
|
// Note that a snapshot being created when step 2 is in progress may reference a fossil. To avoid this
|
||||||
// problem, never remove the lastest revision (unless exclusive is true), and only cache chunks referenced
|
// problem, never remove the lastest revision (unless exclusive is true), and only cache chunks referenced
|
||||||
// by the lastest revision.
|
// by the lastest revision.
|
||||||
func (manager *SnapshotManager) PruneSnapshots(top string, selfID string, snapshotID string, revisionsToBeDeleted []int,
|
func (manager *SnapshotManager) PruneSnapshots(selfID string, snapshotID string, revisionsToBeDeleted []int,
|
||||||
tags []string, retentions []string,
|
tags []string, retentions []string,
|
||||||
exhaustive bool, exclusive bool, ignoredIDs []string,
|
exhaustive bool, exclusive bool, ignoredIDs []string,
|
||||||
dryRun bool, deleteOnly bool, collectOnly bool) bool {
|
dryRun bool, deleteOnly bool, collectOnly bool) bool {
|
||||||
@@ -1510,8 +1509,9 @@ func (manager *SnapshotManager) PruneSnapshots(top string, selfID string, snapsh
|
|||||||
if len(revisionsToBeDeleted) > 0 && (len(tags) > 0 || len(retentions) > 0) {
|
if len(revisionsToBeDeleted) > 0 && (len(tags) > 0 || len(retentions) > 0) {
|
||||||
LOG_WARN("DELETE_OPTIONS", "Tags or retention policy will be ignored if at least one revision is specified")
|
LOG_WARN("DELETE_OPTIONS", "Tags or retention policy will be ignored if at least one revision is specified")
|
||||||
}
|
}
|
||||||
|
|
||||||
logDir := path.Join(top, DUPLICACY_DIRECTORY, "logs")
|
preferencePath := GetDuplicacyPreferencePath()
|
||||||
|
logDir := path.Join(preferencePath, "logs")
|
||||||
os.Mkdir(logDir, 0700)
|
os.Mkdir(logDir, 0700)
|
||||||
logFileName := path.Join(logDir, time.Now().Format("prune-log-20060102-150405"))
|
logFileName := path.Join(logDir, time.Now().Format("prune-log-20060102-150405"))
|
||||||
logFile, err := os.OpenFile(logFileName, os.O_WRONLY | os.O_CREATE | os.O_TRUNC, 0600)
|
logFile, err := os.OpenFile(logFileName, os.O_WRONLY | os.O_CREATE | os.O_TRUNC, 0600)
|
||||||
@@ -1592,7 +1592,7 @@ func (manager *SnapshotManager) PruneSnapshots(top string, selfID string, snapsh
|
|||||||
// because we need to find out which chunks are not referenced.
|
// because we need to find out which chunks are not referenced.
|
||||||
snapshotIDs, err := manager.ListSnapshotIDs()
|
snapshotIDs, err := manager.ListSnapshotIDs()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
LOG_ERROR("SNAPSHOT_LIST", "Failed to list all snpashots: %v", err)
|
LOG_ERROR("SNAPSHOT_LIST", "Failed to list all snapshots: %v", err)
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -2180,7 +2180,9 @@ func (manager *SnapshotManager) CheckSnapshot(snapshot *Snapshot) (err error) {
|
|||||||
if len(entries) > 0 && entries[0].StartChunk != 0 {
|
if len(entries) > 0 && entries[0].StartChunk != 0 {
|
||||||
return fmt.Errorf("The first file starts at chunk %d", entries[0].StartChunk )
|
return fmt.Errorf("The first file starts at chunk %d", entries[0].StartChunk )
|
||||||
}
|
}
|
||||||
if lastChunk < numberOfChunks - 1 {
|
|
||||||
|
// There may be a last chunk whose size is 0 so we allow this to happen
|
||||||
|
if lastChunk < numberOfChunks - 2 {
|
||||||
return fmt.Errorf("The last file ends at chunk %d but the number of chunks is %d", lastChunk, numberOfChunks)
|
return fmt.Errorf("The last file ends at chunk %d but the number of chunks is %d", lastChunk, numberOfChunks)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -1,6 +1,6 @@
|
|||||||
// Copyright (c) Acrosync LLC. All rights reserved.
|
// Copyright (c) Acrosync LLC. All rights reserved.
|
||||||
// Licensed under the Fair Source License 0.9 (https://fair.io/)
|
// Free for personal use and commercial trial
|
||||||
// User Limitation: 5 users
|
// Commercial use requires per-user licenses available from https://duplicacy.com
|
||||||
|
|
||||||
package duplicacy
|
package duplicacy
|
||||||
|
|
||||||
@@ -95,14 +95,14 @@ func createTestSnapshotManager(testDir string) *SnapshotManager {
|
|||||||
os.RemoveAll(testDir)
|
os.RemoveAll(testDir)
|
||||||
os.MkdirAll(testDir, 0700)
|
os.MkdirAll(testDir, 0700)
|
||||||
|
|
||||||
storage, _ := CreateFileStorage(testDir, 1)
|
storage, _ := CreateFileStorage(testDir, 2, false, 1)
|
||||||
storage.CreateDirectory(0, "chunks")
|
storage.CreateDirectory(0, "chunks")
|
||||||
storage.CreateDirectory(0, "snapshots")
|
storage.CreateDirectory(0, "snapshots")
|
||||||
config := CreateConfig()
|
config := CreateConfig()
|
||||||
snapshotManager := CreateSnapshotManager(config, storage)
|
snapshotManager := CreateSnapshotManager(config, storage)
|
||||||
|
|
||||||
cacheDir := path.Join(testDir, "cache")
|
cacheDir := path.Join(testDir, "cache")
|
||||||
snapshotCache, _ := CreateFileStorage(cacheDir, 1)
|
snapshotCache, _ := CreateFileStorage(cacheDir, 2, false, 1)
|
||||||
snapshotCache.CreateDirectory(0, "chunks")
|
snapshotCache.CreateDirectory(0, "chunks")
|
||||||
snapshotCache.CreateDirectory(0, "snapshots")
|
snapshotCache.CreateDirectory(0, "snapshots")
|
||||||
|
|
||||||
@@ -181,7 +181,7 @@ func checkTestSnapshots(manager *SnapshotManager, expectedSnapshots int, expecte
|
|||||||
|
|
||||||
snapshotIDs, err = manager.ListSnapshotIDs()
|
snapshotIDs, err = manager.ListSnapshotIDs()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
LOG_ERROR("SNAPSHOT_LIST", "Failed to list all snpashots: %v", err)
|
LOG_ERROR("SNAPSHOT_LIST", "Failed to list all snapshots: %v", err)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -248,11 +248,11 @@ func TestSingleRepositoryPrune(t *testing.T) {
|
|||||||
checkTestSnapshots(snapshotManager, 3, 0)
|
checkTestSnapshots(snapshotManager, 3, 0)
|
||||||
|
|
||||||
t.Logf("Removing snapshot repository1 revision 1 with --exclusive")
|
t.Logf("Removing snapshot repository1 revision 1 with --exclusive")
|
||||||
snapshotManager.PruneSnapshots(testDir, "repository1", "repository1", []int{1}, []string{}, []string{}, false, true, []string{}, false, false, false)
|
snapshotManager.PruneSnapshots("repository1", "repository1", []int{1}, []string{}, []string{}, false, true, []string{}, false, false, false)
|
||||||
checkTestSnapshots(snapshotManager, 2, 0)
|
checkTestSnapshots(snapshotManager, 2, 0)
|
||||||
|
|
||||||
t.Logf("Removing snapshot repository1 revision 2 without --exclusive")
|
t.Logf("Removing snapshot repository1 revision 2 without --exclusive")
|
||||||
snapshotManager.PruneSnapshots(testDir, "repository1", "repository1", []int{2}, []string{}, []string{}, false, false, []string{}, false, false, false)
|
snapshotManager.PruneSnapshots("repository1", "repository1", []int{2}, []string{}, []string{}, false, false, []string{}, false, false, false)
|
||||||
checkTestSnapshots(snapshotManager, 1, 2)
|
checkTestSnapshots(snapshotManager, 1, 2)
|
||||||
|
|
||||||
t.Logf("Creating 1 snapshot")
|
t.Logf("Creating 1 snapshot")
|
||||||
@@ -261,7 +261,7 @@ func TestSingleRepositoryPrune(t *testing.T) {
|
|||||||
checkTestSnapshots(snapshotManager, 2, 2)
|
checkTestSnapshots(snapshotManager, 2, 2)
|
||||||
|
|
||||||
t.Logf("Prune without removing any snapshots -- fossils will be deleted")
|
t.Logf("Prune without removing any snapshots -- fossils will be deleted")
|
||||||
snapshotManager.PruneSnapshots(testDir, "repository1", "repository1", []int{}, []string{}, []string{}, false, false, []string{}, false, false, false)
|
snapshotManager.PruneSnapshots("repository1", "repository1", []int{}, []string{}, []string{}, false, false, []string{}, false, false, false)
|
||||||
checkTestSnapshots(snapshotManager, 2, 0)
|
checkTestSnapshots(snapshotManager, 2, 0)
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -288,11 +288,11 @@ func TestSingleHostPrune(t *testing.T) {
|
|||||||
checkTestSnapshots(snapshotManager, 3, 0)
|
checkTestSnapshots(snapshotManager, 3, 0)
|
||||||
|
|
||||||
t.Logf("Removing snapshot vm1@host1 revision 1 without --exclusive")
|
t.Logf("Removing snapshot vm1@host1 revision 1 without --exclusive")
|
||||||
snapshotManager.PruneSnapshots(testDir, "vm1@host1", "vm1@host1", []int{1}, []string{}, []string{}, false, false, []string{}, false, false, false)
|
snapshotManager.PruneSnapshots("vm1@host1", "vm1@host1", []int{1}, []string{}, []string{}, false, false, []string{}, false, false, false)
|
||||||
checkTestSnapshots(snapshotManager, 2, 2)
|
checkTestSnapshots(snapshotManager, 2, 2)
|
||||||
|
|
||||||
t.Logf("Prune without removing any snapshots -- no fossils will be deleted")
|
t.Logf("Prune without removing any snapshots -- no fossils will be deleted")
|
||||||
snapshotManager.PruneSnapshots(testDir, "vm1@host1", "vm1@host1", []int{}, []string{}, []string{}, false, false, []string{}, false, false, false)
|
snapshotManager.PruneSnapshots("vm1@host1", "vm1@host1", []int{}, []string{}, []string{}, false, false, []string{}, false, false, false)
|
||||||
checkTestSnapshots(snapshotManager, 2, 2)
|
checkTestSnapshots(snapshotManager, 2, 2)
|
||||||
|
|
||||||
t.Logf("Creating 1 snapshot")
|
t.Logf("Creating 1 snapshot")
|
||||||
@@ -301,7 +301,7 @@ func TestSingleHostPrune(t *testing.T) {
|
|||||||
checkTestSnapshots(snapshotManager, 3, 2)
|
checkTestSnapshots(snapshotManager, 3, 2)
|
||||||
|
|
||||||
t.Logf("Prune without removing any snapshots -- fossils will be deleted")
|
t.Logf("Prune without removing any snapshots -- fossils will be deleted")
|
||||||
snapshotManager.PruneSnapshots(testDir, "vm1@host1", "vm1@host1", []int{}, []string{}, []string{}, false, false, []string{}, false, false, false)
|
snapshotManager.PruneSnapshots("vm1@host1", "vm1@host1", []int{}, []string{}, []string{}, false, false, []string{}, false, false, false)
|
||||||
checkTestSnapshots(snapshotManager, 3, 0)
|
checkTestSnapshots(snapshotManager, 3, 0)
|
||||||
|
|
||||||
}
|
}
|
||||||
@@ -329,11 +329,11 @@ func TestMultipleHostPrune(t *testing.T) {
|
|||||||
checkTestSnapshots(snapshotManager, 3, 0)
|
checkTestSnapshots(snapshotManager, 3, 0)
|
||||||
|
|
||||||
t.Logf("Removing snapshot vm1@host1 revision 1 without --exclusive")
|
t.Logf("Removing snapshot vm1@host1 revision 1 without --exclusive")
|
||||||
snapshotManager.PruneSnapshots(testDir, "vm1@host1", "vm1@host1", []int{1}, []string{}, []string{}, false, false, []string{}, false, false, false)
|
snapshotManager.PruneSnapshots("vm1@host1", "vm1@host1", []int{1}, []string{}, []string{}, false, false, []string{}, false, false, false)
|
||||||
checkTestSnapshots(snapshotManager, 2, 2)
|
checkTestSnapshots(snapshotManager, 2, 2)
|
||||||
|
|
||||||
t.Logf("Prune without removing any snapshots -- no fossils will be deleted")
|
t.Logf("Prune without removing any snapshots -- no fossils will be deleted")
|
||||||
snapshotManager.PruneSnapshots(testDir, "vm1@host1", "vm1@host1", []int{}, []string{}, []string{}, false, false, []string{}, false, false, false)
|
snapshotManager.PruneSnapshots("vm1@host1", "vm1@host1", []int{}, []string{}, []string{}, false, false, []string{}, false, false, false)
|
||||||
checkTestSnapshots(snapshotManager, 2, 2)
|
checkTestSnapshots(snapshotManager, 2, 2)
|
||||||
|
|
||||||
t.Logf("Creating 1 snapshot")
|
t.Logf("Creating 1 snapshot")
|
||||||
@@ -342,7 +342,7 @@ func TestMultipleHostPrune(t *testing.T) {
|
|||||||
checkTestSnapshots(snapshotManager, 3, 2)
|
checkTestSnapshots(snapshotManager, 3, 2)
|
||||||
|
|
||||||
t.Logf("Prune without removing any snapshots -- no fossils will be deleted")
|
t.Logf("Prune without removing any snapshots -- no fossils will be deleted")
|
||||||
snapshotManager.PruneSnapshots(testDir, "vm1@host1", "vm1@host1", []int{}, []string{}, []string{}, false, false, []string{}, false, false, false)
|
snapshotManager.PruneSnapshots("vm1@host1", "vm1@host1", []int{}, []string{}, []string{}, false, false, []string{}, false, false, false)
|
||||||
checkTestSnapshots(snapshotManager, 3, 2)
|
checkTestSnapshots(snapshotManager, 3, 2)
|
||||||
|
|
||||||
t.Logf("Creating 1 snapshot")
|
t.Logf("Creating 1 snapshot")
|
||||||
@@ -351,7 +351,7 @@ func TestMultipleHostPrune(t *testing.T) {
|
|||||||
checkTestSnapshots(snapshotManager, 4, 2)
|
checkTestSnapshots(snapshotManager, 4, 2)
|
||||||
|
|
||||||
t.Logf("Prune without removing any snapshots -- fossils will be deleted")
|
t.Logf("Prune without removing any snapshots -- fossils will be deleted")
|
||||||
snapshotManager.PruneSnapshots(testDir, "vm1@host1", "vm1@host1", []int{}, []string{}, []string{}, false, false, []string{}, false, false, false)
|
snapshotManager.PruneSnapshots("vm1@host1", "vm1@host1", []int{}, []string{}, []string{}, false, false, []string{}, false, false, false)
|
||||||
checkTestSnapshots(snapshotManager, 4, 0)
|
checkTestSnapshots(snapshotManager, 4, 0)
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -376,7 +376,7 @@ func TestPruneAndResurrect(t *testing.T) {
|
|||||||
checkTestSnapshots(snapshotManager, 2, 0)
|
checkTestSnapshots(snapshotManager, 2, 0)
|
||||||
|
|
||||||
t.Logf("Removing snapshot vm1@host1 revision 1 without --exclusive")
|
t.Logf("Removing snapshot vm1@host1 revision 1 without --exclusive")
|
||||||
snapshotManager.PruneSnapshots(testDir, "vm1@host1", "vm1@host1", []int{1}, []string{}, []string{}, false, false, []string{}, false, false, false)
|
snapshotManager.PruneSnapshots("vm1@host1", "vm1@host1", []int{1}, []string{}, []string{}, false, false, []string{}, false, false, false)
|
||||||
checkTestSnapshots(snapshotManager, 1, 2)
|
checkTestSnapshots(snapshotManager, 1, 2)
|
||||||
|
|
||||||
t.Logf("Creating 1 snapshot")
|
t.Logf("Creating 1 snapshot")
|
||||||
@@ -385,7 +385,7 @@ func TestPruneAndResurrect(t *testing.T) {
|
|||||||
checkTestSnapshots(snapshotManager, 2, 2)
|
checkTestSnapshots(snapshotManager, 2, 2)
|
||||||
|
|
||||||
t.Logf("Prune without removing any snapshots -- one fossil will be resurrected")
|
t.Logf("Prune without removing any snapshots -- one fossil will be resurrected")
|
||||||
snapshotManager.PruneSnapshots(testDir, "vm1@host1", "vm1@host1", []int{}, []string{}, []string{}, false, false, []string{}, false, false, false)
|
snapshotManager.PruneSnapshots("vm1@host1", "vm1@host1", []int{}, []string{}, []string{}, false, false, []string{}, false, false, false)
|
||||||
checkTestSnapshots(snapshotManager, 2, 0)
|
checkTestSnapshots(snapshotManager, 2, 0)
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -413,11 +413,11 @@ func TestInactiveHostPrune(t *testing.T) {
|
|||||||
checkTestSnapshots(snapshotManager, 3, 0)
|
checkTestSnapshots(snapshotManager, 3, 0)
|
||||||
|
|
||||||
t.Logf("Removing snapshot vm1@host1 revision 1")
|
t.Logf("Removing snapshot vm1@host1 revision 1")
|
||||||
snapshotManager.PruneSnapshots(testDir, "vm1@host1", "vm1@host1", []int{1}, []string{}, []string{}, false, false, []string{}, false, false, false)
|
snapshotManager.PruneSnapshots("vm1@host1", "vm1@host1", []int{1}, []string{}, []string{}, false, false, []string{}, false, false, false)
|
||||||
checkTestSnapshots(snapshotManager, 2, 2)
|
checkTestSnapshots(snapshotManager, 2, 2)
|
||||||
|
|
||||||
t.Logf("Prune without removing any snapshots -- no fossils will be deleted")
|
t.Logf("Prune without removing any snapshots -- no fossils will be deleted")
|
||||||
snapshotManager.PruneSnapshots(testDir, "vm1@host1", "vm1@host1", []int{}, []string{}, []string{}, false, false, []string{}, false, false, false)
|
snapshotManager.PruneSnapshots("vm1@host1", "vm1@host1", []int{}, []string{}, []string{}, false, false, []string{}, false, false, false)
|
||||||
checkTestSnapshots(snapshotManager, 2, 2)
|
checkTestSnapshots(snapshotManager, 2, 2)
|
||||||
|
|
||||||
t.Logf("Creating 1 snapshot")
|
t.Logf("Creating 1 snapshot")
|
||||||
@@ -426,7 +426,7 @@ func TestInactiveHostPrune(t *testing.T) {
|
|||||||
checkTestSnapshots(snapshotManager, 3, 2)
|
checkTestSnapshots(snapshotManager, 3, 2)
|
||||||
|
|
||||||
t.Logf("Prune without removing any snapshots -- fossils will be deleted")
|
t.Logf("Prune without removing any snapshots -- fossils will be deleted")
|
||||||
snapshotManager.PruneSnapshots(testDir, "vm1@host1", "vm1@host1", []int{}, []string{}, []string{}, false, false, []string{}, false, false, false)
|
snapshotManager.PruneSnapshots("vm1@host1", "vm1@host1", []int{}, []string{}, []string{}, false, false, []string{}, false, false, false)
|
||||||
checkTestSnapshots(snapshotManager, 3, 0)
|
checkTestSnapshots(snapshotManager, 3, 0)
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -454,14 +454,14 @@ func TestRetentionPolicy(t *testing.T) {
|
|||||||
checkTestSnapshots(snapshotManager, 30, 0)
|
checkTestSnapshots(snapshotManager, 30, 0)
|
||||||
|
|
||||||
t.Logf("Removing snapshot vm1@host1 0:20 with --exclusive")
|
t.Logf("Removing snapshot vm1@host1 0:20 with --exclusive")
|
||||||
snapshotManager.PruneSnapshots(testDir, "vm1@host1", "vm1@host1", []int{}, []string{}, []string{"0:20"}, false, true, []string{}, false, false, false)
|
snapshotManager.PruneSnapshots("vm1@host1", "vm1@host1", []int{}, []string{}, []string{"0:20"}, false, true, []string{}, false, false, false)
|
||||||
checkTestSnapshots(snapshotManager, 19, 0)
|
checkTestSnapshots(snapshotManager, 19, 0)
|
||||||
|
|
||||||
t.Logf("Removing snapshot vm1@host1 -k 0:20 with --exclusive")
|
t.Logf("Removing snapshot vm1@host1 -k 0:20 with --exclusive")
|
||||||
snapshotManager.PruneSnapshots(testDir, "vm1@host1", "vm1@host1", []int{}, []string{}, []string{"0:20"}, false, true, []string{}, false, false, false)
|
snapshotManager.PruneSnapshots("vm1@host1", "vm1@host1", []int{}, []string{}, []string{"0:20"}, false, true, []string{}, false, false, false)
|
||||||
checkTestSnapshots(snapshotManager, 19, 0)
|
checkTestSnapshots(snapshotManager, 19, 0)
|
||||||
|
|
||||||
t.Logf("Removing snapshot vm1@host1 -k 3:14 -k 2:7 with --exclusive")
|
t.Logf("Removing snapshot vm1@host1 -k 3:14 -k 2:7 with --exclusive")
|
||||||
snapshotManager.PruneSnapshots(testDir, "vm1@host1", "vm1@host1", []int{}, []string{}, []string{"3:14", "2:7"}, false, true, []string{}, false, false, false)
|
snapshotManager.PruneSnapshots("vm1@host1", "vm1@host1", []int{}, []string{}, []string{"3:14", "2:7"}, false, true, []string{}, false, false, false)
|
||||||
checkTestSnapshots(snapshotManager, 12, 0)
|
checkTestSnapshots(snapshotManager, 12, 0)
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1,6 +1,6 @@
|
|||||||
// Copyright (c) Acrosync LLC. All rights reserved.
|
// Copyright (c) Acrosync LLC. All rights reserved.
|
||||||
// Licensed under the Fair Source License 0.9 (https://fair.io/)
|
// Free for personal use and commercial trial
|
||||||
// User Limitation: 5 users
|
// Commercial use requires per-user licenses available from https://duplicacy.com
|
||||||
|
|
||||||
package duplicacy
|
package duplicacy
|
||||||
|
|
||||||
@@ -75,14 +75,10 @@ func (storage *RateLimitedStorage) SetRateLimits(downloadRateLimit int, uploadRa
|
|||||||
storage.UploadRateLimit = uploadRateLimit
|
storage.UploadRateLimit = uploadRateLimit
|
||||||
}
|
}
|
||||||
|
|
||||||
func checkHostKey(repository string, hostname string, remote net.Addr, key ssh.PublicKey) error {
|
func checkHostKey(hostname string, remote net.Addr, key ssh.PublicKey) error {
|
||||||
|
|
||||||
if len(repository) == 0 {
|
preferencePath := GetDuplicacyPreferencePath()
|
||||||
return nil
|
hostFile := path.Join(preferencePath, "known_hosts")
|
||||||
}
|
|
||||||
|
|
||||||
duplicacyDirectory := path.Join(repository, DUPLICACY_DIRECTORY)
|
|
||||||
hostFile := path.Join(duplicacyDirectory, "knowns_hosts")
|
|
||||||
file, err := os.OpenFile(hostFile, os.O_RDWR | os.O_CREATE, 0600)
|
file, err := os.OpenFile(hostFile, os.O_RDWR | os.O_CREATE, 0600)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
@@ -126,11 +122,12 @@ func checkHostKey(repository string, hostname string, remote net.Addr, key ssh.P
|
|||||||
}
|
}
|
||||||
|
|
||||||
// CreateStorage creates a storage object based on the provide storage URL.
|
// CreateStorage creates a storage object based on the provide storage URL.
|
||||||
func CreateStorage(repository string, preference Preference, resetPassword bool, threads int) (storage Storage) {
|
func CreateStorage(preference Preference, resetPassword bool, threads int) (storage Storage) {
|
||||||
|
|
||||||
storageURL := preference.StorageURL
|
storageURL := preference.StorageURL
|
||||||
|
|
||||||
isFileStorage := false
|
isFileStorage := false
|
||||||
|
isCacheNeeded := false
|
||||||
|
|
||||||
if strings.HasPrefix(storageURL, "/") {
|
if strings.HasPrefix(storageURL, "/") {
|
||||||
isFileStorage = true
|
isFileStorage = true
|
||||||
@@ -144,11 +141,30 @@ func CreateStorage(repository string, preference Preference, resetPassword bool,
|
|||||||
|
|
||||||
if !isFileStorage && strings.HasPrefix(storageURL, `\\`) {
|
if !isFileStorage && strings.HasPrefix(storageURL, `\\`) {
|
||||||
isFileStorage = true
|
isFileStorage = true
|
||||||
|
isCacheNeeded = true
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
if isFileStorage {
|
if isFileStorage {
|
||||||
fileStorage, err := CreateFileStorage(storageURL, threads)
|
fileStorage, err := CreateFileStorage(storageURL, 2, isCacheNeeded, threads)
|
||||||
|
if err != nil {
|
||||||
|
LOG_ERROR("STORAGE_CREATE", "Failed to load the file storage at %s: %v", storageURL, err)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
return fileStorage
|
||||||
|
}
|
||||||
|
|
||||||
|
if strings.HasPrefix(storageURL, "flat://") {
|
||||||
|
fileStorage, err := CreateFileStorage(storageURL[7:], 0, false, threads)
|
||||||
|
if err != nil {
|
||||||
|
LOG_ERROR("STORAGE_CREATE", "Failed to load the file storage at %s: %v", storageURL, err)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
return fileStorage
|
||||||
|
}
|
||||||
|
|
||||||
|
if strings.HasPrefix(storageURL, "samba://") {
|
||||||
|
fileStorage, err := CreateFileStorage(storageURL[8:], 2, true, threads)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
LOG_ERROR("STORAGE_CREATE", "Failed to load the file storage at %s: %v", storageURL, err)
|
LOG_ERROR("STORAGE_CREATE", "Failed to load the file storage at %s: %v", storageURL, err)
|
||||||
return nil
|
return nil
|
||||||
@@ -184,6 +200,9 @@ func CreateStorage(repository string, preference Preference, resetPassword bool,
|
|||||||
username = username[:len(username) - 1]
|
username = username[:len(username) - 1]
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// If ssh_key_file is set, skip password-based login
|
||||||
|
keyFile := GetPasswordFromPreference(preference, "ssh_key_file")
|
||||||
|
|
||||||
password := ""
|
password := ""
|
||||||
passwordCallback := func() (string, error) {
|
passwordCallback := func() (string, error) {
|
||||||
LOG_DEBUG("SSH_PASSWORD", "Attempting password login")
|
LOG_DEBUG("SSH_PASSWORD", "Attempting password login")
|
||||||
@@ -203,7 +222,6 @@ func CreateStorage(repository string, preference Preference, resetPassword bool,
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
keyFile := ""
|
|
||||||
publicKeysCallback := func() ([]ssh.Signer, error) {
|
publicKeysCallback := func() ([]ssh.Signer, error) {
|
||||||
LOG_DEBUG("SSH_PUBLICKEY", "Attempting public key authentication")
|
LOG_DEBUG("SSH_PUBLICKEY", "Attempting public key authentication")
|
||||||
|
|
||||||
@@ -257,10 +275,19 @@ func CreateStorage(repository string, preference Preference, resetPassword bool,
|
|||||||
}
|
}
|
||||||
|
|
||||||
authMethods := [] ssh.AuthMethod {
|
authMethods := [] ssh.AuthMethod {
|
||||||
|
}
|
||||||
|
passwordAuthMethods := [] ssh.AuthMethod {
|
||||||
ssh.PasswordCallback(passwordCallback),
|
ssh.PasswordCallback(passwordCallback),
|
||||||
ssh.KeyboardInteractive(keyboardInteractive),
|
ssh.KeyboardInteractive(keyboardInteractive),
|
||||||
|
}
|
||||||
|
keyFileAuthMethods := [] ssh.AuthMethod {
|
||||||
ssh.PublicKeysCallback(publicKeysCallback),
|
ssh.PublicKeysCallback(publicKeysCallback),
|
||||||
}
|
}
|
||||||
|
if keyFile != "" {
|
||||||
|
authMethods = append(keyFileAuthMethods, passwordAuthMethods...)
|
||||||
|
} else {
|
||||||
|
authMethods = append(passwordAuthMethods, keyFileAuthMethods...)
|
||||||
|
}
|
||||||
|
|
||||||
if RunInBackground {
|
if RunInBackground {
|
||||||
|
|
||||||
@@ -282,7 +309,7 @@ func CreateStorage(repository string, preference Preference, resetPassword bool,
|
|||||||
}
|
}
|
||||||
|
|
||||||
hostKeyChecker := func(hostname string, remote net.Addr, key ssh.PublicKey) error {
|
hostKeyChecker := func(hostname string, remote net.Addr, key ssh.PublicKey) error {
|
||||||
return checkHostKey(repository, hostname, remote, key)
|
return checkHostKey(hostname, remote, key)
|
||||||
}
|
}
|
||||||
|
|
||||||
sftpStorage, err := CreateSFTPStorage(server, port, username, storageDir, authMethods, hostKeyChecker, threads)
|
sftpStorage, err := CreateSFTPStorage(server, port, username, storageDir, authMethods, hostKeyChecker, threads)
|
||||||
@@ -297,7 +324,7 @@ func CreateStorage(repository string, preference Preference, resetPassword bool,
|
|||||||
SavePassword(preference, "ssh_password", password)
|
SavePassword(preference, "ssh_password", password)
|
||||||
}
|
}
|
||||||
return sftpStorage
|
return sftpStorage
|
||||||
} else if matched[1] == "s3" {
|
} else if matched[1] == "s3" || matched[1] == "s3c" || matched[1] == "minio" || matched[1] == "minios" {
|
||||||
|
|
||||||
// urlRegex := regexp.MustCompile(`^(\w+)://([\w\-]+@)?([^/]+)(/(.+))?`)
|
// urlRegex := regexp.MustCompile(`^(\w+)://([\w\-]+@)?([^/]+)(/(.+))?`)
|
||||||
|
|
||||||
@@ -323,15 +350,27 @@ func CreateStorage(repository string, preference Preference, resetPassword bool,
|
|||||||
accessKey := GetPassword(preference, "s3_id", "Enter S3 Access Key ID:", true, resetPassword)
|
accessKey := GetPassword(preference, "s3_id", "Enter S3 Access Key ID:", true, resetPassword)
|
||||||
secretKey := GetPassword(preference, "s3_secret", "Enter S3 Secret Access Key:", true, resetPassword)
|
secretKey := GetPassword(preference, "s3_secret", "Enter S3 Secret Access Key:", true, resetPassword)
|
||||||
|
|
||||||
s3Storage, err := CreateS3Storage(region, endpoint, bucket, storageDir, accessKey, secretKey, threads)
|
var err error
|
||||||
if err != nil {
|
|
||||||
LOG_ERROR("STORAGE_CREATE", "Failed to load the S3 storage at %s: %v", storageURL, err)
|
if matched[1] == "s3c" {
|
||||||
return nil
|
storage, err = CreateS3CStorage(region, endpoint, bucket, storageDir, accessKey, secretKey, threads)
|
||||||
|
if err != nil {
|
||||||
|
LOG_ERROR("STORAGE_CREATE", "Failed to load the S3C storage at %s: %v", storageURL, err)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
isMinioCompatible := (matched[1] == "minio" || matched[1] == "minios")
|
||||||
|
isSSLSupported := (matched[1] == "s3" || matched[1] == "minios")
|
||||||
|
storage, err = CreateS3Storage(region, endpoint, bucket, storageDir, accessKey, secretKey, threads, isSSLSupported, isMinioCompatible)
|
||||||
|
if err != nil {
|
||||||
|
LOG_ERROR("STORAGE_CREATE", "Failed to load the S3 storage at %s: %v", storageURL, err)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
}
|
}
|
||||||
SavePassword(preference, "s3_id", accessKey)
|
SavePassword(preference, "s3_id", accessKey)
|
||||||
SavePassword(preference, "s3_secret", secretKey)
|
SavePassword(preference, "s3_secret", secretKey)
|
||||||
|
|
||||||
return s3Storage
|
return storage
|
||||||
} else if matched[1] == "dropbox" {
|
} else if matched[1] == "dropbox" {
|
||||||
storageDir := matched[3] + matched[5]
|
storageDir := matched[3] + matched[5]
|
||||||
token := GetPassword(preference, "dropbox_token", "Enter Dropbox access token:", true, resetPassword)
|
token := GetPassword(preference, "dropbox_token", "Enter Dropbox access token:", true, resetPassword)
|
||||||
|
|||||||
@@ -1,6 +1,6 @@
|
|||||||
// Copyright (c) Acrosync LLC. All rights reserved.
|
// Copyright (c) Acrosync LLC. All rights reserved.
|
||||||
// Licensed under the Fair Source License 0.9 (https://fair.io/)
|
// Free for personal use and commercial trial
|
||||||
// User Limitation: 5 users
|
// Commercial use requires per-user licenses available from https://duplicacy.com
|
||||||
|
|
||||||
package duplicacy
|
package duplicacy
|
||||||
|
|
||||||
@@ -41,7 +41,7 @@ func init() {
|
|||||||
func loadStorage(localStoragePath string, threads int) (Storage, error) {
|
func loadStorage(localStoragePath string, threads int) (Storage, error) {
|
||||||
|
|
||||||
if testStorageName == "" || testStorageName == "file" {
|
if testStorageName == "" || testStorageName == "file" {
|
||||||
return CreateFileStorage(localStoragePath, threads)
|
return CreateFileStorage(localStoragePath, 2, false, threads)
|
||||||
}
|
}
|
||||||
|
|
||||||
config, err := ioutil.ReadFile("test_storage.conf")
|
config, err := ioutil.ReadFile("test_storage.conf")
|
||||||
@@ -61,17 +61,27 @@ func loadStorage(localStoragePath string, threads int) (Storage, error) {
|
|||||||
return nil, fmt.Errorf("No storage named '%s' found", testStorageName)
|
return nil, fmt.Errorf("No storage named '%s' found", testStorageName)
|
||||||
}
|
}
|
||||||
|
|
||||||
if testStorageName == "sftp" {
|
if testStorageName == "flat" {
|
||||||
|
return CreateFileStorage(localStoragePath, 0, false, threads)
|
||||||
|
} else if testStorageName == "samba" {
|
||||||
|
return CreateFileStorage(localStoragePath, 2, true, threads)
|
||||||
|
} else if testStorageName == "sftp" {
|
||||||
port, _ := strconv.Atoi(storage["port"])
|
port, _ := strconv.Atoi(storage["port"])
|
||||||
return CreateSFTPStorageWithPassword(storage["server"], port, storage["username"], storage["directory"], storage["password"], threads)
|
return CreateSFTPStorageWithPassword(storage["server"], port, storage["username"], storage["directory"], storage["password"], threads)
|
||||||
} else if testStorageName == "s3" {
|
} else if testStorageName == "s3" || testStorageName == "wasabi" {
|
||||||
return CreateS3Storage(storage["region"], storage["endpoint"], storage["bucket"], storage["directory"], storage["access_key"], storage["secret_key"], threads)
|
return CreateS3Storage(storage["region"], storage["endpoint"], storage["bucket"], storage["directory"], storage["access_key"], storage["secret_key"], threads, true, false)
|
||||||
|
} else if testStorageName == "s3c" {
|
||||||
|
return CreateS3CStorage(storage["region"], storage["endpoint"], storage["bucket"], storage["directory"], storage["access_key"], storage["secret_key"], threads)
|
||||||
|
} else if testStorageName == "minio" {
|
||||||
|
return CreateS3Storage(storage["region"], storage["endpoint"], storage["bucket"], storage["directory"], storage["access_key"], storage["secret_key"], threads, false, true)
|
||||||
|
} else if testStorageName == "minios" {
|
||||||
|
return CreateS3Storage(storage["region"], storage["endpoint"], storage["bucket"], storage["directory"], storage["access_key"], storage["secret_key"], threads, true, true)
|
||||||
} else if testStorageName == "dropbox" {
|
} else if testStorageName == "dropbox" {
|
||||||
return CreateDropboxStorage(storage["token"], storage["directory"], threads)
|
return CreateDropboxStorage(storage["token"], storage["directory"], threads)
|
||||||
} else if testStorageName == "b2" {
|
} else if testStorageName == "b2" {
|
||||||
return CreateB2Storage(storage["account"], storage["key"], storage["bucket"], threads)
|
return CreateB2Storage(storage["account"], storage["key"], storage["bucket"], threads)
|
||||||
} else if testStorageName == "gcs-s3" {
|
} else if testStorageName == "gcs-s3" {
|
||||||
return CreateS3Storage(storage["region"], storage["endpoint"], storage["bucket"], storage["directory"], storage["access_key"], storage["secret_key"], threads)
|
return CreateS3Storage(storage["region"], storage["endpoint"], storage["bucket"], storage["directory"], storage["access_key"], storage["secret_key"], threads, true, false)
|
||||||
} else if testStorageName == "gcs" {
|
} else if testStorageName == "gcs" {
|
||||||
return CreateGCSStorage(storage["token_file"], storage["bucket"], storage["directory"], threads)
|
return CreateGCSStorage(storage["token_file"], storage["bucket"], storage["directory"], threads)
|
||||||
} else if testStorageName == "gcs-sa" {
|
} else if testStorageName == "gcs-sa" {
|
||||||
@@ -448,3 +458,64 @@ func TestStorage(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestCleanStorage(t *testing.T) {
|
||||||
|
setTestingT(t)
|
||||||
|
SetLoggingLevel(INFO)
|
||||||
|
|
||||||
|
defer func() {
|
||||||
|
if r := recover(); r != nil {
|
||||||
|
switch e := r.(type) {
|
||||||
|
case Exception:
|
||||||
|
t.Errorf("%s %s", e.LogID, e.Message)
|
||||||
|
debug.PrintStack()
|
||||||
|
default:
|
||||||
|
t.Errorf("%v", e)
|
||||||
|
debug.PrintStack()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
} ()
|
||||||
|
|
||||||
|
testDir := path.Join(os.TempDir(), "duplicacy_test", "storage_test")
|
||||||
|
os.RemoveAll(testDir)
|
||||||
|
os.MkdirAll(testDir, 0700)
|
||||||
|
|
||||||
|
LOG_INFO("STORAGE_TEST", "storage: %s", testStorageName)
|
||||||
|
|
||||||
|
storage, err := loadStorage(testDir, 1)
|
||||||
|
if err != nil {
|
||||||
|
t.Errorf("Failed to create storage: %v", err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
directories := make([]string, 0, 1024)
|
||||||
|
directories = append(directories, "snapshots/")
|
||||||
|
directories = append(directories, "chunks/")
|
||||||
|
|
||||||
|
for len(directories) > 0 {
|
||||||
|
|
||||||
|
dir := directories[len(directories) - 1]
|
||||||
|
directories = directories[:len(directories) - 1]
|
||||||
|
|
||||||
|
LOG_INFO("LIST_FILES", "Listing %s", dir)
|
||||||
|
|
||||||
|
files, _, err := storage.ListFiles(0, dir)
|
||||||
|
if err != nil {
|
||||||
|
LOG_ERROR("LIST_FILES", "Failed to list the directory %s: %v", dir, err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, file := range files {
|
||||||
|
if len(file) > 0 && file[len(file) - 1] == '/' {
|
||||||
|
directories = append(directories, dir + file)
|
||||||
|
} else {
|
||||||
|
storage.DeleteFile(0, dir + file)
|
||||||
|
LOG_INFO("DELETE_FILE", "Deleted file %s", file)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
storage.DeleteFile(0, "config")
|
||||||
|
LOG_INFO("DELETE_FILE", "Deleted config")
|
||||||
|
|
||||||
|
}
|
||||||
@@ -1,6 +1,6 @@
|
|||||||
// Copyright (c) Acrosync LLC. All rights reserved.
|
// Copyright (c) Acrosync LLC. All rights reserved.
|
||||||
// Licensed under the Fair Source License 0.9 (https://fair.io/)
|
// Free for personal use and commercial trial
|
||||||
// User Limitation: 5 users
|
// Commercial use requires per-user licenses available from https://duplicacy.com
|
||||||
|
|
||||||
package duplicacy
|
package duplicacy
|
||||||
|
|
||||||
@@ -9,7 +9,6 @@ import (
|
|||||||
"os"
|
"os"
|
||||||
"bufio"
|
"bufio"
|
||||||
"io"
|
"io"
|
||||||
"io/ioutil"
|
|
||||||
"time"
|
"time"
|
||||||
"path"
|
"path"
|
||||||
"path/filepath"
|
"path/filepath"
|
||||||
@@ -48,6 +47,16 @@ func (reader *RateLimitedReader) Reset() {
|
|||||||
reader.Next = 0
|
reader.Next = 0
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (reader *RateLimitedReader) Seek(offset int64, whence int) (int64, error) {
|
||||||
|
if whence == io.SeekStart {
|
||||||
|
reader.Next = int(offset)
|
||||||
|
} else if whence == io.SeekCurrent {
|
||||||
|
reader.Next += int(offset)
|
||||||
|
} else {
|
||||||
|
reader.Next = len(reader.Content) - int(offset)
|
||||||
|
}
|
||||||
|
return int64(reader.Next), nil
|
||||||
|
}
|
||||||
|
|
||||||
func (reader *RateLimitedReader) Read(p []byte) (n int, err error) {
|
func (reader *RateLimitedReader) Read(p []byte) (n int, err error) {
|
||||||
|
|
||||||
@@ -109,10 +118,8 @@ func GenerateKeyFromPassword(password string) []byte {
|
|||||||
return pbkdf2.Key([]byte(password), DEFAULT_KEY, 16384, 32, sha256.New)
|
return pbkdf2.Key([]byte(password), DEFAULT_KEY, 16384, 32, sha256.New)
|
||||||
}
|
}
|
||||||
|
|
||||||
// GetPassword attempts to get the password from KeyChain/KeyRing, environment variables, or keyboard input.
|
// Get password from preference, env, but don't start any keyring request
|
||||||
func GetPassword(preference Preference, passwordType string, prompt string,
|
func GetPasswordFromPreference(preference Preference, passwordType string) (string) {
|
||||||
showPassword bool, resetPassword bool) (string) {
|
|
||||||
|
|
||||||
passwordID := passwordType
|
passwordID := passwordType
|
||||||
if preference.Name != "default" {
|
if preference.Name != "default" {
|
||||||
passwordID = preference.Name + "_" + passwordID
|
passwordID = preference.Name + "_" + passwordID
|
||||||
@@ -126,11 +133,31 @@ func GetPassword(preference Preference, passwordType string, prompt string,
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// If the password is stored in the preference, there is no need to include the storage name
|
||||||
|
// (i.e., preference.Name) in the key, so the key name should really be passwordType rather
|
||||||
|
// than passwordID; we're using passwordID here only for backward compatibility
|
||||||
if len(preference.Keys) > 0 && len(preference.Keys[passwordID]) > 0 {
|
if len(preference.Keys) > 0 && len(preference.Keys[passwordID]) > 0 {
|
||||||
LOG_DEBUG("PASSWORD_KEYCHAIN", "Reading %s from preferences", passwordID)
|
LOG_DEBUG("PASSWORD_KEYCHAIN", "Reading %s from preferences", passwordID)
|
||||||
return preference.Keys[passwordID]
|
return preference.Keys[passwordID]
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if len(preference.Keys) > 0 && len(preference.Keys[passwordType]) > 0 {
|
||||||
|
LOG_DEBUG("PASSWORD_KEYCHAIN", "Reading %s from preferences", passwordType)
|
||||||
|
return preference.Keys[passwordType]
|
||||||
|
}
|
||||||
|
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetPassword attempts to get the password from KeyChain/KeyRing, environment variables, or keyboard input.
|
||||||
|
func GetPassword(preference Preference, passwordType string, prompt string,
|
||||||
|
showPassword bool, resetPassword bool) (string) {
|
||||||
|
passwordID := passwordType
|
||||||
|
password := GetPasswordFromPreference(preference,passwordType)
|
||||||
|
if password != "" {
|
||||||
|
return password
|
||||||
|
}
|
||||||
|
|
||||||
if resetPassword && !RunInBackground {
|
if resetPassword && !RunInBackground {
|
||||||
keyringSet(passwordID, "")
|
keyringSet(passwordID, "")
|
||||||
} else {
|
} else {
|
||||||
@@ -146,7 +173,7 @@ func GetPassword(preference Preference, passwordType string, prompt string,
|
|||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
password := ""
|
password = ""
|
||||||
fmt.Printf("%s", prompt)
|
fmt.Printf("%s", prompt)
|
||||||
if showPassword {
|
if showPassword {
|
||||||
scanner := bufio.NewScanner(os.Stdin)
|
scanner := bufio.NewScanner(os.Stdin)
|
||||||
@@ -166,6 +193,7 @@ func GetPassword(preference Preference, passwordType string, prompt string,
|
|||||||
|
|
||||||
// SavePassword saves the specified password in the keyring/keychain.
|
// SavePassword saves the specified password in the keyring/keychain.
|
||||||
func SavePassword(preference Preference, passwordType string, password string) {
|
func SavePassword(preference Preference, passwordType string, password string) {
|
||||||
|
|
||||||
if password == "" || RunInBackground {
|
if password == "" || RunInBackground {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
@@ -173,6 +201,12 @@ func SavePassword(preference Preference, passwordType string, password string) {
|
|||||||
if preference.DoNotSavePassword {
|
if preference.DoNotSavePassword {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// If the password is retrieved from env or preference, don't save it to keyring
|
||||||
|
if GetPasswordFromPreference(preference, passwordType) == password {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
passwordID := passwordType
|
passwordID := passwordType
|
||||||
if preference.Name != "default" {
|
if preference.Name != "default" {
|
||||||
passwordID = preference.Name + "_" + passwordID
|
passwordID = preference.Name + "_" + passwordID
|
||||||
@@ -180,54 +214,6 @@ func SavePassword(preference Preference, passwordType string, password string) {
|
|||||||
keyringSet(passwordID, password)
|
keyringSet(passwordID, password)
|
||||||
}
|
}
|
||||||
|
|
||||||
// RemoveEmptyDirectories remove all empty subdirectoreies under top.
|
|
||||||
func RemoveEmptyDirectories(top string) {
|
|
||||||
|
|
||||||
stack := make([]string, 0, 256)
|
|
||||||
|
|
||||||
stack = append(stack, top)
|
|
||||||
|
|
||||||
for len(stack) > 0 {
|
|
||||||
|
|
||||||
dir := stack[len(stack) - 1]
|
|
||||||
stack = stack[:len(stack) - 1]
|
|
||||||
|
|
||||||
files, err := ioutil.ReadDir(dir)
|
|
||||||
|
|
||||||
if err != nil {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, file := range files {
|
|
||||||
if file.IsDir() && file.Name()[0] != '.' {
|
|
||||||
stack = append(stack, path.Join(dir, file.Name()))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if len(files) == 0 {
|
|
||||||
if os.Remove(dir) != nil {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
dir = path.Dir(dir)
|
|
||||||
for (len(dir) > len(top)) {
|
|
||||||
files, err := ioutil.ReadDir(dir)
|
|
||||||
if err != nil {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
|
|
||||||
if len(files) == 0 {
|
|
||||||
if os.Remove(dir) != nil {
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
dir = path.Dir(dir)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
|
|
||||||
// The following code was modified from the online article 'Matching Wildcards: An Algorithm', by Kirk J. Krauss,
|
// The following code was modified from the online article 'Matching Wildcards: An Algorithm', by Kirk J. Krauss,
|
||||||
// Dr. Dobb's, August 26, 2008. However, the version in the article doesn't handle cases like matching 'abcccd'
|
// Dr. Dobb's, August 26, 2008. However, the version in the article doesn't handle cases like matching 'abcccd'
|
||||||
// against '*ccd', and the version here fixed that issue.
|
// against '*ccd', and the version here fixed that issue.
|
||||||
@@ -321,6 +307,10 @@ func joinPath(components ...string) string {
|
|||||||
combinedPath := path.Join(components...)
|
combinedPath := path.Join(components...)
|
||||||
if len(combinedPath) > 257 && runtime.GOOS == "windows" {
|
if len(combinedPath) > 257 && runtime.GOOS == "windows" {
|
||||||
combinedPath = `\\?\` + filepath.Join(components...)
|
combinedPath = `\\?\` + filepath.Join(components...)
|
||||||
|
// If the path is on a samba drive we must use the UNC format
|
||||||
|
if strings.HasPrefix(combinedPath, `\\?\\\`) {
|
||||||
|
combinedPath = `\\?\UNC\` + combinedPath[6:]
|
||||||
|
}
|
||||||
}
|
}
|
||||||
return combinedPath
|
return combinedPath
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1,6 +1,6 @@
|
|||||||
// Copyright (c) Acrosync LLC. All rights reserved.
|
// Copyright (c) Acrosync LLC. All rights reserved.
|
||||||
// Licensed under the Fair Source License 0.9 (https://fair.io/)
|
// Free for personal use and commercial trial
|
||||||
// User Limitation: 5 users
|
// Commercial use requires per-user licenses available from https://duplicacy.com
|
||||||
|
|
||||||
// +build !windows
|
// +build !windows
|
||||||
|
|
||||||
|
|||||||
@@ -1,6 +1,6 @@
|
|||||||
// Copyright (c) Acrosync LLC. All rights reserved.
|
// Copyright (c) Acrosync LLC. All rights reserved.
|
||||||
// Licensed under the Fair Source License 0.9 (https://fair.io/)
|
// Free for personal use and commercial trial
|
||||||
// User Limitation: 5 users
|
// Commercial use requires per-user licenses available from https://duplicacy.com
|
||||||
|
|
||||||
package duplicacy
|
package duplicacy
|
||||||
|
|
||||||
|
|||||||
@@ -1,6 +1,6 @@
|
|||||||
// Copyright (c) Acrosync LLC. All rights reserved.
|
// Copyright (c) Acrosync LLC. All rights reserved.
|
||||||
// Licensed under the Fair Source License 0.9 (https://fair.io/)
|
// Free for personal use and commercial trial
|
||||||
// User Limitation: 5 users
|
// Commercial use requires per-user licenses available from https://duplicacy.com
|
||||||
|
|
||||||
package duplicacy
|
package duplicacy
|
||||||
|
|
||||||
|
|||||||
Reference in New Issue
Block a user