Mirror of https://github.com/jkl1337/duplicacy.git (synced 2026-01-08 22:54:38 -06:00)

Compare commits: v3.1.0...s3-list-fi (23 commits)
| SHA1 |
|---|
| 915776161f |
| 4e9d2c4cca |
| cc482beb95 |
| bf3ea8a83c |
| 981efc13e6 |
| 6445ecbcde |
| ff207ba5bf |
| 3a81c1065a |
| cdf8f5a857 |
| 1f9ad0e35c |
| 53b0f3f7b6 |
| 9f276047db |
| c237269589 |
| 493ef603e3 |
| 889191a814 |
| df80096cdf |
| 24c2ea76b9 |
| 15b6ef9d76 |
| 75b310b98e |
| 039b749a3e |
| 9be475f876 |
| d7593a828c |
| 238ef63e16 |
```diff
@@ -368,9 +368,8 @@ func configRepository(context *cli.Context, init bool) {
 			"The storage '%s' has already been initialized", preference.StorageURL)
 		if existingConfig.CompressionLevel >= -1 && existingConfig.CompressionLevel <= 9 {
 			duplicacy.LOG_INFO("STORAGE_FORMAT", "This storage is configured to use the pre-1.2.0 format")
-		} else if existingConfig.CompressionLevel != 100 {
-			duplicacy.LOG_ERROR("STORAGE_COMPRESSION", "This storage is configured with an invalid compression level %d", existingConfig.CompressionLevel)
-			return
+		} else if existingConfig.CompressionLevel != duplicacy.DEFAULT_COMPRESSION_LEVEL {
+			duplicacy.LOG_INFO("STORAGE_COMPRESSION", "Compression level: %d", existingConfig.CompressionLevel)
 		}
 
 		// Don't print config in the background mode
@@ -378,8 +377,6 @@ func configRepository(context *cli.Context, init bool) {
 			existingConfig.Print()
 		}
 	} else {
-		compressionLevel := 100
-
 		averageChunkSize := duplicacy.AtoSize(context.String("chunk-size"))
 		if averageChunkSize == 0 {
 			fmt.Fprintf(context.App.Writer, "Invalid average chunk size: %s.\n\n", context.String("chunk-size"))
@@ -487,6 +484,18 @@ func configRepository(context *cli.Context, init bool) {
 			}
 		}
 
+		compressionLevel := 100
+		zstdLevel := context.String("zstd-level")
+		if zstdLevel != "" {
+			if level, found := duplicacy.ZSTD_COMPRESSION_LEVELS[zstdLevel]; found {
+				compressionLevel = level
+			} else {
+				duplicacy.LOG_ERROR("STORAGE_COMPRESSION", "Invalid zstd compression level: %s", zstdLevel)
+			}
+		} else if context.Bool("zstd") {
+			compressionLevel = duplicacy.ZSTD_COMPRESSION_LEVEL_DEFAULT
+		}
+
 		duplicacy.ConfigStorage(storage, iterations, compressionLevel, averageChunkSize, maximumChunkSize,
 			minimumChunkSize, storagePassword, otherConfig, bitCopy, context.String("key"), dataShards, parityShards)
 	}
```
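The hunks above route the new `-zstd-level` and `-zstd` flags into the numeric compression level passed to `duplicacy.ConfigStorage`. Below is a minimal standalone sketch of that selection logic, with the cli context replaced by plain variables and the constants copied from the `duplicacy_config.go` hunk further down; note that the flag help text advertises `fast`, while the key the branch registers in the level map is `fastest`, so `-zstd-level fast` would actually be rejected:

```go
package main

import "fmt"

// Values mirror the ZSTD_COMPRESSION_LEVELS map added in duplicacy_config.go.
var zstdLevels = map[string]int{
	"fastest": 200,
	"default": 201,
	"better":  202,
	"best":    203,
}

// pickCompressionLevel reproduces the precedence in the diff: an explicit
// -zstd-level wins, a bare -zstd selects the zstd default, and otherwise
// the existing LZ4 level 100 is kept.
func pickCompressionLevel(zstdLevel string, zstdFlag bool) (int, error) {
	level := 100 // LZ4, the pre-existing default
	if zstdLevel != "" {
		l, found := zstdLevels[zstdLevel]
		if !found {
			return 0, fmt.Errorf("invalid zstd compression level: %s", zstdLevel)
		}
		level = l
	} else if zstdFlag {
		level = zstdLevels["default"]
	}
	return level, nil
}

func main() {
	for _, args := range []struct {
		level string
		flag  bool
	}{{"", false}, {"", true}, {"best", false}, {"fast", false}} {
		level, err := pickCompressionLevel(args.level, args.flag)
		fmt.Println(level, err) // the last case errors: "fast" is not a map key
	}
}
```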
```diff
@@ -786,6 +795,17 @@ func backupRepository(context *cli.Context) {
 	backupManager.SetupSnapshotCache(preference.Name)
 	backupManager.SetDryRun(dryRun)
 
+	zstdLevel := context.String("zstd-level")
+	if zstdLevel != "" {
+		if level, found := duplicacy.ZSTD_COMPRESSION_LEVELS[zstdLevel]; found {
+			backupManager.SetCompressionLevel(level)
+		} else {
+			duplicacy.LOG_ERROR("STORAGE_COMPRESSION", "Invalid zstd compression level: %s", zstdLevel)
+		}
+	} else if context.Bool("zstd") {
+		backupManager.SetCompressionLevel(duplicacy.ZSTD_COMPRESSION_LEVEL_DEFAULT)
+	}
+
 	metadataChunkSize := context.Int("metadata-chunk-size")
 	maximumInMemoryEntries := context.Int("max-in-memory-entries")
 	backupManager.Backup(repository, quickMode, threads, context.String("t"), showStatistics, enableVSS, vssTimeout, enumOnly, metadataChunkSize, maximumInMemoryEntries)
```
```diff
@@ -1428,6 +1448,15 @@ func main() {
 			Usage:    "the minimum size of chunks (defaults to chunk-size/4)",
 			Argument: "<size>",
 		},
+		cli.StringFlag{
+			Name:     "zstd-level",
+			Usage:    "set zstd compression level (fast, default, better, or best)",
+			Argument: "<level>",
+		},
+		cli.BoolFlag{
+			Name:  "zstd",
+			Usage: "short for -zstd default",
+		},
 		cli.IntFlag{
 			Name:  "iterations",
 			Usage: "the number of iterations used in storage key derivation (default is 16384)",
```
```diff
@@ -1495,6 +1524,15 @@ func main() {
 			Name:  "dry-run",
 			Usage: "dry run for testing, don't backup anything. Use with -stats and -d",
 		},
+		cli.StringFlag{
+			Name:     "zstd-level",
+			Usage:    "set zstd compression level (fast, default, better, or best)",
+			Argument: "<level>",
+		},
+		cli.BoolFlag{
+			Name:  "zstd",
+			Usage: "short for -zstd default",
+		},
 		cli.BoolFlag{
 			Name:  "vss",
 			Usage: "enable the Volume Shadow Copy service (Windows and macOS using APFS only)",
```
```diff
@@ -1938,6 +1976,15 @@ func main() {
 			Usage:    "the minimum size of chunks (default is chunk-size/4)",
 			Argument: "<size>",
 		},
+		cli.StringFlag{
+			Name:     "zstd-level",
+			Usage:    "set zstd compression level (fast, default, better, or best)",
+			Argument: "<level>",
+		},
+		cli.BoolFlag{
+			Name:  "zstd",
+			Usage: "short for -zstd default",
+		},
 		cli.IntFlag{
 			Name:  "iterations",
 			Usage: "the number of iterations used in storage key derivation (default is 16384)",
```
```diff
@@ -2215,7 +2262,7 @@ func main() {
 	app.Name = "duplicacy"
 	app.HelpName = "duplicacy"
 	app.Usage = "A new generation cloud backup tool based on lock-free deduplication"
-	app.Version = "3.1.0" + " (" + GitCommit + ")"
+	app.Version = "3.2.0" + " (" + GitCommit + ")"
 
 	// Exit with code 2 if an invalid command is provided
 	app.CommandNotFound = func(context *cli.Context, command string) {
```

go.mod (4 changed lines)
```diff
@@ -9,7 +9,7 @@ require (
 	github.com/bkaradzic/go-lz4 v1.0.0
 	github.com/gilbertchen/azure-sdk-for-go v14.1.2-0.20180323033227-8fd4663cab7c+incompatible
 	github.com/gilbertchen/cli v1.2.1-0.20160223210219-1de0a1836ce9
-	github.com/gilbertchen/go-dropbox v0.0.0-20221207034530-08c0c180a4f9
+	github.com/gilbertchen/go-dropbox v0.0.0-20230321030224-087ef8db1916
 	github.com/gilbertchen/go-ole v1.2.0
 	github.com/gilbertchen/goamz v0.0.0-20170712012135-eada9f4e8cc2
 	github.com/gilbertchen/gopass v0.0.0-20170109162249-bf9dde6d0d2c
```
```diff
@@ -40,7 +40,9 @@ require (
 	github.com/gogo/protobuf v1.3.2 // indirect
 	github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e // indirect
 	github.com/googleapis/gax-go/v2 v2.0.5 // indirect
+	github.com/hirochachacha/go-smb2 v1.1.0 // indirect
 	github.com/jmespath/go-jmespath v0.3.0 // indirect
+	github.com/klauspost/compress v1.16.3 // indirect
 	github.com/klauspost/cpuid v1.3.1 // indirect
 	github.com/kr/fs v0.1.0 // indirect
 	github.com/kr/text v0.2.0 // indirect
```

go.sum (9 changed lines)
```diff
@@ -49,6 +49,8 @@ github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI
 github.com/francoispqt/gojay v1.2.13/go.mod h1:ehT5mTG4ua4581f1++1WLG0vPdaA9HaiDsoyrBGkyDY=
 github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
 github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
+github.com/geoffgarside/ber v1.1.0 h1:qTmFG4jJbwiSzSXoNJeHcOprVzZ8Ulde2Rrrifu5U9w=
+github.com/geoffgarside/ber v1.1.0/go.mod h1:jVPKeCbj6MvQZhwLYsGwaGI52oUorHoHKNecGT85ZCc=
 github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
 github.com/gilbertchen/azure-sdk-for-go v14.1.2-0.20180323033227-8fd4663cab7c+incompatible h1:2fZxTUw5D9uGWnYTsU/obVavn+1qTF+TsVok3U8uN2Q=
 github.com/gilbertchen/azure-sdk-for-go v14.1.2-0.20180323033227-8fd4663cab7c+incompatible/go.mod h1:qsVRCpBUm2l0eMUeI9wZ47yzra2+lv2YkGhMZpzBVUc=
```
```diff
@@ -60,6 +62,8 @@ github.com/gilbertchen/go-dropbox v0.0.0-20221128142034-9910c19f1d13 h1:54e1HiEX
 github.com/gilbertchen/go-dropbox v0.0.0-20221128142034-9910c19f1d13/go.mod h1:85+2CRHC/klHy4vEM+TYtbhDo2wMjPa4JNdVzUHsDIk=
 github.com/gilbertchen/go-dropbox v0.0.0-20221207034530-08c0c180a4f9 h1:3hJHxOyf/rAWWz9GNyai0hSt56vGMATS9B6yjw/bzzk=
 github.com/gilbertchen/go-dropbox v0.0.0-20221207034530-08c0c180a4f9/go.mod h1:85+2CRHC/klHy4vEM+TYtbhDo2wMjPa4JNdVzUHsDIk=
+github.com/gilbertchen/go-dropbox v0.0.0-20230321030224-087ef8db1916 h1:7VpJiGwW51MB7yJ5e27Ar/ej8Yu7WuU2SEo409qPoNs=
+github.com/gilbertchen/go-dropbox v0.0.0-20230321030224-087ef8db1916/go.mod h1:85+2CRHC/klHy4vEM+TYtbhDo2wMjPa4JNdVzUHsDIk=
 github.com/gilbertchen/go-ole v1.2.0 h1:ay65uwxo6w8UVOxN0+fuCqUXGaXxbmkGs5m4uY6e1Zw=
 github.com/gilbertchen/go-ole v1.2.0/go.mod h1:NNiozp7QxhyGmHxxNdFKIcVaINvJFTAjBJ2gYzh8fsg=
 github.com/gilbertchen/goamz v0.0.0-20170712012135-eada9f4e8cc2 h1:VDPwi3huqeJBtymgLOvPAP4S2gbSSK/UrWVwRbRAmnw=
```
```diff
@@ -132,6 +136,8 @@ github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:Fecb
 github.com/grpc-ecosystem/grpc-gateway v1.5.0/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw=
 github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
 github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
+github.com/hirochachacha/go-smb2 v1.1.0 h1:b6hs9qKIql9eVXAiN0M2wSFY5xnhbHAQoCwRKbaRTZI=
+github.com/hirochachacha/go-smb2 v1.1.0/go.mod h1:8F1A4d5EZzrGu5R7PU163UcMRDJQl4FtcxjBfsY8TZE=
 github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
 github.com/ianlancetaylor/demangle v0.0.0-20210905161508-09a460cdf81d/go.mod h1:aYm2/VgdVmcIU8iMfdMvDMsRAQjcfZSKFby6HOFvi/w=
 github.com/jellevandenhooff/dkim v0.0.0-20150330215556-f50fe3d243e1/go.mod h1:E0B/fFc00Y+Rasa88328GlI/XbtyysCtTHZS8h7IrBU=
```
```diff
@@ -141,6 +147,8 @@ github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCV
 github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
 github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
 github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
+github.com/klauspost/compress v1.16.3 h1:XuJt9zzcnaz6a16/OU53ZjWp/v7/42WcR5t2a0PcNQY=
+github.com/klauspost/compress v1.16.3/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE=
 github.com/klauspost/cpuid v1.2.4/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
 github.com/klauspost/cpuid v1.3.1 h1:5JNjFYYQrZeKRJ0734q51WCEEn2huer72Dc7K+R/b6s=
 github.com/klauspost/cpuid v1.3.1/go.mod h1:bYW4mA6ZgKPob1/Dlai2LviZJO7KGI3uoWLd42rAQw4=
```
```diff
@@ -281,6 +289,7 @@ golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8U
 golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
 golang.org/x/crypto v0.0.0-20200221231518-2aa609cf4a9d/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
 golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+golang.org/x/crypto v0.0.0-20200728195943-123391ffb6de/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
 golang.org/x/crypto v0.0.0-20220131195533-30dcbda58838 h1:71vQrMauZZhcTVK6KdYM+rklehEEwb3E+ZhaE5jrPrE=
 golang.org/x/crypto v0.0.0-20220131195533-30dcbda58838/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
 golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
```
```diff
@@ -329,7 +329,7 @@ func (client *B2Client) AuthorizeAccount(threadIndex int) (err error, allowed bo
 	if client.DownloadURL == "" {
 		client.DownloadURL = output.DownloadURL
 	}
-	LOG_INFO("BACKBLAZE_URL", "download URL is: %s", client.DownloadURL)
+	LOG_INFO("BACKBLAZE_URL", "Download URL is: %s", client.DownloadURL)
 	client.IsAuthorized = true
 
 	client.LastAuthorizationTime = time.Now().Unix()
```
```diff
@@ -584,8 +584,26 @@ func (client *B2Client) HideFile(threadIndex int, fileName string) (fileID strin
 
 func (client *B2Client) DownloadFile(threadIndex int, filePath string) (io.ReadCloser, int64, error) {
 
-	url := client.getDownloadURL() + "/file/" + client.BucketName + "/" + B2Escape(client.StorageDir + filePath)
+	if !strings.HasSuffix(filePath, ".fsl") {
+		url := client.getDownloadURL() + "/file/" + client.BucketName + "/" + B2Escape(client.StorageDir + filePath)
+
+		readCloser, _, len, err := client.call(threadIndex, url, http.MethodGet, make(map[string]string), 0)
+		return readCloser, len, err
+	}
+
+	// We're trying to download a fossil file. We need to find the file ID of the last 'upload' of the file.
+	filePath = strings.TrimSuffix(filePath, ".fsl")
+	entries, err := client.ListFileNames(threadIndex, filePath, true, true)
+	fileId := ""
+	for _, entry := range entries {
+		if entry.FileName == filePath && entry.Action == "upload" && entry.Size > 0 {
+			fileId = entry.FileID
+			break
+		}
+	}
+
+	// Proceed with the b2_download_file_by_id call
+	url := client.getAPIURL() + "/b2api/v1/b2_download_file_by_id?fileId=" + fileId
 	readCloser, _, len, err := client.call(threadIndex, url, http.MethodGet, make(map[string]string), 0)
 	return readCloser, len, err
 }
```
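In B2, Duplicacy turns a chunk into a fossil by appending `.fsl` and hiding the original, so a fossil cannot be fetched by name; the hunk above instead resolves the file ID of the most recent real upload and downloads by ID. A self-contained sketch of that resolution step, using a hypothetical `fileVersion` stand-in for the SDK's file-version entries (the real code calls `client.ListFileNames`, which returns versions newest first):

```go
package main

import (
	"fmt"
	"strings"
)

// fileVersion is a hypothetical stand-in for B2 file-version entries.
type fileVersion struct {
	FileName string
	Action   string // "upload" or "hide"
	Size     int64
	FileID   string
}

// resolveFossilID trims the ".fsl" suffix and picks the first (most recent)
// "upload" version with a non-zero size, mirroring the loop in the diff.
func resolveFossilID(filePath string, versions []fileVersion) (string, bool) {
	name := strings.TrimSuffix(filePath, ".fsl")
	for _, v := range versions {
		if v.FileName == name && v.Action == "upload" && v.Size > 0 {
			return v.FileID, true
		}
	}
	return "", false
}

func main() {
	versions := []fileVersion{
		{FileName: "chunks/ab/cdef", Action: "hide", Size: 0, FileID: "v2"},
		{FileName: "chunks/ab/cdef", Action: "upload", Size: 4096, FileID: "v1"},
	}
	id, ok := resolveFossilID("chunks/ab/cdef.fsl", versions)
	fmt.Println(id, ok) // v1 true
}
```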
```diff
@@ -47,6 +47,10 @@ func (manager *BackupManager) SetDryRun(dryRun bool) {
 	manager.config.dryRun = dryRun
 }
 
+func (manager *BackupManager) SetCompressionLevel(level int) {
+	manager.config.CompressionLevel = level
+}
+
 // CreateBackupManager creates a backup manager using the specified 'storage'. 'snapshotID' is a unique id to
 // identify snapshots created for this repository. 'top' is the top directory of the repository. 'password' is the
 // master key which can be nil if encryption is not enabled.
```
```diff
@@ -138,6 +142,8 @@ func (manager *BackupManager) Backup(top string, quickMode bool, threads int, ta
 
 	LOG_DEBUG("BACKUP_PARAMETERS", "top: %s, quick: %t, tag: %s", top, quickMode, tag)
 
+	manager.config.PrintCompressionLevel()
+
 	if manager.config.DataShards != 0 && manager.config.ParityShards != 0 {
 		LOG_INFO("BACKUP_ERASURECODING", "Erasure coding is enabled with %d data shards and %d parity shards",
 			manager.config.DataShards, manager.config.ParityShards)
```
```diff
@@ -1552,7 +1558,7 @@ func (manager *BackupManager) RestoreFile(chunkDownloader *ChunkDownloader, chun
 func (manager *BackupManager) CopySnapshots(otherManager *BackupManager, snapshotID string,
 	revisionsToBeCopied []int, uploadingThreads int, downloadingThreads int) bool {
 
-	if !manager.config.IsCompatiableWith(otherManager.config) {
+	if !manager.config.IsCompatibleWith(otherManager.config) {
 		LOG_ERROR("CONFIG_INCOMPATIBLE", "Two storages are not compatible for the copy operation")
 		return false
 	}
```
```diff
@@ -24,6 +24,7 @@ import (
 	"github.com/bkaradzic/go-lz4"
 	"github.com/minio/highwayhash"
 	"github.com/klauspost/reedsolomon"
+	"github.com/klauspost/compress/zstd"
 
 	// This is a fork of github.com/minio/highwayhash at 1.0.1 that computes incorrect hash on
 	// arm64 machines. We need this fork to be able to read the chunks created by Duplicacy
```
```diff
@@ -267,6 +268,38 @@ func (chunk *Chunk) Encrypt(encryptionKey []byte, derivationKey string, isMetada
 		deflater, _ := zlib.NewWriterLevel(encryptedBuffer, chunk.config.CompressionLevel)
 		deflater.Write(chunk.buffer.Bytes())
 		deflater.Close()
+	} else if chunk.config.CompressionLevel >= ZSTD_COMPRESSION_LEVEL_FASTEST && chunk.config.CompressionLevel <= ZSTD_COMPRESSION_LEVEL_BEST {
+		encryptedBuffer.Write([]byte("ZSTD"))
+
+		compressionLevel := zstd.SpeedDefault
+		if chunk.config.CompressionLevel == ZSTD_COMPRESSION_LEVEL_FASTEST {
+			compressionLevel = zstd.SpeedFastest
+		} else if chunk.config.CompressionLevel == ZSTD_COMPRESSION_LEVEL_BETTER {
+			compressionLevel = zstd.SpeedBetterCompression
+		} else if chunk.config.CompressionLevel == ZSTD_COMPRESSION_LEVEL_BEST {
+			compressionLevel = zstd.SpeedBestCompression
+		}
+
+		deflater, err := zstd.NewWriter(encryptedBuffer, zstd.WithEncoderLevel(compressionLevel))
+		if err != nil {
+			return err
+		}
+
+		// Make sure we have enough space in encryptedBuffer
+		availableLength := encryptedBuffer.Cap() - len(encryptedBuffer.Bytes())
+		maximumLength := deflater.MaxEncodedSize(chunk.buffer.Len())
+		if availableLength < maximumLength {
+			encryptedBuffer.Grow(maximumLength - availableLength)
+		}
+		_, err = deflater.Write(chunk.buffer.Bytes())
+		if err != nil {
+			return fmt.Errorf("ZSTD compression error: %v", err)
+		}
+
+		err = deflater.Close()
+		if err != nil {
+			return fmt.Errorf("ZSTD compression error: %v", err)
+		}
 	} else if chunk.config.CompressionLevel == DEFAULT_COMPRESSION_LEVEL {
 		encryptedBuffer.Write([]byte("LZ4 "))
 		// Make sure we have enough space in encryptedBuffer
```
```diff
@@ -361,7 +394,6 @@ func (chunk *Chunk) Encrypt(encryptionKey []byte, derivationKey string, isMetada
 	chunk.buffer.Write(header)
 
 	return nil
-
 }
 
 // This is to ensure compatibility with Vertical Backup, which still uses HMAC-SHA256 (instead of HMAC-BLAKE2) to
```
```diff
@@ -633,6 +665,24 @@ func (chunk *Chunk) Decrypt(encryptionKey []byte, derivationKey string) (err err
 		chunk.hash = nil
 		return nil, rewriteNeeded
 	}
 
+	if len(compressed) > 4 && string(compressed[:4]) == "ZSTD" {
+		chunk.buffer.Reset()
+		chunk.hasher = chunk.config.NewKeyedHasher(chunk.config.HashKey)
+		chunk.hash = nil
+
+		encryptedBuffer.Read(encryptedBuffer.Bytes()[:4])
+		inflater, err := zstd.NewReader(encryptedBuffer)
+		if err != nil {
+			return err, false
+		}
+		defer inflater.Close()
+		if _, err = io.Copy(chunk, inflater); err != nil {
+			return err, false
+		}
+		return nil, rewriteNeeded
+	}
+
 	inflater, err := zlib.NewReader(encryptedBuffer)
 	if err != nil {
 		return err, false
```
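Together, the Encrypt and Decrypt hunks define the new on-wire layout: a 4-byte `ZSTD` tag followed by a standard zstd frame, parallel to the existing `LZ4 ` tag. A minimal round trip of that layout using `github.com/klauspost/compress/zstd` (the dependency the branch adds in go.mod), stripped of Duplicacy's buffer pooling and encryption:

```go
package main

import (
	"bytes"
	"fmt"
	"io"

	"github.com/klauspost/compress/zstd"
)

func compress(data []byte, level zstd.EncoderLevel) ([]byte, error) {
	buffer := &bytes.Buffer{}
	buffer.Write([]byte("ZSTD")) // magic tag checked by Decrypt
	writer, err := zstd.NewWriter(buffer, zstd.WithEncoderLevel(level))
	if err != nil {
		return nil, err
	}
	if _, err = writer.Write(data); err != nil {
		return nil, err
	}
	if err = writer.Close(); err != nil { // flushes the final zstd frame
		return nil, err
	}
	return buffer.Bytes(), nil
}

func decompress(data []byte) ([]byte, error) {
	if len(data) <= 4 || string(data[:4]) != "ZSTD" {
		return nil, fmt.Errorf("not a ZSTD-tagged payload")
	}
	reader, err := zstd.NewReader(bytes.NewReader(data[4:]))
	if err != nil {
		return nil, err
	}
	defer reader.Close()
	return io.ReadAll(reader)
}

func main() {
	original := bytes.Repeat([]byte("duplicacy chunk data "), 1000)
	packed, _ := compress(original, zstd.SpeedDefault)
	unpacked, _ := decompress(packed)
	fmt.Println(len(original), len(packed), bytes.Equal(original, unpacked))
}
```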
```diff
@@ -35,6 +35,19 @@ var DEFAULT_KEY = []byte("duplicacy")
 // standard zlib levels of -1 to 9.
 var DEFAULT_COMPRESSION_LEVEL = 100
 
+// zstd compression levels starting from 200
+var ZSTD_COMPRESSION_LEVEL_FASTEST = 200
+var ZSTD_COMPRESSION_LEVEL_DEFAULT = 201
+var ZSTD_COMPRESSION_LEVEL_BETTER = 202
+var ZSTD_COMPRESSION_LEVEL_BEST = 203
+
+var ZSTD_COMPRESSION_LEVELS = map[string]int{
+	"fastest": ZSTD_COMPRESSION_LEVEL_FASTEST,
+	"default": ZSTD_COMPRESSION_LEVEL_DEFAULT,
+	"better":  ZSTD_COMPRESSION_LEVEL_BETTER,
+	"best":    ZSTD_COMPRESSION_LEVEL_BEST,
+}
+
 // The new banner of the config file (to differentiate from the old format where the salt and iterations are fixed)
 var CONFIG_BANNER = "duplicacy\001"
```
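These constants partition the single `CompressionLevel` integer into disjoint codec ranges: -1..9 remains zlib for pre-1.2.0 storages, 100 stays LZ4, and 200-203 select zstd. A small classifier makes the dispatch explicit; the ranges are a restatement of the hunk above, not new behavior:

```go
package main

import "fmt"

// codecForLevel maps a stored CompressionLevel onto the codec it selects.
func codecForLevel(level int) string {
	switch {
	case level >= -1 && level <= 9:
		return "zlib" // pre-1.2.0 storage format
	case level == 100:
		return "lz4" // DEFAULT_COMPRESSION_LEVEL
	case level >= 200 && level <= 203:
		return "zstd" // fastest/default/better/best
	default:
		return "invalid"
	}
}

func main() {
	for _, level := range []int{-1, 9, 100, 200, 203, 150} {
		fmt.Println(level, codecForLevel(level))
	}
}
```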
```diff
@@ -156,10 +169,9 @@ func (config *Config) UnmarshalJSON(description []byte) (err error) {
 	return nil
 }
 
-func (config *Config) IsCompatiableWith(otherConfig *Config) bool {
+func (config *Config) IsCompatibleWith(otherConfig *Config) bool {
 
-	return config.CompressionLevel == otherConfig.CompressionLevel &&
-		config.AverageChunkSize == otherConfig.AverageChunkSize &&
+	return config.AverageChunkSize == otherConfig.AverageChunkSize &&
 		config.MaximumChunkSize == otherConfig.MaximumChunkSize &&
 		config.MinimumChunkSize == otherConfig.MinimumChunkSize &&
 		bytes.Equal(config.ChunkSeed, otherConfig.ChunkSeed) &&
```
```diff
@@ -202,6 +214,14 @@ func (config *Config) Print() {
 
 }
 
+func (config *Config) PrintCompressionLevel() {
+	for name, level := range ZSTD_COMPRESSION_LEVELS {
+		if level == config.CompressionLevel {
+			LOG_INFO("COMPRESSION_LEVEL", "Zstd compression is enabled (level: %s)", name)
+		}
+	}
+}
+
 func CreateConfigFromParameters(compressionLevel int, averageChunkSize int, maximumChunkSize int, mininumChunkSize int,
 	isEncrypted bool, copyFrom *Config, bitCopy bool) (config *Config) {
```
```diff
@@ -234,7 +254,6 @@ func CreateConfigFromParameters(compressionLevel int, averageChunkSize int, maxi
 	}
 
 	if copyFrom != nil {
-		config.CompressionLevel = copyFrom.CompressionLevel
 
 		config.AverageChunkSize = copyFrom.AverageChunkSize
 		config.MaximumChunkSize = copyFrom.MaximumChunkSize
```
```diff
@@ -244,6 +263,8 @@ func CreateConfigFromParameters(compressionLevel int, averageChunkSize int, maxi
 		config.HashKey = copyFrom.HashKey
 
 		if bitCopy {
+			config.CompressionLevel = copyFrom.CompressionLevel
+
 			config.IDKey = copyFrom.IDKey
 			config.ChunkKey = copyFrom.ChunkKey
 			config.FileKey = copyFrom.FileKey
```
```diff
@@ -294,7 +315,10 @@ func (config *Config) PutChunk(chunk *Chunk) {
 }
 
 func (config *Config) NewKeyedHasher(key []byte) hash.Hash {
-	if config.CompressionLevel == DEFAULT_COMPRESSION_LEVEL {
+	// Early versions of Duplicacy used SHA256 as the hash function for chunk IDs at the time when
+	// only zlib compression was supported. Later SHA256 was replaced by Blake2b and LZ4 was used
+	// for compression (with compression level set to 100).
+	if config.CompressionLevel >= DEFAULT_COMPRESSION_LEVEL {
 		hasher, err := blake2.New(&blake2.Config{Size: 32, Key: key})
 		if err != nil {
 			LOG_ERROR("HASH_KEY", "Invalid hash key: %x", key)
```
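The comparison change from `==` to `>=` matters for deduplication: chunk IDs must not depend on which modern codec compressed a chunk, or identical content stored under LZ4 and zstd would hash differently. A sketch of the predicate (`NewFileHasher` in the next hunk gets the same treatment):

```go
package main

import "fmt"

const defaultCompressionLevel = 100 // LZ4

// usesBlake2 reports whether a level selects the modern keyed-Blake2b hashing
// path; only pre-1.2.0 zlib storages (-1..9) fall through to legacy SHA256.
func usesBlake2(level int) bool {
	return level >= defaultCompressionLevel // true for LZ4 (100) and zstd (200-203)
}

func main() {
	for _, level := range []int{9, 100, 201} {
		fmt.Printf("level %d -> blake2: %v\n", level, usesBlake2(level))
	}
}
```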
```diff
@@ -339,7 +363,7 @@ func (hasher *DummyHasher) BlockSize() int {
 func (config *Config) NewFileHasher() hash.Hash {
 	if SkipFileHash {
 		return &DummyHasher{}
-	} else if config.CompressionLevel == DEFAULT_COMPRESSION_LEVEL {
+	} else if config.CompressionLevel >= DEFAULT_COMPRESSION_LEVEL {
 		hasher, _ := blake2.New(&blake2.Config{Size: 32})
 		return hasher
 	} else {
```
```diff
@@ -550,7 +550,7 @@ func loadIncompleteSnapshot(snapshotID string, cachePath string) *EntryList {
 		}
 	}
 
-	LOG_INFO("INCOMPLETE_LOAD", "Previous incomlete backup contains %d files and %d chunks",
+	LOG_INFO("INCOMPLETE_LOAD", "Previous incomplete backup contains %d files and %d chunks",
 		entryList.NumberOfEntries, len(entryList.PreservedChunkLengths) + len(entryList.UploadedChunkHashes))
 
 	return entryList
```
```diff
@@ -571,4 +571,4 @@ func deleteIncompleteSnapshot(cachePath string) {
 	}
 
 
 }
```
```diff
@@ -775,7 +775,7 @@ func (storage *GCDStorage) GetFileInfo(threadIndex int, filePath string) (exist
 // DownloadFile reads the file at 'filePath' into the chunk.
 func (storage *GCDStorage) DownloadFile(threadIndex int, filePath string, chunk *Chunk) (err error) {
 	// We never download the fossil so there is no need to convert the path
-	fileID, err := storage.getIDFromPath(threadIndex, filePath, false)
+	fileID, err := storage.getIDFromPath(threadIndex, storage.convertFilePath(filePath), false)
 	if err != nil {
 		return err
 	}
```
```diff
@@ -5,6 +5,7 @@
 package duplicacy
 
 import (
+	"context"
 	"bytes"
 	"encoding/json"
 	"fmt"
```
```diff
@@ -39,6 +40,7 @@ type OneDriveClient struct {
 
 	TokenFile string
 	Token     *oauth2.Token
+	OAConfig  *oauth2.Config
 	TokenLock *sync.Mutex
 
 	IsConnected bool
```
```diff
@@ -49,7 +51,7 @@ type OneDriveClient struct {
 	APIURL string
 }
 
-func NewOneDriveClient(tokenFile string, isBusiness bool) (*OneDriveClient, error) {
+func NewOneDriveClient(tokenFile string, isBusiness bool, client_id string, client_secret string, drive_id string) (*OneDriveClient, error) {
 
 	description, err := ioutil.ReadFile(tokenFile)
 	if err != nil {
@@ -65,16 +67,34 @@ func NewOneDriveClient(tokenFile string, isBusiness bool) (*OneDriveClient, erro
 		HTTPClient: http.DefaultClient,
 		TokenFile:  tokenFile,
 		Token:      token,
+		OAConfig:   nil,
 		TokenLock:  &sync.Mutex{},
 		IsBusiness: isBusiness,
 	}
 
+	if (client_id != "") {
+		oneOauthConfig := oauth2.Config{
+			ClientID:     client_id,
+			ClientSecret: client_secret,
+			Scopes:       []string{"Files.ReadWrite", "offline_access"},
+			Endpoint: oauth2.Endpoint{
+				AuthURL:  "https://login.microsoftonline.com/common/oauth2/v2.0/authorize",
+				TokenURL: "https://login.microsoftonline.com/common/oauth2/v2.0/token",
+			},
+		}
+
+		client.OAConfig = &oneOauthConfig
+	}
+
 	if isBusiness {
 		client.RefreshTokenURL = "https://duplicacy.com/odb_refresh"
-		client.APIURL = "https://graph.microsoft.com/v1.0/me"
+		client.APIURL = "https://graph.microsoft.com/v1.0/me/drive"
+		if drive_id != "" {
+			client.APIURL = "https://graph.microsoft.com/v1.0/drives/"+drive_id
+		}
 	} else {
 		client.RefreshTokenURL = "https://duplicacy.com/one_refresh"
-		client.APIURL = "https://api.onedrive.com/v1.0"
+		client.APIURL = "https://api.onedrive.com/v1.0/drive"
 	}
 
 	client.RefreshToken(false)
```
```diff
@@ -218,15 +238,25 @@ func (client *OneDriveClient) RefreshToken(force bool) (err err
 		return nil
 	}
 
-	readCloser, _, err := client.call(client.RefreshTokenURL, "POST", client.Token, "")
-	if err != nil {
-		return fmt.Errorf("failed to refresh the access token: %v", err)
-	}
-
-	defer readCloser.Close()
-
-	if err = json.NewDecoder(readCloser).Decode(client.Token); err != nil {
-		return err
+	if (client.OAConfig == nil) {
+		readCloser, _, err := client.call(client.RefreshTokenURL, "POST", client.Token, "")
+		if err != nil {
+			return fmt.Errorf("failed to refresh the access token: %v", err)
+		}
+
+		defer readCloser.Close()
+
+		if err = json.NewDecoder(readCloser).Decode(client.Token); err != nil {
+			return err
+		}
+	} else {
+		ctx := context.Background()
+		tokenSource := client.OAConfig.TokenSource(ctx, client.Token)
+		token, err := tokenSource.Token()
+		if err != nil {
+			return fmt.Errorf("failed to refresh the access token: %v", err)
+		}
+		client.Token = token
 	}
 
 	description, err := json.Marshal(client.Token)
```
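When a custom `client_id` is configured, the refresh path above bypasses the duplicacy.com relay and refreshes directly against the Microsoft identity endpoints through `oauth2.Config.TokenSource`. A condensed sketch of that path; the client id, secret, and stored refresh token below are placeholders:

```go
package main

import (
	"context"
	"fmt"

	"golang.org/x/oauth2"
)

func refresh(stale *oauth2.Token) (*oauth2.Token, error) {
	config := &oauth2.Config{
		ClientID:     "your-client-id",     // placeholder
		ClientSecret: "your-client-secret", // placeholder
		Scopes:       []string{"Files.ReadWrite", "offline_access"},
		Endpoint: oauth2.Endpoint{
			AuthURL:  "https://login.microsoftonline.com/common/oauth2/v2.0/authorize",
			TokenURL: "https://login.microsoftonline.com/common/oauth2/v2.0/token",
		},
	}

	// TokenSource returns the cached token while it is still valid and
	// performs a refresh_token grant against TokenURL once it expires.
	source := config.TokenSource(context.Background(), stale)
	return source.Token()
}

func main() {
	token, err := refresh(&oauth2.Token{RefreshToken: "stored-refresh-token"})
	fmt.Println(token, err)
}
```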
```diff
@@ -258,9 +288,9 @@ func (client *OneDriveClient) ListEntries(path string) ([]OneDriveEntry, error)
 
 	entries := []OneDriveEntry{}
 
-	url := client.APIURL + "/drive/root:/" + path + ":/children"
+	url := client.APIURL + "/root:/" + path + ":/children"
 	if path == "" {
-		url = client.APIURL + "/drive/root/children"
+		url = client.APIURL + "/root/children"
 	}
 	if client.TestMode {
 		url += "?top=8"
```
```diff
@@ -296,7 +326,8 @@ func (client *OneDriveClient) ListEntries(path string) ([]OneDriveEntry, error)
 
 func (client *OneDriveClient) GetFileInfo(path string) (string, bool, int64, error) {
 
-	url := client.APIURL + "/drive/root:/" + path
+	url := client.APIURL + "/root:/" + path
+	if path == "" { url = client.APIURL + "/root" }
 	url += "?select=id,name,size,folder"
 
 	readCloser, _, err := client.call(url, "GET", 0, "")
```
```diff
@@ -321,7 +352,7 @@ func (client *OneDriveClient) GetFileInfo(path string) (string, bool, int64, err
 
 func (client *OneDriveClient) DownloadFile(path string) (io.ReadCloser, int64, error) {
 
-	url := client.APIURL + "/drive/items/root:/" + path + ":/content"
+	url := client.APIURL + "/items/root:/" + path + ":/content"
 
 	return client.call(url, "GET", 0, "")
 }
```
```diff
@@ -331,7 +362,7 @@ func (client *OneDriveClient) UploadFile(path string, content []byte, rateLimit
 	// Upload file using the simple method; this is only possible for OneDrive Personal or if the file
 	// is smaller than 4MB for OneDrive Business
 	if !client.IsBusiness || (client.TestMode && rand.Int() % 2 == 0) {
-		url := client.APIURL + "/drive/root:/" + path + ":/content"
+		url := client.APIURL + "/root:/" + path + ":/content"
 
 		readCloser, _, err := client.call(url, "PUT", CreateRateLimitedReader(content, rateLimit), "application/octet-stream")
 		if err != nil {
```
```diff
@@ -365,7 +396,7 @@ func (client *OneDriveClient) CreateUploadSession(path string) (uploadURL string
 		},
 	}
 
-	readCloser, _, err := client.call(client.APIURL + "/drive/root:/" + path + ":/createUploadSession", "POST", input, "application/json")
+	readCloser, _, err := client.call(client.APIURL + "/root:/" + path + ":/createUploadSession", "POST", input, "application/json")
 	if err != nil {
 		return "", err
 	}
```
```diff
@@ -409,7 +440,7 @@ func (client *OneDriveClient) UploadFileSession(uploadURL string, content []byte
 
 func (client *OneDriveClient) DeleteFile(path string) error {
 
-	url := client.APIURL + "/drive/root:/" + path
+	url := client.APIURL + "/root:/" + path
 
 	readCloser, _, err := client.call(url, "DELETE", 0, "")
 	if err != nil {
```
```diff
@@ -422,10 +453,10 @@ func (client *OneDriveClient) DeleteFile(path string) error {
 
 func (client *OneDriveClient) MoveFile(path string, parent string) error {
 
-	url := client.APIURL + "/drive/root:/" + path
+	url := client.APIURL + "/root:/" + path
 
 	parentReference := make(map[string]string)
-	parentReference["path"] = "/drive/root:/" + parent
+	parentReference["path"] = "/root:/" + parent
 
 	parameters := make(map[string]interface{})
 	parameters["parentReference"] = parentReference
```
```diff
@@ -477,7 +508,7 @@ func (client *OneDriveClient) CreateDirectory(path string, name string) error {
 			return fmt.Errorf("The path '%s' is not a directory", path)
 		}
 
-		url = client.APIURL + "/drive/root:/" + path + ":/children"
+		url = client.APIURL + "/root:/" + path + ":/children"
 	}
 
 	parameters := make(map[string]interface{})
```
```diff
@@ -19,13 +19,13 @@ type OneDriveStorage struct {
 }
 
 // CreateOneDriveStorage creates an OneDrive storage object.
-func CreateOneDriveStorage(tokenFile string, isBusiness bool, storagePath string, threads int) (storage *OneDriveStorage, err error) {
+func CreateOneDriveStorage(tokenFile string, isBusiness bool, storagePath string, threads int, client_id string, client_secret string, drive_id string) (storage *OneDriveStorage, err error) {
 
 	for len(storagePath) > 0 && storagePath[len(storagePath)-1] == '/' {
 		storagePath = storagePath[:len(storagePath)-1]
 	}
 
-	client, err := NewOneDriveClient(tokenFile, isBusiness)
+	client, err := NewOneDriveClient(tokenFile, isBusiness, client_id, client_secret, drive_id)
 	if err != nil {
 		return nil, err
 	}
```
```diff
@@ -90,48 +90,40 @@ func (storage *S3Storage) ListFiles(threadIndex int, dir string) (files []string
 
 	if dir == "snapshots/" {
 		dir = storage.storageDir + dir
-		input := s3.ListObjectsInput{
+		input := s3.ListObjectsV2Input{
 			Bucket:    aws.String(storage.bucket),
 			Prefix:    aws.String(dir),
 			Delimiter: aws.String("/"),
-			MaxKeys:   aws.Int64(1000),
 		}
 
-		output, err := storage.client.ListObjects(&input)
+		err := storage.client.ListObjectsV2Pages(&input, func(page *s3.ListObjectsV2Output, lastPage bool) bool {
+			for _, subDir := range page.CommonPrefixes {
+				files = append(files, (*subDir.Prefix)[len(dir):])
+			}
+			return true
+		})
 		if err != nil {
 			return nil, nil, err
 		}
 
-		for _, subDir := range output.CommonPrefixes {
-			files = append(files, (*subDir.Prefix)[len(dir):])
-		}
 		return files, nil, nil
 	} else {
 		dir = storage.storageDir + dir
-		marker := ""
-		for {
-			input := s3.ListObjectsInput{
-				Bucket:  aws.String(storage.bucket),
-				Prefix:  aws.String(dir),
-				MaxKeys: aws.Int64(1000),
-				Marker:  aws.String(marker),
-			}
-
-			output, err := storage.client.ListObjects(&input)
-			if err != nil {
-				return nil, nil, err
-			}
-
-			for _, object := range output.Contents {
-				files = append(files, (*object.Key)[len(dir):])
-				sizes = append(sizes, *object.Size)
-			}
-
-			if !*output.IsTruncated {
-				break
-			}
-
-			marker = *output.Contents[len(output.Contents)-1].Key
-		}
+		input := s3.ListObjectsV2Input{
+			Bucket:  aws.String(storage.bucket),
+			Prefix:  aws.String(dir),
+			MaxKeys: aws.Int64(1000),
+		}
+
+		err := storage.client.ListObjectsV2Pages(&input, func(page *s3.ListObjectsV2Output, lastPage bool) bool {
+			for _, object := range page.Contents {
+				files = append(files, (*object.Key)[len(dir):])
+				sizes = append(sizes, *object.Size)
+			}
+			return true
+		})
+		if err != nil {
+			return nil, nil, err
+		}
 		return files, sizes, nil
 	}
```
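`ListObjectsV2Pages` moves the pagination loop into the SDK: it drives the continuation token internally and calls the callback once per page, which replaces the hand-rolled `Marker` loop (V2 uses an opaque `ContinuationToken` rather than the last key of the previous page). A self-contained sketch with placeholder bucket and prefix, using the same aws-sdk-go v1 API as the hunk:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")}))
	client := s3.New(sess)

	input := s3.ListObjectsV2Input{
		Bucket:  aws.String("example-bucket"), // placeholder
		Prefix:  aws.String("chunks/"),
		MaxKeys: aws.Int64(1000),
	}

	var keys []string
	// The callback runs once per page; returning true requests the next page.
	err := client.ListObjectsV2Pages(&input, func(page *s3.ListObjectsV2Output, lastPage bool) bool {
		for _, object := range page.Contents {
			keys = append(keys, *object.Key)
		}
		return true
	})
	fmt.Println(len(keys), err)
}
```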
src/duplicacy_sambastorage.go (new file, 250 lines)
@@ -0,0 +1,250 @@
|
|||||||
|
// Copyright (c) Acrosync LLC. All rights reserved.
|
||||||
|
// Free for personal use and commercial trial
|
||||||
|
// Commercial use requires per-user licenses available from https://duplicacy.com
|
||||||
|
|
||||||
|
package duplicacy
|
||||||
|
|
||||||
|
import (
|
||||||
|
"io"
|
||||||
|
"os"
|
||||||
|
"fmt"
|
||||||
|
"net"
|
||||||
|
"path"
|
||||||
|
"time"
|
||||||
|
"strings"
|
||||||
|
"syscall"
|
||||||
|
"math/rand"
|
||||||
|
|
||||||
|
"github.com/hirochachacha/go-smb2"
|
||||||
|
)
|
||||||
|
|
||||||
|
// SambaStorage is a local on-disk file storage implementing the Storage interface.
|
||||||
|
type SambaStorage struct {
|
||||||
|
StorageBase
|
||||||
|
|
||||||
|
share *smb2.Share
|
||||||
|
storageDir string
|
||||||
|
numberOfThreads int
|
||||||
|
}
|
||||||
|
|
||||||
|
// CreateSambaStorage creates a file storage.
|
||||||
|
func CreateSambaStorage(server string, port int, username string, password string, shareName string, storageDir string, threads int) (storage *SambaStorage, err error) {
|
||||||
|
|
||||||
|
connection, err := net.Dial("tcp", fmt.Sprintf("%s:%d", server, port))
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
dialer := &smb2.Dialer{
|
||||||
|
Initiator: &smb2.NTLMInitiator{
|
||||||
|
User: username,
|
||||||
|
Password: password,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
client, err := dialer.Dial(connection)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
share, err := client.Mount(shareName)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Random number fo generating the temporary chunk file suffix.
|
||||||
|
rand.Seed(time.Now().UnixNano())
|
||||||
|
|
||||||
|
storage = &SambaStorage{
|
||||||
|
share: share,
|
||||||
|
numberOfThreads: threads,
|
||||||
|
}
|
||||||
|
|
||||||
|
exist, isDir, _, err := storage.GetFileInfo(0, storageDir)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("Failed to check the storage path %s: %v", storageDir, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if !exist {
|
||||||
|
return nil, fmt.Errorf("The storage path %s does not exist", storageDir)
|
||||||
|
}
|
||||||
|
|
||||||
|
if !isDir {
|
||||||
|
return nil, fmt.Errorf("The storage path %s is not a directory", storageDir)
|
||||||
|
}
|
||||||
|
|
||||||
|
storage.storageDir = storageDir
|
||||||
|
storage.DerivedStorage = storage
|
||||||
|
storage.SetDefaultNestingLevels([]int{2, 3}, 2)
|
||||||
|
return storage, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// ListFiles return the list of files and subdirectories under 'dir' (non-recursively).
|
||||||
|
func (storage *SambaStorage) ListFiles(threadIndex int, dir string) (files []string, sizes []int64, err error) {
|
||||||
|
|
||||||
|
fullPath := path.Join(storage.storageDir, dir)
|
||||||
|
|
||||||
|
list, err := storage.share.ReadDir(fullPath)
|
||||||
|
if err != nil {
|
||||||
|
if os.IsNotExist(err) {
|
||||||
|
return nil, nil, nil
|
||||||
|
}
|
||||||
|
return nil, nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, f := range list {
|
||||||
|
name := f.Name()
|
||||||
|
if (f.IsDir() || f.Mode() & os.ModeSymlink != 0) && name[len(name)-1] != '/' {
|
||||||
|
name += "/"
|
||||||
|
}
|
||||||
|
files = append(files, name)
|
||||||
|
sizes = append(sizes, f.Size())
|
||||||
|
}
|
||||||
|
|
||||||
|
return files, sizes, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeleteFile deletes the file or directory at 'filePath'.
|
||||||
|
func (storage *SambaStorage) DeleteFile(threadIndex int, filePath string) (err error) {
|
||||||
|
err = storage.share.Remove(path.Join(storage.storageDir, filePath))
|
||||||
|
if err == nil || os.IsNotExist(err) {
|
||||||
|
return nil
|
||||||
|
} else {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// MoveFile renames the file.
|
||||||
|
func (storage *SambaStorage) MoveFile(threadIndex int, from string, to string) (err error) {
|
||||||
|
return storage.share.Rename(path.Join(storage.storageDir, from), path.Join(storage.storageDir, to))
|
||||||
|
}
|
||||||
|
|
||||||
|
// CreateDirectory creates a new directory.
|
||||||
|
func (storage *SambaStorage) CreateDirectory(threadIndex int, dir string) (err error) {
|
||||||
|
fmt.Printf("Creating directory %s\n", dir)
|
||||||
|
err = storage.share.Mkdir(path.Join(storage.storageDir, dir), 0744)
|
||||||
|
if err != nil && os.IsExist(err) {
|
||||||
|
return nil
|
||||||
|
} else {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetFileInfo returns the information about the file or directory at 'filePath'.
|
||||||
|
func (storage *SambaStorage) GetFileInfo(threadIndex int, filePath string) (exist bool, isDir bool, size int64, err error) {
|
||||||
|
stat, err := storage.share.Stat(path.Join(storage.storageDir, filePath))
|
||||||
|
if err != nil {
|
||||||
|
if os.IsNotExist(err) {
|
||||||
|
return false, false, 0, nil
|
||||||
|
} else {
|
||||||
|
return false, false, 0, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return true, stat.IsDir(), stat.Size(), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// DownloadFile reads the file at 'filePath' into the chunk.
func (storage *SambaStorage) DownloadFile(threadIndex int, filePath string, chunk *Chunk) (err error) {
    file, err := storage.share.Open(path.Join(storage.storageDir, filePath))
    if err != nil {
        return err
    }
    defer file.Close()
    if _, err = RateLimitedCopy(chunk, file, storage.DownloadRateLimit/storage.numberOfThreads); err != nil {
        return err
    }
    return nil
}

// UploadFile writes 'content' to the file at 'filePath'.
func (storage *SambaStorage) UploadFile(threadIndex int, filePath string, content []byte) (err error) {
    fullPath := path.Join(storage.storageDir, filePath)

    if len(strings.Split(filePath, "/")) > 2 {
        dir := path.Dir(fullPath)
        stat, err := storage.share.Stat(dir)
        if err != nil {
            if !os.IsNotExist(err) {
                return err
            }
            err = storage.share.MkdirAll(dir, 0744)
            if err != nil {
                return err
            }
        } else {
            if !stat.IsDir() && stat.Mode()&os.ModeSymlink == 0 {
                return fmt.Errorf("The path %s is not a directory or symlink", dir)
            }
        }
    }

    letters := "abcdefghijklmnopqrstuvwxyz"
    suffix := make([]byte, 8)
    for i := range suffix {
        suffix[i] = letters[rand.Intn(len(letters))]
    }

    temporaryFile := fullPath + "." + string(suffix) + ".tmp"

    file, err := storage.share.Create(temporaryFile)
    if err != nil {
        return err
    }

    reader := CreateRateLimitedReader(content, storage.UploadRateLimit/storage.numberOfThreads)
    _, err = io.Copy(file, reader)
    if err != nil {
        file.Close()
        return err
    }

    if err = file.Sync(); err != nil {
        pathErr, ok := err.(*os.PathError)
        isNotSupported := ok && pathErr.Op == "sync" && pathErr.Err == syscall.ENOTSUP
        if !isNotSupported {
            _ = file.Close()
            return err
        }
    }

    err = file.Close()
    if err != nil {
        return err
    }

    err = storage.share.Rename(temporaryFile, fullPath)
    if err != nil {
        if _, e := storage.share.Stat(fullPath); e == nil {
            storage.share.Remove(temporaryFile)
            return nil
        } else {
            return err
        }
    }

    return nil
}

// If a local snapshot cache is needed for the storage to avoid downloading/uploading chunks too often when
// managing snapshots.
func (storage *SambaStorage) IsCacheNeeded() bool { return true }

// If the 'MoveFile' method is implemented.
func (storage *SambaStorage) IsMoveFileImplemented() bool { return true }

// If the storage can guarantee strong consistency.
func (storage *SambaStorage) IsStrongConsistent() bool { return true }

// If the storage supports fast listing of file names.
func (storage *SambaStorage) IsFastListing() bool { return false }

// Enable the test mode.
func (storage *SambaStorage) EnableTestMode() {}
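Note that UploadFile never writes a chunk in place: the content goes to a randomly named .tmp file first and is then renamed over the destination, and a failed rename is tolerated when the destination already exists (another thread won the race). Below is a minimal sketch of the same write-to-temp-then-rename pattern against a local filesystem, using only the standard library; the helper name atomicWrite is illustrative and not part of duplicacy.

// A minimal sketch of the write-to-temp-then-rename pattern used above,
// against a local filesystem instead of an SMB share. atomicWrite is a
// made-up name, not a duplicacy API.
package main

import (
    "math/rand"
    "os"
    "path/filepath"
)

func atomicWrite(fullPath string, content []byte) error {
    letters := "abcdefghijklmnopqrstuvwxyz"
    suffix := make([]byte, 8)
    for i := range suffix {
        suffix[i] = letters[rand.Intn(len(letters))]
    }
    temporaryFile := fullPath + "." + string(suffix) + ".tmp"

    // Write the whole payload to the temporary file first, so a crash or a
    // concurrent reader never observes a partially written destination file.
    if err := os.WriteFile(temporaryFile, content, 0644); err != nil {
        return err
    }

    // Publish the file with a rename. If the rename fails but the destination
    // already exists, another writer won the race; drop our temporary copy.
    if err := os.Rename(temporaryFile, fullPath); err != nil {
        if _, e := os.Stat(fullPath); e == nil {
            os.Remove(temporaryFile)
            return nil
        }
        return err
    }
    return nil
}

func main() {
    _ = os.MkdirAll("chunks", 0744)
    _ = atomicWrite(filepath.Join("chunks", "example"), []byte("chunk data"))
}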
@@ -144,14 +144,14 @@ func (storage *SFTPStorage) retry(f func () error) error {
 		storage.clientLock.Lock()
 		connection, err := ssh.Dial("tcp", storage.serverAddress, storage.sftpConfig)
 		if err != nil {
-			LOG_WARN("SFT_RECONNECT", "Failed to connect to %s: %v; retrying", storage.serverAddress, err)
+			LOG_WARN("SFTP_RECONNECT", "Failed to connect to %s: %v; retrying", storage.serverAddress, err)
 			storage.clientLock.Unlock()
 			continue
 		}

 		client, err := sftp.NewClient(connection)
 		if err != nil {
-			LOG_WARN("SFT_RECONNECT", "Failed to create a new SFTP client to %s: %v; retrying", storage.serverAddress, err)
+			LOG_WARN("SFTP_RECONNECT", "Failed to create a new SFTP client to %s: %v; retrying", storage.serverAddress, err)
 			connection.Close()
 			storage.clientLock.Unlock()
 			continue
@@ -120,6 +120,12 @@ func (snapshot *Snapshot)ListRemoteFiles(config *Config, chunkOperator *ChunkOpe
 		return chunk.GetBytes()
 	})

+	defer func() {
+		if chunk != nil {
+			config.PutChunk(chunk)
+		}
+	} ()
+
 	// Normally if Version is 0 then the snapshot is created by CLI v2 but unfortunately CLI 3.0.1 does not set the
 	// version bit correctly when copying old backups. So we need to check the first byte -- if it is '[' then it is
 	// the old format. The new format starts with a string encoded in msgpack and the first byte can't be '['.
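The added defer hands the snapshot chunk back to the config's chunk pool on every exit path, so an early error return no longer leaks the buffer. Below is a minimal sketch of the same defer-based pattern, with a plain sync.Pool and a stand-in Chunk type in place of duplicacy's config-owned pool; all names here are illustrative.

// A minimal sketch of "always return the pooled buffer" via defer, mirroring
// the hunk above. Chunk and chunkPool stand in for duplicacy's own types.
package main

import (
    "fmt"
    "sync"
)

type Chunk struct{ data []byte }

var chunkPool = sync.Pool{
    New: func() any { return &Chunk{data: make([]byte, 0, 64)} },
}

func listRemoteFiles(fail bool) error {
    chunk := chunkPool.Get().(*Chunk)
    // The deferred cleanup runs on every exit path, including early error
    // returns, so the chunk is always handed back to the pool.
    defer func() {
        if chunk != nil {
            chunkPool.Put(chunk)
        }
    }()

    if fail {
        return fmt.Errorf("parse error: chunk is still returned to the pool")
    }
    return nil
}

func main() {
    fmt.Println(listRemoteFiles(true), listRemoteFiles(false))
}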
@@ -448,7 +448,7 @@ func (manager *SnapshotManager) CleanSnapshotCache(latestSnapshot *Snapshot, all

 	allFiles, _ := manager.ListAllFiles(manager.snapshotCache, chunkDir)
 	for _, file := range allFiles {
-		if file[len(file)-1] != '/' {
+		if len(file) > 0 && file[len(file)-1] != '/' {
 			chunkID := strings.Replace(file, "/", "", -1)
 			if _, found := chunks[chunkID]; !found {
 				LOG_DEBUG("SNAPSHOT_CLEAN", "Delete chunk %s from the snapshot cache", chunkID)
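This hunk and the pruneSnapshotsExhaustive hunk further down add the same guard: file[len(file)-1] panics with an index-out-of-range error when a backend returns an empty file name, so the length check must come first and rely on Go's left-to-right short-circuit evaluation. A minimal demonstration:

// Why the guard is needed: indexing file[len(file)-1] on an empty string
// panics, while the short-circuiting length check is safe.
package main

import "fmt"

func isDirEntry(file string) bool {
    // len(file) == 0 is evaluated first; the index expression is never
    // reached for an empty name, so no out-of-range panic can occur.
    return len(file) == 0 || file[len(file)-1] == '/'
}

func main() {
    for _, file := range []string{"ab/cdef", "snapshots/", ""} {
        fmt.Printf("%q -> skip as directory: %v\n", file, isDirEntry(file))
    }
}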
@@ -1019,6 +1019,8 @@ func (manager *SnapshotManager) CheckSnapshots(snapshotID string, revisionsToChe
 	numberOfVerifiedChunks := len(verifiedChunks)

 	saveVerifiedChunks := func() {
+		verifiedChunksLock.Lock()
+		defer verifiedChunksLock.Unlock()
 		if len(verifiedChunks) > numberOfVerifiedChunks {
 			var description []byte
 			description, err = json.Marshal(verifiedChunks)
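saveVerifiedChunks marshals the shared verifiedChunks map while checker goroutines may still be writing to it; holding the mutex for the whole closure (Lock plus deferred Unlock) prevents a "concurrent map read and map write" fault during json.Marshal. A minimal sketch of the pattern, with illustrative names:

// A mutex held for the full duration of the marshal protects a map that
// other goroutines mutate concurrently. Names are illustrative.
package main

import (
    "encoding/json"
    "fmt"
    "sync"
)

func main() {
    verified := map[string]int{}
    var lock sync.Mutex

    save := func() []byte {
        // Without this lock, a concurrent writer would trigger Go's
        // "concurrent map read and map write" runtime fault mid-marshal.
        lock.Lock()
        defer lock.Unlock()
        description, _ := json.Marshal(verified)
        return description
    }

    var wg sync.WaitGroup
    for i := 0; i < 4; i++ {
        wg.Add(1)
        go func(i int) {
            defer wg.Done()
            lock.Lock()
            verified[fmt.Sprintf("chunk-%d", i)] = i
            lock.Unlock()
        }(i)
    }
    wg.Wait()
    fmt.Println(string(save()))
}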
@@ -2426,7 +2428,7 @@ func (manager *SnapshotManager) pruneSnapshotsExhaustive(referencedFossils map[s

 	allFiles, _ := manager.ListAllFiles(manager.storage, chunkDir)
 	for _, file := range allFiles {
-		if file[len(file)-1] == '/' {
+		if len(file) == 0 || file[len(file)-1] == '/' {
 			continue
 		}

@@ -261,7 +261,8 @@ func CreateStorage(preference Preference, resetPassword bool, threads int) (stor
 		return fileStorage
 	}

-	urlRegex := regexp.MustCompile(`^([\w-]+)://([\w\-@\.]+@)?([^/]+)(/(.+))?`)
+	// Added \! to matched[2] because OneDrive drive ids contain ! (e.g. "b!xxx")
+	urlRegex := regexp.MustCompile(`^([\w-]+)://([\w\-@\.\!]+@)?([^/]+)(/(.+))?`)

 	matched := urlRegex.FindStringSubmatch(storageURL)

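The widened character class lets the optional user part of a storage URL carry OneDrive drive ids such as b!xxx@. A quick check of what the regex captures for the two URL shapes the next hunk handles; the drive id b!AbC123 is a made-up example:

// What the widened regex captures for the two OneDrive URL shapes handled
// below; "b!AbC123" is a placeholder drive id, not a real one.
package main

import (
    "fmt"
    "regexp"
)

func main() {
    urlRegex := regexp.MustCompile(`^([\w-]+)://([\w\-@\.\!]+@)?([^/]+)(/(.+))?`)
    for _, url := range []string{
        "odb://b!AbC123@backups/laptop", // drive id lands in matched[2]
        "odb://b!AbC123@/",              // root of the drive: id lands in matched[3]
    } {
        matched := urlRegex.FindStringSubmatch(url)
        fmt.Printf("%-32s -> [2]=%q [3]=%q [4]=%q\n", url, matched[2], matched[3], matched[4])
    }
}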
@@ -644,15 +645,40 @@ func CreateStorage(preference Preference, resetPassword bool, threads int) (stor
 		SavePassword(preference, "gcd_token", tokenFile)
 		return gcdStorage
 	} else if matched[1] == "one" || matched[1] == "odb" {
+		// Handle writing directly to the root of the drive
+		// For odb://drive_id@/, drive_id@ is match[3] not match[2]
+		if matched[2] == "" && strings.HasSuffix(matched[3], "@") {
+			matched[2], matched[3] = matched[3], matched[2]
+		}
+		drive_id := matched[2]
+		if len(drive_id) > 0 {
+			drive_id = drive_id[:len(drive_id)-1]
+		}
 		storagePath := matched[3] + matched[4]
 		prompt := fmt.Sprintf("Enter the path of the OneDrive token file (downloadable from https://duplicacy.com/one_start):")
 		tokenFile := GetPassword(preference, matched[1] + "_token", prompt, true, resetPassword)
-		oneDriveStorage, err := CreateOneDriveStorage(tokenFile, matched[1] == "odb", storagePath, threads)
+
+		// client_id, just like tokenFile, can be stored in preferences
+		//prompt = fmt.Sprintf("Enter client_id for custom Azure app (if empty will use duplicacy.com one):")
+		client_id := GetPasswordFromPreference(preference, matched[1] + "_client_id")
+		client_secret := ""
+
+		if client_id != "" {
+			// client_secret should go into keyring
+			prompt = fmt.Sprintf("Enter client_secret for custom Azure app (if empty will use duplicacy.com one):")
+			client_secret = GetPassword(preference, matched[1] + "_client_secret", prompt, true, resetPassword)
+		}
+
+		oneDriveStorage, err := CreateOneDriveStorage(tokenFile, matched[1] == "odb", storagePath, threads, client_id, client_secret, drive_id)
 		if err != nil {
 			LOG_ERROR("STORAGE_CREATE", "Failed to load the OneDrive storage at %s: %v", storageURL, err)
 			return nil
 		}

 		SavePassword(preference, matched[1] + "_token", tokenFile)
+		if client_id != "" {
+			SavePassword(preference, matched[1] + "_client_secret", client_secret)
+		}
 		return oneDriveStorage
 	} else if matched[1] == "hubic" {
 		storagePath := matched[3] + matched[4]
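Note the asymmetry this hunk introduces: client_id is only ever read from the saved preferences (it is not secret and is never prompted for), while client_secret is prompted for and persisted to the keyring only when a custom id exists. A minimal, hypothetical sketch of that flow, with stub functions standing in for duplicacy's preference and keyring helpers:

// A hypothetical sketch of the credential flow above: a custom client id
// comes only from saved preferences, and a secret is requested and persisted
// only when such an id is present. All three functions below are stubs.
package main

import "fmt"

func fromPreference(key string) string { return "my-app-id" } // stub preference lookup
func promptFor(prompt string) string   { return "my-secret" } // stub interactive prompt
func saveToKeyring(key, value string)  { fmt.Printf("keyring: %s stored\n", key) }

func main() {
    clientID := fromPreference("one_client_id")
    clientSecret := ""
    if clientID != "" {
        // The default duplicacy.com app is used when no id is configured;
        // only a custom Azure app needs its own secret.
        clientSecret = promptFor("Enter client_secret for custom Azure app:")
    }
    if clientID != "" {
        saveToKeyring("one_client_secret", clientSecret)
    }
}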
@@ -731,6 +757,43 @@ func CreateStorage(preference Preference, resetPassword bool, threads int) (stor
 			return nil
 		}
 		return storjStorage
+	} else if matched[1] == "smb" {
+		server := matched[3]
+		username := matched[2]
+		if username == "" {
+			LOG_ERROR("STORAGE_CREATE", "No username is provided to access the SAMBA storage")
+			return nil
+		}
+		username = username[:len(username)-1]
+		storageDir := matched[5]
+		port := 445
+
+		if strings.Contains(server, ":") {
+			index := strings.Index(server, ":")
+			port, _ = strconv.Atoi(server[index+1:])
+			server = server[:index]
+		}
+
+		if !strings.Contains(storageDir, "/") {
+			LOG_ERROR("STORAGE_CREATE", "No share name specified for the SAMBA storage")
+			return nil
+		}
+
+		index := strings.Index(storageDir, "/")
+		shareName := storageDir[:index]
+		storageDir = storageDir[index+1:]
+
+		prompt := fmt.Sprintf("Enter the SAMBA password:")
+		password := GetPassword(preference, "smb_password", prompt, true, resetPassword)
+		sambaStorage, err := CreateSambaStorage(server, port, username, password, shareName, storageDir, threads)
+		if err != nil {
+			LOG_ERROR("STORAGE_CREATE", "Failed to load the SAMBA storage at %s: %v", storageURL, err)
+			return nil
+		}
+		SavePassword(preference, "smb_password", password)
+		return sambaStorage
+
+
 	} else {
 		LOG_ERROR("STORAGE_CREATE", "The storage type '%s' is not supported", matched[1])
 		return nil
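Per the parsing above, an SMB storage URL takes the form smb://username@server[:port]/share/path, with 445 as the default port. Below is a small self-contained walk-through of the decomposition; the host, share, and path values are made-up examples:

// How the parsing above decomposes an smb storage URL; nas.local, backups,
// and laptop are made-up example values.
package main

import (
    "fmt"
    "strconv"
    "strings"
)

func main() {
    // As produced by the storage URL regex for smb://user@nas.local:4450/backups/laptop:
    // matched[2] is "user@", matched[3] is "nas.local:4450", matched[5] is "backups/laptop".
    username := "user@"
    server := "nas.local:4450"
    storageDir := "backups/laptop"

    username = username[:len(username)-1] // strip the trailing '@'

    port := 445 // default SMB port when the URL carries none
    if strings.Contains(server, ":") {
        index := strings.Index(server, ":")
        port, _ = strconv.Atoi(server[index+1:])
        server = server[:index]
    }

    // The first path component is the share name; the rest is the
    // directory inside the share.
    index := strings.Index(storageDir, "/")
    shareName := storageDir[:index]
    storageDir = storageDir[index+1:]

    fmt.Printf("user=%s server=%s port=%d share=%s dir=%s\n",
        username, server, port, shareName, storageDir)
}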
@@ -136,15 +136,15 @@ func loadStorage(localStoragePath string, threads int) (Storage, error) {
 		storage.SetDefaultNestingLevels([]int{2, 3}, 2)
 		return storage, err
 	} else if *testStorageName == "one" {
-		storage, err := CreateOneDriveStorage(config["token_file"], false, config["storage_path"], threads)
+		storage, err := CreateOneDriveStorage(config["token_file"], false, config["storage_path"], threads, "", "", "")
 		storage.SetDefaultNestingLevels([]int{2, 3}, 2)
 		return storage, err
 	} else if *testStorageName == "odb" {
-		storage, err := CreateOneDriveStorage(config["token_file"], true, config["storage_path"], threads)
+		storage, err := CreateOneDriveStorage(config["token_file"], true, config["storage_path"], threads, "", "", "")
 		storage.SetDefaultNestingLevels([]int{2, 3}, 2)
 		return storage, err
 	} else if *testStorageName == "one" {
-		storage, err := CreateOneDriveStorage(config["token_file"], false, config["storage_path"], threads)
+		storage, err := CreateOneDriveStorage(config["token_file"], false, config["storage_path"], threads, "", "", "")
 		storage.SetDefaultNestingLevels([]int{2, 3}, 2)
 		return storage, err
 	} else if *testStorageName == "hubic" {
@@ -176,6 +176,21 @@ func loadStorage(localStoragePath string, threads int) (Storage, error) {
 		}
 		storage.SetDefaultNestingLevels([]int{2, 3}, 2)
 		return storage, err
+	} else if *testStorageName == "storj" {
+		storage, err := CreateStorjStorage(config["satellite"], config["key"], config["passphrase"], config["bucket"], config["storage_path"], threads)
+		if err != nil {
+			return nil, err
+		}
+		storage.SetDefaultNestingLevels([]int{2, 3}, 2)
+		return storage, err
+	} else if *testStorageName == "smb" {
+		port, _ := strconv.Atoi(config["port"])
+		storage, err := CreateSambaStorage(config["server"], port, config["username"], config["password"], config["share"], config["storage_path"], threads)
+		if err != nil {
+			return nil, err
+		}
+		storage.SetDefaultNestingLevels([]int{2, 3}, 2)
+		return storage, err
 	}

 	return nil, fmt.Errorf("Invalid storage named: %s", *testStorageName)
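The test loader reads backend settings from a flat string map, so an smb entry needs exactly the keys the code looks up: server, port, username, password, share, and storage_path, with port kept as a string because it goes through strconv.Atoi. A hedged sketch of such a map with hypothetical values (the file name and format of the test configuration are not shown in this diff):

// Hypothetical values; the keys are exactly the ones loadStorage reads for
// the "smb" test storage. How the map is populated is not shown in this diff.
package main

import "fmt"

func main() {
    config := map[string]string{
        "server":       "nas.local",
        "port":         "445", // kept as a string; loadStorage runs strconv.Atoi on it
        "username":     "user",
        "password":     "********",
        "share":        "backups",
        "storage_path": "duplicacy-test",
    }
    fmt.Println(config)
}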