Compare commits

...

19 Commits

Author SHA1 Message Date
Gilbert Chen
981efc13e6 Bump version to 3.2.0 2023-09-08 15:25:22 -04:00
Gilbert Chen
6445ecbcde Add dependencies required by github.com/hirochachacha/go-smb2 2023-09-08 13:54:03 -04:00
Gilbert Chen
ff207ba5bf Convert the file path to the real one when downloading a chunk
This is mainly to handle the case when a fossil needs to be downloaded.
This happens when a metadata chunk has been marked as a fossil but the
corresponding snapshot must still be reconstructed to determine referenced
chunks.
2023-09-08 13:30:28 -04:00
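Fossils are chunk files renamed with a ".fsl" suffix, so the conversion is essentially a fallback to the fossil name. A rough sketch of the idea (realChunkPath is an illustrative helper, not Duplicacy's actual code; GetFileInfo is the real Storage method):

    // Illustrative sketch: prefer the regular chunk path, and fall back to
    // the fossil name when the chunk file itself is missing.
    func realChunkPath(storage Storage, chunkPath string) string {
        exist, _, _, err := storage.GetFileInfo(0, chunkPath)
        if err == nil && exist {
            return chunkPath
        }
        return chunkPath + ".fsl"
    }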
Gilbert Chen
3a81c1065a Add a new Samba backend
The storage URL is smb://user@server[:port]/share/path. The password can be
set in the environment variable DUPLICACY_SMB_PASSWORD for the default storage,
or DUPLICACY_<STORAGE_NAME>_SMB_PASSWORD for a named storage.

This backend is based on https://github.com/hirochachacha/go-smb2.  The
previous samba:// backend is just an alias for the disk-based backend with
caching enabled.
2023-07-05 22:51:24 -04:00
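A hypothetical invocation, with placeholder host, share, path, and credentials:

    export DUPLICACY_SMB_PASSWORD=secret
    duplicacy init mywork smb://backup@nas.local/backups/duplicacy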
Gilbert Chen
cdf8f5a857 Check the length of 'file' before checking if it ends with '/' 2023-04-09 22:11:08 -04:00
Gilbert Chen
1f9ad0e35c B2 backend should be able to download .fsl files
This is needed when a metadata chunk has been turned into a fossil. In B2,
the fossil file is the last version of the file with an 'upload' action, so to
download it the 'b2_list_file_versions' API is called to find the file id,
which is then passed to 'b2_download_file_by_id'.

2023-03-30 13:27:29 -04:00
Gilbert Chen
53b0f3f7b6 Implement zstd compression
Zstd compression can be enabled by providing `-zstd` or `-zstd-level <level>`
to `init`, `add`, or `backup`. With `-zstd` the compression level will be
`default`, and with `-zstd-level` the level can be any of `fastest`, `default`,
`better`, or `best`.
2023-03-26 21:31:51 -04:00
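For example (snapshot id and storage path are placeholders):

    duplicacy init -zstd-level best mywork /path/to/storage
    duplicacy backup -zstd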
Gilbert Chen
9f276047db Fixed a typo in log id
SFT_RECONNECT -> SFTP_RECONNECT
2023-03-26 21:26:51 -04:00
gilbertchen
c237269589 Merge pull request #649 from northnose/saveVerifiedChunksLock
Acquire verifiedChunksLock in saveVerifiedChunks
2023-03-23 22:11:53 -04:00
gilbertchen
493ef603e3 Merge pull request #648 from northnose/dropbox-pointer
Upgrade go-dropbox to the latest
2023-03-23 22:10:24 -04:00
David Zhang
889191a814 Upgrade go-dropbox to the latest 2023-03-20 20:19:43 -07:00
David Zhang
df80096cdf Acquire verifiedChunksLock in saveVerifiedChunks 2023-03-12 21:00:51 -07:00
gilbertchen
24c2ea76b9 Merge pull request #633 from sevimo123/sharepoint_support
Sharepoint support
2023-01-19 14:06:42 -05:00
gilbertchen
15b6ef9d76 Merge pull request #632 from sevimo123/custom_odb_creds
CLI support for custom credentials for OneDrive (client_id/client_secret)
2023-01-19 13:01:01 -05:00
gilbertchen
75b310b98e Merge pull request #641 from A-wels/patch-1
Fixed typo incomlete -> incomplete
2023-01-17 11:51:20 -05:00
Alexander Welsing
039b749a3e Fixed typo incomlete -> incomplete
"Previous incomlete backup contains %d files and %d chunks -> "Previous incomplete backup contains %d files and %d chunks
2023-01-12 12:41:45 +01:00
Gilbert Chen
9be475f876 Fix another chunk leak in listing files in a revision.
This bug leaks a chunk every time files in a revision are listed.  Not a big
deal for backup and restore, but it becomes problematic when listing files in
many revisions for commands such as check and history.
2023-01-06 23:02:48 -05:00
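The fix, shown in the Snapshot.ListRemoteFiles hunk below, returns the chunk to the config's pool in a defer so that every return path releases it:

    defer func() {
        if chunk != nil {
            config.PutChunk(chunk) // runs on early returns and error paths too
        }
    }()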
Victor Mozgin
d7593a828c Added support for SharePoint document libraries via odb://DRIVEID@path/to/storage. 2022-07-22 23:31:27 -04:00
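For illustration, with a placeholder drive id (real OneDrive drive ids look like "b!xxx", which is why the URL regex below now admits '!'):

    duplicacy init mywork "odb://b!AbC123@backups/duplicacy"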
Victor Mozgin
238ef63e16 CLI support for custom credentials for OneDrive (client_id/client_secret) 2022-07-22 21:18:52 -04:00
17 changed files with 569 additions and 43 deletions

View File

@@ -371,6 +371,8 @@ func configRepository(context *cli.Context, init bool) {
} else if existingConfig.CompressionLevel != 100 {
duplicacy.LOG_ERROR("STORAGE_COMPRESSION", "This storage is configured with an invalid compression level %d", existingConfig.CompressionLevel)
return
} else if existingConfig.CompressionLevel != duplicacy.DEFAULT_COMPRESSION_LEVEL {
duplicacy.LOG_INFO("STORAGE_COMPRESSION", "Compression level: %d", existingConfig.CompressionLevel)
}
// Don't print config in the background mode
@@ -378,8 +380,6 @@ func configRepository(context *cli.Context, init bool) {
existingConfig.Print()
}
} else {
compressionLevel := 100
averageChunkSize := duplicacy.AtoSize(context.String("chunk-size"))
if averageChunkSize == 0 {
fmt.Fprintf(context.App.Writer, "Invalid average chunk size: %s.\n\n", context.String("chunk-size"))
@@ -487,6 +487,18 @@ func configRepository(context *cli.Context, init bool) {
}
}
compressionLevel := 100
zstdLevel := context.String("zstd-level")
if zstdLevel != "" {
if level, found := duplicacy.ZSTD_COMPRESSION_LEVELS[zstdLevel]; found {
compressionLevel = level
} else {
duplicacy.LOG_ERROR("STORAGE_COMPRESSION", "Invalid zstd compression level: %s", zstdLevel)
}
} else if context.Bool("zstd") {
compressionLevel = duplicacy.ZSTD_COMPRESSION_LEVEL_DEFAULT
}
duplicacy.ConfigStorage(storage, iterations, compressionLevel, averageChunkSize, maximumChunkSize,
minimumChunkSize, storagePassword, otherConfig, bitCopy, context.String("key"), dataShards, parityShards)
}
@@ -786,6 +798,17 @@ func backupRepository(context *cli.Context) {
backupManager.SetupSnapshotCache(preference.Name)
backupManager.SetDryRun(dryRun)
zstdLevel := context.String("zstd-level")
if zstdLevel != "" {
if level, found := duplicacy.ZSTD_COMPRESSION_LEVELS[zstdLevel]; found {
backupManager.SetCompressionLevel(level)
} else {
duplicacy.LOG_ERROR("STORAGE_COMPRESSION", "Invalid zstd compression level: %s", zstdLevel)
}
} else if context.Bool("zstd") {
backupManager.SetCompressionLevel(duplicacy.ZSTD_COMPRESSION_LEVEL_DEFAULT)
}
metadataChunkSize := context.Int("metadata-chunk-size")
maximumInMemoryEntries := context.Int("max-in-memory-entries")
backupManager.Backup(repository, quickMode, threads, context.String("t"), showStatistics, enableVSS, vssTimeout, enumOnly, metadataChunkSize, maximumInMemoryEntries)
@@ -1428,6 +1451,15 @@ func main() {
Usage: "the minimum size of chunks (defaults to chunk-size/4)",
Argument: "<size>",
},
cli.StringFlag{
Name: "zstd-level",
Usage: "set zstd compression level (fast, default, better, or best)",
Argument: "<level>",
},
cli.BoolFlag{
Name: "zstd",
Usage: "short for -zstd default",
},
cli.IntFlag{
Name: "iterations",
Usage: "the number of iterations used in storage key derivation (default is 16384)",
@@ -1495,6 +1527,15 @@ func main() {
Name: "dry-run",
Usage: "dry run for testing, don't backup anything. Use with -stats and -d",
},
cli.StringFlag{
Name: "zstd-level",
Usage: "set zstd compression level (fast, default, better, or best)",
Argument: "<level>",
},
cli.BoolFlag{
Name: "zstd",
Usage: "short for -zstd default",
},
cli.BoolFlag{
Name: "vss",
Usage: "enable the Volume Shadow Copy service (Windows and macOS using APFS only)",
@@ -1938,6 +1979,15 @@ func main() {
Usage: "the minimum size of chunks (default is chunk-size/4)",
Argument: "<size>",
},
cli.StringFlag{
Name: "zstd-level",
Usage: "set zstd compression level (fast, default, better, or best)",
Argument: "<level>",
},
cli.BoolFlag{
Name: "zstd",
Usage: "short for -zstd default",
},
cli.IntFlag{
Name: "iterations",
Usage: "the number of iterations used in storage key derivation (default is 16384)",
@@ -2215,7 +2265,7 @@ func main() {
app.Name = "duplicacy"
app.HelpName = "duplicacy"
app.Usage = "A new generation cloud backup tool based on lock-free deduplication"
app.Version = "3.1.0" + " (" + GitCommit + ")"
app.Version = "3.2.0" + " (" + GitCommit + ")"
// Exit with code 2 if an invalid command is provided
app.CommandNotFound = func(context *cli.Context, command string) {

go.mod (4 changes)
View File

@@ -9,7 +9,7 @@ require (
github.com/bkaradzic/go-lz4 v1.0.0
github.com/gilbertchen/azure-sdk-for-go v14.1.2-0.20180323033227-8fd4663cab7c+incompatible
github.com/gilbertchen/cli v1.2.1-0.20160223210219-1de0a1836ce9
github.com/gilbertchen/go-dropbox v0.0.0-20221207034530-08c0c180a4f9
github.com/gilbertchen/go-dropbox v0.0.0-20230321030224-087ef8db1916
github.com/gilbertchen/go-ole v1.2.0
github.com/gilbertchen/goamz v0.0.0-20170712012135-eada9f4e8cc2
github.com/gilbertchen/gopass v0.0.0-20170109162249-bf9dde6d0d2c
@@ -40,7 +40,9 @@ require (
github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e // indirect
github.com/googleapis/gax-go/v2 v2.0.5 // indirect
github.com/hirochachacha/go-smb2 v1.1.0 // indirect
github.com/jmespath/go-jmespath v0.3.0 // indirect
github.com/klauspost/compress v1.16.3 // indirect
github.com/klauspost/cpuid v1.3.1 // indirect
github.com/kr/fs v0.1.0 // indirect
github.com/kr/text v0.2.0 // indirect

go.sum (9 changes)
View File

@@ -49,6 +49,8 @@ github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI
github.com/francoispqt/gojay v1.2.13/go.mod h1:ehT5mTG4ua4581f1++1WLG0vPdaA9HaiDsoyrBGkyDY=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
github.com/geoffgarside/ber v1.1.0 h1:qTmFG4jJbwiSzSXoNJeHcOprVzZ8Ulde2Rrrifu5U9w=
github.com/geoffgarside/ber v1.1.0/go.mod h1:jVPKeCbj6MvQZhwLYsGwaGI52oUorHoHKNecGT85ZCc=
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
github.com/gilbertchen/azure-sdk-for-go v14.1.2-0.20180323033227-8fd4663cab7c+incompatible h1:2fZxTUw5D9uGWnYTsU/obVavn+1qTF+TsVok3U8uN2Q=
github.com/gilbertchen/azure-sdk-for-go v14.1.2-0.20180323033227-8fd4663cab7c+incompatible/go.mod h1:qsVRCpBUm2l0eMUeI9wZ47yzra2+lv2YkGhMZpzBVUc=
@@ -60,6 +62,8 @@ github.com/gilbertchen/go-dropbox v0.0.0-20221128142034-9910c19f1d13 h1:54e1HiEX
github.com/gilbertchen/go-dropbox v0.0.0-20221128142034-9910c19f1d13/go.mod h1:85+2CRHC/klHy4vEM+TYtbhDo2wMjPa4JNdVzUHsDIk=
github.com/gilbertchen/go-dropbox v0.0.0-20221207034530-08c0c180a4f9 h1:3hJHxOyf/rAWWz9GNyai0hSt56vGMATS9B6yjw/bzzk=
github.com/gilbertchen/go-dropbox v0.0.0-20221207034530-08c0c180a4f9/go.mod h1:85+2CRHC/klHy4vEM+TYtbhDo2wMjPa4JNdVzUHsDIk=
github.com/gilbertchen/go-dropbox v0.0.0-20230321030224-087ef8db1916 h1:7VpJiGwW51MB7yJ5e27Ar/ej8Yu7WuU2SEo409qPoNs=
github.com/gilbertchen/go-dropbox v0.0.0-20230321030224-087ef8db1916/go.mod h1:85+2CRHC/klHy4vEM+TYtbhDo2wMjPa4JNdVzUHsDIk=
github.com/gilbertchen/go-ole v1.2.0 h1:ay65uwxo6w8UVOxN0+fuCqUXGaXxbmkGs5m4uY6e1Zw=
github.com/gilbertchen/go-ole v1.2.0/go.mod h1:NNiozp7QxhyGmHxxNdFKIcVaINvJFTAjBJ2gYzh8fsg=
github.com/gilbertchen/goamz v0.0.0-20170712012135-eada9f4e8cc2 h1:VDPwi3huqeJBtymgLOvPAP4S2gbSSK/UrWVwRbRAmnw=
@@ -132,6 +136,8 @@ github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:Fecb
github.com/grpc-ecosystem/grpc-gateway v1.5.0/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw=
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hirochachacha/go-smb2 v1.1.0 h1:b6hs9qKIql9eVXAiN0M2wSFY5xnhbHAQoCwRKbaRTZI=
github.com/hirochachacha/go-smb2 v1.1.0/go.mod h1:8F1A4d5EZzrGu5R7PU163UcMRDJQl4FtcxjBfsY8TZE=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
github.com/ianlancetaylor/demangle v0.0.0-20210905161508-09a460cdf81d/go.mod h1:aYm2/VgdVmcIU8iMfdMvDMsRAQjcfZSKFby6HOFvi/w=
github.com/jellevandenhooff/dkim v0.0.0-20150330215556-f50fe3d243e1/go.mod h1:E0B/fFc00Y+Rasa88328GlI/XbtyysCtTHZS8h7IrBU=
@@ -141,6 +147,8 @@ github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCV
github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/klauspost/compress v1.16.3 h1:XuJt9zzcnaz6a16/OU53ZjWp/v7/42WcR5t2a0PcNQY=
github.com/klauspost/compress v1.16.3/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE=
github.com/klauspost/cpuid v1.2.4/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
github.com/klauspost/cpuid v1.3.1 h1:5JNjFYYQrZeKRJ0734q51WCEEn2huer72Dc7K+R/b6s=
github.com/klauspost/cpuid v1.3.1/go.mod h1:bYW4mA6ZgKPob1/Dlai2LviZJO7KGI3uoWLd42rAQw4=
@@ -281,6 +289,7 @@ golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8U
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200221231518-2aa609cf4a9d/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200728195943-123391ffb6de/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20220131195533-30dcbda58838 h1:71vQrMauZZhcTVK6KdYM+rklehEEwb3E+ZhaE5jrPrE=
golang.org/x/crypto v0.0.0-20220131195533-30dcbda58838/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=

View File

@@ -329,7 +329,7 @@ func (client *B2Client) AuthorizeAccount(threadIndex int) (err error, allowed bo
if client.DownloadURL == "" {
client.DownloadURL = output.DownloadURL
}
LOG_INFO("BACKBLAZE_URL", "download URL is: %s", client.DownloadURL)
LOG_INFO("BACKBLAZE_URL", "Download URL is: %s", client.DownloadURL)
client.IsAuthorized = true
client.LastAuthorizationTime = time.Now().Unix()
@@ -584,8 +584,26 @@ func (client *B2Client) HideFile(threadIndex int, fileName string) (fileID strin
func (client *B2Client) DownloadFile(threadIndex int, filePath string) (io.ReadCloser, int64, error) {
url := client.getDownloadURL() + "/file/" + client.BucketName + "/" + B2Escape(client.StorageDir + filePath)
if !strings.HasSuffix(filePath, ".fsl") {
url := client.getDownloadURL() + "/file/" + client.BucketName + "/" + B2Escape(client.StorageDir + filePath)
readCloser, _, len, err := client.call(threadIndex, url, http.MethodGet, make(map[string]string), 0)
return readCloser, len, err
}
// We're trying to download a fossil file. We need to find the file ID of the last 'upload' of the file.
filePath = strings.TrimSuffix(filePath, ".fsl")
entries, err := client.ListFileNames(threadIndex, filePath, true, true)
fileId := ""
for _, entry := range entries {
if entry.FileName == filePath && entry.Action == "upload" && entry.Size > 0 {
fileId = entry.FileID
break
}
}
// Proceed with the b2_download_file_by_id call
url := client.getAPIURL() + "/b2api/v1/b2_download_file_by_id?fileId=" + fileId
readCloser, _, len, err := client.call(threadIndex, url, http.MethodGet, make(map[string]string), 0)
return readCloser, len, err
}

View File

@@ -47,6 +47,10 @@ func (manager *BackupManager) SetDryRun(dryRun bool) {
manager.config.dryRun = dryRun
}
func (manager *BackupManager) SetCompressionLevel(level int) {
manager.config.CompressionLevel = level
}
// CreateBackupManager creates a backup manager using the specified 'storage'. 'snapshotID' is a unique id to
// identify snapshots created for this repository. 'top' is the top directory of the repository. 'password' is the
// master key which can be nil if encryption is not enabled.
@@ -138,6 +142,8 @@ func (manager *BackupManager) Backup(top string, quickMode bool, threads int, ta
LOG_DEBUG("BACKUP_PARAMETERS", "top: %s, quick: %t, tag: %s", top, quickMode, tag)
manager.config.PrintCompressionLevel()
if manager.config.DataShards != 0 && manager.config.ParityShards != 0 {
LOG_INFO("BACKUP_ERASURECODING", "Erasure coding is enabled with %d data shards and %d parity shards",
manager.config.DataShards, manager.config.ParityShards)

View File

@@ -24,6 +24,7 @@ import (
"github.com/bkaradzic/go-lz4"
"github.com/minio/highwayhash"
"github.com/klauspost/reedsolomon"
"github.com/klauspost/compress/zstd"
// This is a fork of github.com/minio/highwayhash at 1.0.1 that computes incorrect hash on
// arm64 machines. We need this fork to be able to read the chunks created by Duplicacy
@@ -267,6 +268,38 @@ func (chunk *Chunk) Encrypt(encryptionKey []byte, derivationKey string, isMetada
deflater, _ := zlib.NewWriterLevel(encryptedBuffer, chunk.config.CompressionLevel)
deflater.Write(chunk.buffer.Bytes())
deflater.Close()
} else if chunk.config.CompressionLevel >= ZSTD_COMPRESSION_LEVEL_FASTEST && chunk.config.CompressionLevel <= ZSTD_COMPRESSION_LEVEL_BEST {
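// Tag the stream with a 4-byte "ZSTD" banner so Decrypt can tell zstd
// chunks apart from zlib and "LZ4 "-tagged chunks.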
encryptedBuffer.Write([]byte("ZSTD"))
compressionLevel := zstd.SpeedDefault
if chunk.config.CompressionLevel == ZSTD_COMPRESSION_LEVEL_FASTEST {
compressionLevel = zstd.SpeedFastest
} else if chunk.config.CompressionLevel == ZSTD_COMPRESSION_LEVEL_BETTER {
compressionLevel = zstd.SpeedBetterCompression
} else if chunk.config.CompressionLevel == ZSTD_COMPRESSION_LEVEL_BEST {
compressionLevel = zstd.SpeedBestCompression
}
deflater, err := zstd.NewWriter(encryptedBuffer, zstd.WithEncoderLevel(compressionLevel))
if err != nil {
return err
}
// Make sure we have enough space in encryptedBuffer
availableLength := encryptedBuffer.Cap() - len(encryptedBuffer.Bytes())
maximumLength := deflater.MaxEncodedSize(chunk.buffer.Len())
if availableLength < maximumLength {
encryptedBuffer.Grow(maximumLength - availableLength)
}
_, err = deflater.Write(chunk.buffer.Bytes())
if err != nil {
return fmt.Errorf("ZSTD compression error: %v", err)
}
err = deflater.Close()
if err != nil {
return fmt.Errorf("ZSTD compression error: %v", err)
}
} else if chunk.config.CompressionLevel == DEFAULT_COMPRESSION_LEVEL {
encryptedBuffer.Write([]byte("LZ4 "))
// Make sure we have enough space in encryptedBuffer
@@ -361,7 +394,6 @@ func (chunk *Chunk) Encrypt(encryptionKey []byte, derivationKey string, isMetada
chunk.buffer.Write(header)
return nil
}
// This is to ensure compatibility with Vertical Backup, which still uses HMAC-SHA256 (instead of HMAC-BLAKE2) to
@@ -633,6 +665,24 @@ func (chunk *Chunk) Decrypt(encryptionKey []byte, derivationKey string) (err err
chunk.hash = nil
return nil, rewriteNeeded
}
if len(compressed) > 4 && string(compressed[:4]) == "ZSTD" {
chunk.buffer.Reset()
chunk.hasher = chunk.config.NewKeyedHasher(chunk.config.HashKey)
chunk.hash = nil
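// Consume the 4-byte "ZSTD" banner before handing the buffer to the zstd reader.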
encryptedBuffer.Read(encryptedBuffer.Bytes()[:4])
inflater, err := zstd.NewReader(encryptedBuffer)
if err != nil {
return err, false
}
defer inflater.Close()
if _, err = io.Copy(chunk, inflater); err != nil {
return err, false
}
return nil, rewriteNeeded
}
inflater, err := zlib.NewReader(encryptedBuffer)
if err != nil {
return err, false

View File

@@ -35,6 +35,19 @@ var DEFAULT_KEY = []byte("duplicacy")
// standard zlib levels of -1 to 9.
var DEFAULT_COMPRESSION_LEVEL = 100
// zstd compression levels starting from 200
var ZSTD_COMPRESSION_LEVEL_FASTEST = 200
var ZSTD_COMPRESSION_LEVEL_DEFAULT = 201
var ZSTD_COMPRESSION_LEVEL_BETTER = 202
var ZSTD_COMPRESSION_LEVEL_BEST = 203
var ZSTD_COMPRESSION_LEVELS = map[string]int {
"fastest": ZSTD_COMPRESSION_LEVEL_FASTEST,
"default": ZSTD_COMPRESSION_LEVEL_DEFAULT,
"better": ZSTD_COMPRESSION_LEVEL_BETTER,
"best": ZSTD_COMPRESSION_LEVEL_BEST,
}
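// A -zstd-level argument is resolved through this map; for example
// ZSTD_COMPRESSION_LEVELS["best"] yields 203.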
// The new banner of the config file (to differentiate from the old format where the salt and iterations are fixed)
var CONFIG_BANNER = "duplicacy\001"
@@ -202,6 +215,14 @@ func (config *Config) Print() {
}
func (config *Config) PrintCompressionLevel() {
for name, level := range ZSTD_COMPRESSION_LEVELS {
if level == config.CompressionLevel {
LOG_INFO("COMPRESSION_LEVEL", "Zstd compression is enabled (level: %s)", name)
}
}
}
func CreateConfigFromParameters(compressionLevel int, averageChunkSize int, maximumChunkSize int, mininumChunkSize int,
isEncrypted bool, copyFrom *Config, bitCopy bool) (config *Config) {
@@ -294,7 +315,10 @@ func (config *Config) PutChunk(chunk *Chunk) {
}
func (config *Config) NewKeyedHasher(key []byte) hash.Hash {
if config.CompressionLevel == DEFAULT_COMPRESSION_LEVEL {
// Early versions of Duplicacy used SHA256 as the hash function for chunk IDs at the time when
// only zlib compression was supported. Later SHA256 was replaced by Blake2b and LZ4 was used
// for compression (with compression level set to 100).
if config.CompressionLevel >= DEFAULT_COMPRESSION_LEVEL {
hasher, err := blake2.New(&blake2.Config{Size: 32, Key: key})
if err != nil {
LOG_ERROR("HASH_KEY", "Invalid hash key: %x", key)
@@ -339,7 +363,7 @@ func (hasher *DummyHasher) BlockSize() int {
func (config *Config) NewFileHasher() hash.Hash {
if SkipFileHash {
return &DummyHasher{}
} else if config.CompressionLevel == DEFAULT_COMPRESSION_LEVEL {
} else if config.CompressionLevel >= DEFAULT_COMPRESSION_LEVEL {
hasher, _ := blake2.New(&blake2.Config{Size: 32})
return hasher
} else {

View File

@@ -550,7 +550,7 @@ func loadIncompleteSnapshot(snapshotID string, cachePath string) *EntryList {
}
}
LOG_INFO("INCOMPLETE_LOAD", "Previous incomlete backup contains %d files and %d chunks",
LOG_INFO("INCOMPLETE_LOAD", "Previous incomplete backup contains %d files and %d chunks",
entryList.NumberOfEntries, len(entryList.PreservedChunkLengths) + len(entryList.UploadedChunkHashes))
return entryList
@@ -571,4 +571,4 @@ func deleteIncompleteSnapshot(cachePath string) {
}
}
}

View File

@@ -775,7 +775,7 @@ func (storage *GCDStorage) GetFileInfo(threadIndex int, filePath string) (exist
// DownloadFile reads the file at 'filePath' into the chunk.
func (storage *GCDStorage) DownloadFile(threadIndex int, filePath string, chunk *Chunk) (err error) {
// We never download the fossil so there is no need to convert the path
fileID, err := storage.getIDFromPath(threadIndex, filePath, false)
fileID, err := storage.getIDFromPath(threadIndex, storage.convertFilePath(filePath), false)
if err != nil {
return err
}

View File

@@ -5,6 +5,7 @@
package duplicacy
import (
"context"
"bytes"
"encoding/json"
"fmt"
@@ -39,6 +40,7 @@ type OneDriveClient struct {
TokenFile string
Token *oauth2.Token
OAConfig *oauth2.Config
TokenLock *sync.Mutex
IsConnected bool
@@ -49,7 +51,7 @@ type OneDriveClient struct {
APIURL string
}
func NewOneDriveClient(tokenFile string, isBusiness bool) (*OneDriveClient, error) {
func NewOneDriveClient(tokenFile string, isBusiness bool, client_id string, client_secret string, drive_id string) (*OneDriveClient, error) {
description, err := ioutil.ReadFile(tokenFile)
if err != nil {
@@ -65,16 +67,34 @@ func NewOneDriveClient(tokenFile string, isBusiness bool) (*OneDriveClient, erro
HTTPClient: http.DefaultClient,
TokenFile: tokenFile,
Token: token,
OAConfig: nil,
TokenLock: &sync.Mutex{},
IsBusiness: isBusiness,
}
if (client_id != "") {
oneOauthConfig := oauth2.Config{
ClientID: client_id,
ClientSecret: client_secret,
Scopes: []string{"Files.ReadWrite", "offline_access"},
Endpoint: oauth2.Endpoint{
AuthURL: "https://login.microsoftonline.com/common/oauth2/v2.0/authorize",
TokenURL: "https://login.microsoftonline.com/common/oauth2/v2.0/token",
},
}
client.OAConfig = &oneOauthConfig
}
if isBusiness {
client.RefreshTokenURL = "https://duplicacy.com/odb_refresh"
client.APIURL = "https://graph.microsoft.com/v1.0/me"
client.APIURL = "https://graph.microsoft.com/v1.0/me/drive"
if drive_id != "" {
client.APIURL = "https://graph.microsoft.com/v1.0/drives/"+drive_id
}
} else {
client.RefreshTokenURL = "https://duplicacy.com/one_refresh"
client.APIURL = "https://api.onedrive.com/v1.0"
client.APIURL = "https://api.onedrive.com/v1.0/drive"
}
client.RefreshToken(false)
@@ -218,15 +238,25 @@ func (client *OneDriveClient) RefreshToken(force bool) (err error) {
return nil
}
readCloser, _, err := client.call(client.RefreshTokenURL, "POST", client.Token, "")
if err != nil {
return fmt.Errorf("failed to refresh the access token: %v", err)
}
if (client.OAConfig == nil) {
readCloser, _, err := client.call(client.RefreshTokenURL, "POST", client.Token, "")
if err != nil {
return fmt.Errorf("failed to refresh the access token: %v", err)
}
defer readCloser.Close()
defer readCloser.Close()
if err = json.NewDecoder(readCloser).Decode(client.Token); err != nil {
return err
if err = json.NewDecoder(readCloser).Decode(client.Token); err != nil {
return err
}
} else {
ctx := context.Background()
tokenSource := client.OAConfig.TokenSource(ctx, client.Token)
token, err := tokenSource.Token()
if err != nil {
return fmt.Errorf("failed to refresh the access token: %v", err)
}
client.Token = token
}
description, err := json.Marshal(client.Token)
@@ -258,9 +288,9 @@ func (client *OneDriveClient) ListEntries(path string) ([]OneDriveEntry, error)
entries := []OneDriveEntry{}
url := client.APIURL + "/drive/root:/" + path + ":/children"
url := client.APIURL + "/root:/" + path + ":/children"
if path == "" {
url = client.APIURL + "/drive/root/children"
url = client.APIURL + "/root/children"
}
if client.TestMode {
url += "?top=8"
@@ -296,7 +326,8 @@ func (client *OneDriveClient) ListEntries(path string) ([]OneDriveEntry, error)
func (client *OneDriveClient) GetFileInfo(path string) (string, bool, int64, error) {
url := client.APIURL + "/drive/root:/" + path
url := client.APIURL + "/root:/" + path
if path == "" { url = client.APIURL + "/root" }
url += "?select=id,name,size,folder"
readCloser, _, err := client.call(url, "GET", 0, "")
@@ -321,7 +352,7 @@ func (client *OneDriveClient) GetFileInfo(path string) (string, bool, int64, err
func (client *OneDriveClient) DownloadFile(path string) (io.ReadCloser, int64, error) {
url := client.APIURL + "/drive/items/root:/" + path + ":/content"
url := client.APIURL + "/items/root:/" + path + ":/content"
return client.call(url, "GET", 0, "")
}
@@ -331,7 +362,7 @@ func (client *OneDriveClient) UploadFile(path string, content []byte, rateLimit
// Upload file using the simple method; this is only possible for OneDrive Personal or if the file
// is smaller than 4MB for OneDrive Business
if !client.IsBusiness || (client.TestMode && rand.Int() % 2 == 0) {
url := client.APIURL + "/drive/root:/" + path + ":/content"
url := client.APIURL + "/root:/" + path + ":/content"
readCloser, _, err := client.call(url, "PUT", CreateRateLimitedReader(content, rateLimit), "application/octet-stream")
if err != nil {
@@ -365,7 +396,7 @@ func (client *OneDriveClient) CreateUploadSession(path string) (uploadURL string
},
}
readCloser, _, err := client.call(client.APIURL + "/drive/root:/" + path + ":/createUploadSession", "POST", input, "application/json")
readCloser, _, err := client.call(client.APIURL + "/root:/" + path + ":/createUploadSession", "POST", input, "application/json")
if err != nil {
return "", err
}
@@ -409,7 +440,7 @@ func (client *OneDriveClient) UploadFileSession(uploadURL string, content []byte
func (client *OneDriveClient) DeleteFile(path string) error {
url := client.APIURL + "/drive/root:/" + path
url := client.APIURL + "/root:/" + path
readCloser, _, err := client.call(url, "DELETE", 0, "")
if err != nil {
@@ -422,10 +453,10 @@ func (client *OneDriveClient) DeleteFile(path string) error {
func (client *OneDriveClient) MoveFile(path string, parent string) error {
url := client.APIURL + "/drive/root:/" + path
url := client.APIURL + "/root:/" + path
parentReference := make(map[string]string)
parentReference["path"] = "/drive/root:/" + parent
parentReference["path"] = "/root:/" + parent
parameters := make(map[string]interface{})
parameters["parentReference"] = parentReference
@@ -477,7 +508,7 @@ func (client *OneDriveClient) CreateDirectory(path string, name string) error {
return fmt.Errorf("The path '%s' is not a directory", path)
}
url = client.APIURL + "/drive/root:/" + path + ":/children"
url = client.APIURL + "/root:/" + path + ":/children"
}
parameters := make(map[string]interface{})

View File

@@ -19,13 +19,13 @@ type OneDriveStorage struct {
}
// CreateOneDriveStorage creates an OneDrive storage object.
func CreateOneDriveStorage(tokenFile string, isBusiness bool, storagePath string, threads int) (storage *OneDriveStorage, err error) {
func CreateOneDriveStorage(tokenFile string, isBusiness bool, storagePath string, threads int, client_id string, client_secret string, drive_id string) (storage *OneDriveStorage, err error) {
for len(storagePath) > 0 && storagePath[len(storagePath)-1] == '/' {
storagePath = storagePath[:len(storagePath)-1]
}
client, err := NewOneDriveClient(tokenFile, isBusiness)
client, err := NewOneDriveClient(tokenFile, isBusiness, client_id, client_secret, drive_id)
if err != nil {
return nil, err
}

View File

@@ -0,0 +1,250 @@
// Copyright (c) Acrosync LLC. All rights reserved.
// Free for personal use and commercial trial
// Commercial use requires per-user licenses available from https://duplicacy.com
package duplicacy
import (
"io"
"os"
"fmt"
"net"
"path"
"time"
"strings"
"syscall"
"math/rand"
"github.com/hirochachacha/go-smb2"
)
// SambaStorage is a network storage backed by an SMB share, implementing the Storage interface.
type SambaStorage struct {
StorageBase
share *smb2.Share
storageDir string
numberOfThreads int
}
// CreateSambaStorage creates a Samba storage object.
func CreateSambaStorage(server string, port int, username string, password string, shareName string, storageDir string, threads int) (storage *SambaStorage, err error) {
connection, err := net.Dial("tcp", fmt.Sprintf("%s:%d", server, port))
if err != nil {
return nil, err
}
dialer := &smb2.Dialer{
Initiator: &smb2.NTLMInitiator{
User: username,
Password: password,
},
}
client, err := dialer.Dial(connection)
if err != nil {
return nil, err
}
share, err := client.Mount(shareName)
if err != nil {
return nil, err
}
// Seed the random number generator used for temporary chunk file suffixes.
rand.Seed(time.Now().UnixNano())
storage = &SambaStorage{
share: share,
numberOfThreads: threads,
}
exist, isDir, _, err := storage.GetFileInfo(0, storageDir)
if err != nil {
return nil, fmt.Errorf("Failed to check the storage path %s: %v", storageDir, err)
}
if !exist {
return nil, fmt.Errorf("The storage path %s does not exist", storageDir)
}
if !isDir {
return nil, fmt.Errorf("The storage path %s is not a directory", storageDir)
}
storage.storageDir = storageDir
storage.DerivedStorage = storage
storage.SetDefaultNestingLevels([]int{2, 3}, 2)
return storage, nil
}
// ListFiles returns the list of files and subdirectories under 'dir' (non-recursively).
func (storage *SambaStorage) ListFiles(threadIndex int, dir string) (files []string, sizes []int64, err error) {
fullPath := path.Join(storage.storageDir, dir)
list, err := storage.share.ReadDir(fullPath)
if err != nil {
if os.IsNotExist(err) {
return nil, nil, nil
}
return nil, nil, err
}
for _, f := range list {
name := f.Name()
if (f.IsDir() || f.Mode() & os.ModeSymlink != 0) && name[len(name)-1] != '/' {
name += "/"
}
files = append(files, name)
sizes = append(sizes, f.Size())
}
return files, sizes, nil
}
// DeleteFile deletes the file or directory at 'filePath'.
func (storage *SambaStorage) DeleteFile(threadIndex int, filePath string) (err error) {
err = storage.share.Remove(path.Join(storage.storageDir, filePath))
if err == nil || os.IsNotExist(err) {
return nil
} else {
return err
}
}
// MoveFile renames the file.
func (storage *SambaStorage) MoveFile(threadIndex int, from string, to string) (err error) {
return storage.share.Rename(path.Join(storage.storageDir, from), path.Join(storage.storageDir, to))
}
// CreateDirectory creates a new directory.
func (storage *SambaStorage) CreateDirectory(threadIndex int, dir string) (err error) {
fmt.Printf("Creating directory %s\n", dir)
err = storage.share.Mkdir(path.Join(storage.storageDir, dir), 0744)
if err != nil && os.IsExist(err) {
return nil
} else {
return err
}
}
// GetFileInfo returns the information about the file or directory at 'filePath'.
func (storage *SambaStorage) GetFileInfo(threadIndex int, filePath string) (exist bool, isDir bool, size int64, err error) {
stat, err := storage.share.Stat(path.Join(storage.storageDir, filePath))
if err != nil {
if os.IsNotExist(err) {
return false, false, 0, nil
} else {
return false, false, 0, err
}
}
return true, stat.IsDir(), stat.Size(), nil
}
// DownloadFile reads the file at 'filePath' into the chunk.
func (storage *SambaStorage) DownloadFile(threadIndex int, filePath string, chunk *Chunk) (err error) {
file, err := storage.share.Open(path.Join(storage.storageDir, filePath))
if err != nil {
return err
}
defer file.Close()
if _, err = RateLimitedCopy(chunk, file, storage.DownloadRateLimit/storage.numberOfThreads); err != nil {
return err
}
return nil
}
// UploadFile writes 'content' to the file at 'filePath'
func (storage *SambaStorage) UploadFile(threadIndex int, filePath string, content []byte) (err error) {
fullPath := path.Join(storage.storageDir, filePath)
if len(strings.Split(filePath, "/")) > 2 {
dir := path.Dir(fullPath)
stat, err := storage.share.Stat(dir)
if err != nil {
if !os.IsNotExist(err) {
return err
}
err = storage.share.MkdirAll(dir, 0744)
if err != nil {
return err
}
} else {
if !stat.IsDir() && stat.Mode() & os.ModeSymlink == 0 {
return fmt.Errorf("The path %s is not a directory or symlink", dir)
}
}
}
letters := "abcdefghijklmnopqrstuvwxyz"
suffix := make([]byte, 8)
for i := range suffix {
suffix[i] = letters[rand.Intn(len(letters))]
}
temporaryFile := fullPath + "." + string(suffix) + ".tmp"
file, err := storage.share.Create(temporaryFile)
if err != nil {
return err
}
reader := CreateRateLimitedReader(content, storage.UploadRateLimit/storage.numberOfThreads)
_, err = io.Copy(file, reader)
if err != nil {
file.Close()
return err
}
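// Flush to the server; some SMB servers do not support the flush request
// and return ENOTSUP, which is tolerated below.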
if err = file.Sync(); err != nil {
pathErr, ok := err.(*os.PathError)
isNotSupported := ok && pathErr.Op == "sync" && pathErr.Err == syscall.ENOTSUP
if !isNotSupported {
_ = file.Close()
return err
}
}
err = file.Close()
if err != nil {
return err
}
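// Move the temporary file into place. If the rename fails but the target
// already exists, another thread uploaded the same chunk first, so the
// temporary file is discarded and the upload counts as a success.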
err = storage.share.Rename(temporaryFile, fullPath)
if err != nil {
if _, e := storage.share.Stat(fullPath); e == nil {
storage.share.Remove(temporaryFile)
return nil
} else {
return err
}
}
return nil
}
// If a local snapshot cache is needed for the storage to avoid downloading/uploading chunks too often when
// managing snapshots.
func (storage *SambaStorage) IsCacheNeeded() bool { return true }
// If the 'MoveFile' method is implemented.
func (storage *SambaStorage) IsMoveFileImplemented() bool { return true }
// If the storage can guarantee strong consistency.
func (storage *SambaStorage) IsStrongConsistent() bool { return true }
// If the storage supports fast listing of files names.
func (storage *SambaStorage) IsFastListing() bool { return false }
// Enable the test mode.
func (storage *SambaStorage) EnableTestMode() {}

View File

@@ -144,14 +144,14 @@ func (storage *SFTPStorage) retry(f func () error) error {
storage.clientLock.Lock()
connection, err := ssh.Dial("tcp", storage.serverAddress, storage.sftpConfig)
if err != nil {
LOG_WARN("SFT_RECONNECT", "Failed to connect to %s: %v; retrying", storage.serverAddress, err)
LOG_WARN("SFTP_RECONNECT", "Failed to connect to %s: %v; retrying", storage.serverAddress, err)
storage.clientLock.Unlock()
continue
}
client, err := sftp.NewClient(connection)
if err != nil {
LOG_WARN("SFT_RECONNECT", "Failed to create a new SFTP client to %s: %v; retrying", storage.serverAddress, err)
LOG_WARN("SFTP_RECONNECT", "Failed to create a new SFTP client to %s: %v; retrying", storage.serverAddress, err)
connection.Close()
storage.clientLock.Unlock()
continue

View File

@@ -120,6 +120,12 @@ func (snapshot *Snapshot)ListRemoteFiles(config *Config, chunkOperator *ChunkOpe
return chunk.GetBytes()
})
defer func() {
if chunk != nil {
config.PutChunk(chunk)
}
} ()
// Normally if Version is 0 then the snapshot is created by CLI v2 but unfortunately CLI 3.0.1 does not set the
// version bit correctly when copying old backups. So we need to check the first byte -- if it is '[' then it is
// the old format. The new format starts with a string encoded in msgpack and the first byte can't be '['.

View File

@@ -448,7 +448,7 @@ func (manager *SnapshotManager) CleanSnapshotCache(latestSnapshot *Snapshot, all
allFiles, _ := manager.ListAllFiles(manager.snapshotCache, chunkDir)
for _, file := range allFiles {
if file[len(file)-1] != '/' {
if len(file) > 0 && file[len(file)-1] != '/' {
chunkID := strings.Replace(file, "/", "", -1)
if _, found := chunks[chunkID]; !found {
LOG_DEBUG("SNAPSHOT_CLEAN", "Delete chunk %s from the snapshot cache", chunkID)
@@ -1019,6 +1019,8 @@ func (manager *SnapshotManager) CheckSnapshots(snapshotID string, revisionsToChe
numberOfVerifiedChunks := len(verifiedChunks)
saveVerifiedChunks := func() {
verifiedChunksLock.Lock()
defer verifiedChunksLock.Unlock()
if len(verifiedChunks) > numberOfVerifiedChunks {
var description []byte
description, err = json.Marshal(verifiedChunks)
@@ -2426,7 +2428,7 @@ func (manager *SnapshotManager) pruneSnapshotsExhaustive(referencedFossils map[s
allFiles, _ := manager.ListAllFiles(manager.storage, chunkDir)
for _, file := range allFiles {
if file[len(file)-1] == '/' {
if len(file) == 0 || file[len(file)-1] == '/' {
continue
}

View File

@@ -261,7 +261,8 @@ func CreateStorage(preference Preference, resetPassword bool, threads int) (stor
return fileStorage
}
urlRegex := regexp.MustCompile(`^([\w-]+)://([\w\-@\.]+@)?([^/]+)(/(.+))?`)
// Added \! to matched[2] because OneDrive drive ids contain ! (e.g. "b!xxx")
urlRegex := regexp.MustCompile(`^([\w-]+)://([\w\-@\.\!]+@)?([^/]+)(/(.+))?`)
matched := urlRegex.FindStringSubmatch(storageURL)
@@ -644,15 +645,40 @@ func CreateStorage(preference Preference, resetPassword bool, threads int) (stor
SavePassword(preference, "gcd_token", tokenFile)
return gcdStorage
} else if matched[1] == "one" || matched[1] == "odb" {
// Handle writing directly to the root of the drive
// For odb://drive_id@/, drive_id@ is match[3] not match[2]
if matched[2] == "" && strings.HasSuffix(matched[3], "@") {
matched[2], matched[3] = matched[3], matched[2]
}
drive_id := matched[2]
if len(drive_id) > 0 {
drive_id = drive_id[:len(drive_id)-1]
}
storagePath := matched[3] + matched[4]
prompt := fmt.Sprintf("Enter the path of the OneDrive token file (downloadable from https://duplicacy.com/one_start):")
tokenFile := GetPassword(preference, matched[1] + "_token", prompt, true, resetPassword)
oneDriveStorage, err := CreateOneDriveStorage(tokenFile, matched[1] == "odb", storagePath, threads)
// client_id, just like tokenFile, can be stored in preferences
//prompt = fmt.Sprintf("Enter client_id for custom Azure app (if empty will use duplicacy.com one):")
client_id := GetPasswordFromPreference(preference, matched[1] + "_client_id")
client_secret := ""
if client_id != "" {
// client_secret should go into keyring
prompt = fmt.Sprintf("Enter client_secret for custom Azure app (if empty will use duplicacy.com one):")
client_secret = GetPassword(preference, matched[1] + "_client_secret", prompt, true, resetPassword)
}
oneDriveStorage, err := CreateOneDriveStorage(tokenFile, matched[1] == "odb", storagePath, threads, client_id, client_secret, drive_id)
if err != nil {
LOG_ERROR("STORAGE_CREATE", "Failed to load the OneDrive storage at %s: %v", storageURL, err)
return nil
}
SavePassword(preference, matched[1] + "_token", tokenFile)
if client_id != "" {
SavePassword(preference, matched[1] + "_client_secret", client_secret)
}
return oneDriveStorage
} else if matched[1] == "hubic" {
storagePath := matched[3] + matched[4]
@@ -731,6 +757,43 @@ func CreateStorage(preference Preference, resetPassword bool, threads int) (stor
return nil
}
return storjStorage
} else if matched[1] == "smb" {
server := matched[3]
username := matched[2]
if username == "" {
LOG_ERROR("STORAGE_CREATE", "No username is provided to access the SAMBA storage")
return nil
}
username = username[:len(username)-1]
storageDir := matched[5]
port := 445
if strings.Contains(server, ":") {
index := strings.Index(server, ":")
port, _ = strconv.Atoi(server[index+1:])
server = server[:index]
}
if !strings.Contains(storageDir, "/") {
LOG_ERROR("STORAGE_CREATE", "No share name specified for the SAMBA storage")
return nil
}
index := strings.Index(storageDir, "/")
shareName := storageDir[:index]
storageDir = storageDir[index+1:]
prompt := fmt.Sprintf("Enter the SAMBA password:")
password := GetPassword(preference, "smb_password", prompt, true, resetPassword)
sambaStorage, err := CreateSambaStorage(server, port, username, password, shareName, storageDir, threads)
if err != nil {
LOG_ERROR("STORAGE_CREATE", "Failed to load the SAMBA storage at %s: %v", storageURL, err)
return nil
}
SavePassword(preference, "smb_password", password)
return sambaStorage
} else {
LOG_ERROR("STORAGE_CREATE", "The storage type '%s' is not supported", matched[1])
return nil

View File

@@ -136,15 +136,15 @@ func loadStorage(localStoragePath string, threads int) (Storage, error) {
storage.SetDefaultNestingLevels([]int{2, 3}, 2)
return storage, err
} else if *testStorageName == "one" {
storage, err := CreateOneDriveStorage(config["token_file"], false, config["storage_path"], threads)
storage, err := CreateOneDriveStorage(config["token_file"], false, config["storage_path"], threads, "", "", "")
storage.SetDefaultNestingLevels([]int{2, 3}, 2)
return storage, err
} else if *testStorageName == "odb" {
storage, err := CreateOneDriveStorage(config["token_file"], true, config["storage_path"], threads)
storage, err := CreateOneDriveStorage(config["token_file"], true, config["storage_path"], threads, "", "", "")
storage.SetDefaultNestingLevels([]int{2, 3}, 2)
return storage, err
} else if *testStorageName == "one" {
storage, err := CreateOneDriveStorage(config["token_file"], false, config["storage_path"], threads)
storage, err := CreateOneDriveStorage(config["token_file"], false, config["storage_path"], threads, "", "", "")
storage.SetDefaultNestingLevels([]int{2, 3}, 2)
return storage, err
} else if *testStorageName == "hubic" {
@@ -176,6 +176,21 @@ func loadStorage(localStoragePath string, threads int) (Storage, error) {
}
storage.SetDefaultNestingLevels([]int{2, 3}, 2)
return storage, err
} else if *testStorageName == "storj" {
storage, err := CreateStorjStorage(config["satellite"], config["key"], config["passphrase"], config["bucket"], config["storage_path"], threads)
if err != nil {
return nil, err
}
storage.SetDefaultNestingLevels([]int{2, 3}, 2)
return storage, err
} else if *testStorageName == "smb" {
port, _ := strconv.Atoi(config["port"])
storage, err := CreateSambaStorage(config["server"], port, config["username"], config["password"], config["share"], config["storage_path"], threads)
if err != nil {
return nil, err
}
storage.SetDefaultNestingLevels([]int{2, 3}, 2)
return storage, err
}
return nil, fmt.Errorf("Invalid storage named: %s", *testStorageName)