Mirror of https://github.com/jkl1337/duplicacy.git, synced 2026-01-08 14:44:36 -06:00
Compare commits: v3.2.0...915776161f (4 commits)
| Author | SHA1 | Date |
|---|---|---|
| | 915776161f | |
| | 4e9d2c4cca | |
| | cc482beb95 | |
| | bf3ea8a83c | |
```diff
@@ -368,9 +368,6 @@ func configRepository(context *cli.Context, init bool) {
             "The storage '%s' has already been initialized", preference.StorageURL)
     if existingConfig.CompressionLevel >= -1 && existingConfig.CompressionLevel <= 9 {
         duplicacy.LOG_INFO("STORAGE_FORMAT", "This storage is configured to use the pre-1.2.0 format")
-    } else if existingConfig.CompressionLevel != 100 {
-        duplicacy.LOG_ERROR("STORAGE_COMPRESSION", "This storage is configured with an invalid compression level %d", existingConfig.CompressionLevel)
-        return
     } else if existingConfig.CompressionLevel != duplicacy.DEFAULT_COMPRESSION_LEVEL {
         duplicacy.LOG_INFO("STORAGE_COMPRESSION", "Compression level: %d", existingConfig.CompressionLevel)
     }
```
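With this hunk, `configRepository` no longer aborts when an existing storage reports a compression level outside both the legacy -1..9 range and the literal 100: the invalid-level error branch is gone, and any non-default level is now simply logged by the remaining `STORAGE_COMPRESSION` branch. A minimal sketch of the branch structure that remains; `describeCompressionLevel` is a hypothetical stand-in, not a duplicacy function, and the constant 100 stands in for `duplicacy.DEFAULT_COMPRESSION_LEVEL`:

```go
package main

import "fmt"

// describeCompressionLevel mirrors the control flow left after this hunk:
// -1..9 selects the pre-1.2.0 format, any other non-default level is merely
// reported, and no level is rejected anymore.
func describeCompressionLevel(level int) string {
	const defaultLevel = 100 // stand-in for duplicacy.DEFAULT_COMPRESSION_LEVEL
	if level >= -1 && level <= 9 {
		return "pre-1.2.0 storage format"
	} else if level != defaultLevel {
		return fmt.Sprintf("non-default compression level: %d", level)
	}
	return "default compression level"
}

func main() {
	for _, level := range []int{6, 100, 201} {
		fmt.Println(level, "->", describeCompressionLevel(level))
	}
}
```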
```diff
@@ -1558,7 +1558,7 @@ func (manager *BackupManager) RestoreFile(chunkDownloader *ChunkDownloader, chun
 func (manager *BackupManager) CopySnapshots(otherManager *BackupManager, snapshotID string,
     revisionsToBeCopied []int, uploadingThreads int, downloadingThreads int) bool {
 
-    if !manager.config.IsCompatiableWith(otherManager.config) {
+    if !manager.config.IsCompatibleWith(otherManager.config) {
         LOG_ERROR("CONFIG_INCOMPATIBLE", "Two storages are not compatible for the copy operation")
         return false
     }
```
```diff
@@ -169,10 +169,9 @@ func (config *Config) UnmarshalJSON(description []byte) (err error) {
     return nil
 }
 
-func (config *Config) IsCompatiableWith(otherConfig *Config) bool {
+func (config *Config) IsCompatibleWith(otherConfig *Config) bool {
 
-    return config.CompressionLevel == otherConfig.CompressionLevel &&
-        config.AverageChunkSize == otherConfig.AverageChunkSize &&
+    return config.AverageChunkSize == otherConfig.AverageChunkSize &&
         config.MaximumChunkSize == otherConfig.MaximumChunkSize &&
         config.MinimumChunkSize == otherConfig.MinimumChunkSize &&
         bytes.Equal(config.ChunkSeed, otherConfig.ChunkSeed) &&
```
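Besides fixing the `IsCompatiableWith` misspelling (the call site in `CopySnapshots` above is updated to match), this hunk removes `CompressionLevel` from the comparison, so two storages that agree on their chunking parameters are copy-compatible even when they compress differently. A self-contained sketch of the relaxed rule, using a stand-in struct limited to the fields visible in this diff; the trailing `&&` shows the real method compares further fields after `ChunkSeed`:

```go
package main

import (
	"bytes"
	"fmt"
)

// config is a stand-in for duplicacy's Config, reduced to the fields this
// diff shows IsCompatibleWith comparing.
type config struct {
	CompressionLevel int
	AverageChunkSize int
	MaximumChunkSize int
	MinimumChunkSize int
	ChunkSeed        []byte
}

// isCompatibleWith follows the new version: CompressionLevel is ignored.
func (c *config) isCompatibleWith(other *config) bool {
	return c.AverageChunkSize == other.AverageChunkSize &&
		c.MaximumChunkSize == other.MaximumChunkSize &&
		c.MinimumChunkSize == other.MinimumChunkSize &&
		bytes.Equal(c.ChunkSeed, other.ChunkSeed)
}

func main() {
	source := &config{CompressionLevel: 100, AverageChunkSize: 4 << 20,
		MaximumChunkSize: 16 << 20, MinimumChunkSize: 1 << 20, ChunkSeed: []byte("seed")}
	target := &config{CompressionLevel: 201, AverageChunkSize: 4 << 20,
		MaximumChunkSize: 16 << 20, MinimumChunkSize: 1 << 20, ChunkSeed: []byte("seed")}
	// Differs only in compression level: compatible under the new rule.
	fmt.Println(source.isCompatibleWith(target)) // true
}
```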
```diff
@@ -255,7 +254,6 @@ func CreateConfigFromParameters(compressionLevel int, averageChunkSize int, maxi
     }
 
     if copyFrom != nil {
-        config.CompressionLevel = copyFrom.CompressionLevel
 
         config.AverageChunkSize = copyFrom.AverageChunkSize
         config.MaximumChunkSize = copyFrom.MaximumChunkSize
```
```diff
@@ -265,6 +263,8 @@ func CreateConfigFromParameters(compressionLevel int, averageChunkSize int, maxi
         config.HashKey = copyFrom.HashKey
 
         if bitCopy {
+            config.CompressionLevel = copyFrom.CompressionLevel
+
             config.IDKey = copyFrom.IDKey
             config.ChunkKey = copyFrom.ChunkKey
             config.FileKey = copyFrom.FileKey
```
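These two `CreateConfigFromParameters` hunks move the `config.CompressionLevel = copyFrom.CompressionLevel` assignment out of the general `copyFrom != nil` block and into the `bitCopy` branch. The apparent intent, reading just this diff: a storage added as an ordinary copy target now keeps the compression level it was given as a parameter, while a bit-identical copy target still inherits the source's level, as it must, since a bit-identical target has to reproduce the source's chunks byte for byte (the `IDKey`, `ChunkKey`, and `FileKey` secrets were already inherited only in that case).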
```diff
@@ -90,48 +90,40 @@ func (storage *S3Storage) ListFiles(threadIndex int, dir string) (files []string
 
     if dir == "snapshots/" {
         dir = storage.storageDir + dir
-        input := s3.ListObjectsInput{
+        input := s3.ListObjectsV2Input{
             Bucket:    aws.String(storage.bucket),
             Prefix:    aws.String(dir),
             Delimiter: aws.String("/"),
-            MaxKeys:   aws.Int64(1000),
         }
 
-        output, err := storage.client.ListObjects(&input)
+        err := storage.client.ListObjectsV2Pages(&input, func(page *s3.ListObjectsV2Output, lastPage bool) bool {
+            for _, subDir := range page.CommonPrefixes {
+                files = append(files, (*subDir.Prefix)[len(dir):])
+            }
+            return true
+        })
         if err != nil {
             return nil, nil, err
         }
 
-        for _, subDir := range output.CommonPrefixes {
-            files = append(files, (*subDir.Prefix)[len(dir):])
-        }
         return files, nil, nil
     } else {
         dir = storage.storageDir + dir
-        marker := ""
-        for {
-            input := s3.ListObjectsInput{
-                Bucket:  aws.String(storage.bucket),
-                Prefix:  aws.String(dir),
-                MaxKeys: aws.Int64(1000),
-                Marker:  aws.String(marker),
-            }
+        input := s3.ListObjectsV2Input{
+            Bucket:  aws.String(storage.bucket),
+            Prefix:  aws.String(dir),
+            MaxKeys: aws.Int64(1000),
+        }
 
-            output, err := storage.client.ListObjects(&input)
-            if err != nil {
-                return nil, nil, err
-            }
-
-            for _, object := range output.Contents {
-                files = append(files, (*object.Key)[len(dir):])
-                sizes = append(sizes, *object.Size)
-            }
-
-            if !*output.IsTruncated {
-                break
-            }
-
-            marker = *output.Contents[len(output.Contents)-1].Key
-        }
+        err := storage.client.ListObjectsV2Pages(&input, func(page *s3.ListObjectsV2Output, lastPage bool) bool {
+            for _, object := range page.Contents {
+                files = append(files, (*object.Key)[len(dir):])
+                sizes = append(sizes, *object.Size)
+            }
+            return true
+        })
+        if err != nil {
+            return nil, nil, err
+        }
         return files, sizes, nil
     }
```
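`ListFiles` in the S3 backend moves from the original `ListObjects` API, paginated by hand with `Marker` in the general branch and not paginated at all in the `snapshots/` branch (which would silently stop at 1000 keys), to the SDK's `ListObjectsV2Pages`, which threads the continuation token through repeated requests itself. A standalone sketch of that pattern against aws-sdk-go v1, with a hypothetical bucket and prefix:

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	// Credentials and region come from the usual SDK sources (env, config).
	client := s3.New(session.Must(session.NewSession()))

	input := s3.ListObjectsV2Input{
		Bucket:  aws.String("example-bucket"), // hypothetical bucket
		Prefix:  aws.String("chunks/"),        // hypothetical prefix
		MaxKeys: aws.Int64(1000),
	}

	// ListObjectsV2Pages re-issues the request with the continuation token
	// from each response and invokes the callback once per page; returning
	// true asks for the next page.
	var total int64
	err := client.ListObjectsV2Pages(&input, func(page *s3.ListObjectsV2Output, lastPage bool) bool {
		for _, object := range page.Contents {
			total += *object.Size
		}
		return true
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("total bytes under prefix:", total)
}
```

Any error from any page surfaces as the single return value of `ListObjectsV2Pages`, which is why the new code needs only one `if err != nil` check per branch instead of one per request.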