mirror of https://github.com/jkl1337/duplicacy.git
synced 2026-01-07 06:04:38 -06:00

Compare commits: v3.2.0...915776161f (4 commits)
| Author | SHA1 | Date |
|---|---|---|
| | 915776161f | |
| | 4e9d2c4cca | |
| | cc482beb95 | |
| | bf3ea8a83c | |
```diff
@@ -368,9 +368,6 @@ func configRepository(context *cli.Context, init bool) {
            "The storage '%s' has already been initialized", preference.StorageURL)
        if existingConfig.CompressionLevel >= -1 && existingConfig.CompressionLevel <= 9 {
            duplicacy.LOG_INFO("STORAGE_FORMAT", "This storage is configured to use the pre-1.2.0 format")
-       } else if existingConfig.CompressionLevel != 100 {
-           duplicacy.LOG_ERROR("STORAGE_COMPRESSION", "This storage is configured with an invalid compression level %d", existingConfig.CompressionLevel)
-           return
        } else if existingConfig.CompressionLevel != duplicacy.DEFAULT_COMPRESSION_LEVEL {
            duplicacy.LOG_INFO("STORAGE_COMPRESSION", "Compression level: %d", existingConfig.CompressionLevel)
        }
```
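This removes the hard `STORAGE_COMPRESSION` error (and its early `return`) that rejected any existing storage whose compression level was neither a pre-1.2.0 value (-1 to 9) nor the default 100. Presumably this clears the way for non-default compression levels (e.g. zstd); such storages now simply have their level reported by the remaining `else if` branch instead of aborting `init`/`add`.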
```diff
@@ -1558,7 +1558,7 @@ func (manager *BackupManager) RestoreFile(chunkDownloader *ChunkDownloader, chun
 func (manager *BackupManager) CopySnapshots(otherManager *BackupManager, snapshotID string,
    revisionsToBeCopied []int, uploadingThreads int, downloadingThreads int) bool {

-   if !manager.config.IsCompatiableWith(otherManager.config) {
+   if !manager.config.IsCompatibleWith(otherManager.config) {
        LOG_ERROR("CONFIG_INCOMPATIBLE", "Two storages are not compatible for the copy operation")
        return false
    }
```
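This is the call-site half of a typo fix: `IsCompatiableWith` becomes `IsCompatibleWith`. The definition itself is renamed in the `Config` hunk below.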
```diff
@@ -169,10 +169,9 @@ func (config *Config) UnmarshalJSON(description []byte) (err error) {
    return nil
 }

-func (config *Config) IsCompatiableWith(otherConfig *Config) bool {
+func (config *Config) IsCompatibleWith(otherConfig *Config) bool {

-   return config.CompressionLevel == otherConfig.CompressionLevel &&
-       config.AverageChunkSize == otherConfig.AverageChunkSize &&
+   return config.AverageChunkSize == otherConfig.AverageChunkSize &&
        config.MaximumChunkSize == otherConfig.MaximumChunkSize &&
        config.MinimumChunkSize == otherConfig.MinimumChunkSize &&
        bytes.Equal(config.ChunkSeed, otherConfig.ChunkSeed) &&
```
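Beyond the spelling fix, this hunk also drops `CompressionLevel` from the comparison, so two storages that differ only in compression (say, the lz4 default 100 versus a zstd level) now count as copy-compatible. A minimal self-contained sketch of the new semantics; the `Config` stand-in covers only the fields shown in the diff, and the sample levels (100, 203) are illustrative, not values taken from the commit:

```go
package main

import (
	"bytes"
	"fmt"
)

// Illustrative stand-in for duplicacy's Config; the real struct and the
// real check include additional fields (keys, hash parameters, etc.).
type Config struct {
	CompressionLevel int
	AverageChunkSize int
	MaximumChunkSize int
	MinimumChunkSize int
	ChunkSeed        []byte
}

// IsCompatibleWith mirrors the patched check: CompressionLevel is
// deliberately ignored, so only the chunking parameters must match.
func (config *Config) IsCompatibleWith(otherConfig *Config) bool {
	return config.AverageChunkSize == otherConfig.AverageChunkSize &&
		config.MaximumChunkSize == otherConfig.MaximumChunkSize &&
		config.MinimumChunkSize == otherConfig.MinimumChunkSize &&
		bytes.Equal(config.ChunkSeed, otherConfig.ChunkSeed)
}

func main() {
	lz4 := &Config{100, 4 << 20, 16 << 20, 1 << 20, []byte("seed")}
	zstd := &Config{203, 4 << 20, 16 << 20, 1 << 20, []byte("seed")}

	// Differing compression levels no longer block a copy operation.
	fmt.Println(lz4.IsCompatibleWith(zstd)) // true
}
```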
```diff
@@ -255,7 +254,6 @@ func CreateConfigFromParameters(compressionLevel int, averageChunkSize int, maxi
    }

    if copyFrom != nil {
-       config.CompressionLevel = copyFrom.CompressionLevel
        config.AverageChunkSize = copyFrom.AverageChunkSize
        config.MaximumChunkSize = copyFrom.MaximumChunkSize
        config.MinimumChunkSize = copyFrom.MinimumChunkSize
@@ -265,6 +263,8 @@ func CreateConfigFromParameters(compressionLevel int, averageChunkSize int, maxi
        config.HashKey = copyFrom.HashKey

        if bitCopy {
+           config.CompressionLevel = copyFrom.CompressionLevel
+
            config.IDKey = copyFrom.IDKey
            config.ChunkKey = copyFrom.ChunkKey
            config.FileKey = copyFrom.FileKey
```
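Taken together, these two hunks move the `config.CompressionLevel = copyFrom.CompressionLevel` assignment from the general `copyFrom` path into the `bitCopy` branch: the compression level is now inherited only for bit-identical copies (duplicacy's `-bit-identical` option), while an ordinary copy-compatible storage keeps the level it was created with. This matches the relaxed `IsCompatibleWith` check above.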
```diff
@@ -90,48 +90,40 @@ func (storage *S3Storage) ListFiles(threadIndex int, dir string) (files []string

    if dir == "snapshots/" {
        dir = storage.storageDir + dir
-       input := s3.ListObjectsInput{
+       input := s3.ListObjectsV2Input{
            Bucket:    aws.String(storage.bucket),
            Prefix:    aws.String(dir),
            Delimiter: aws.String("/"),
            MaxKeys:   aws.Int64(1000),
        }

-       output, err := storage.client.ListObjects(&input)
+       err := storage.client.ListObjectsV2Pages(&input, func(page *s3.ListObjectsV2Output, lastPage bool) bool {
+           for _, subDir := range page.CommonPrefixes {
+               files = append(files, (*subDir.Prefix)[len(dir):])
+           }
+           return true
+       })
        if err != nil {
            return nil, nil, err
        }
-
-       for _, subDir := range output.CommonPrefixes {
-           files = append(files, (*subDir.Prefix)[len(dir):])
-       }
        return files, nil, nil
    } else {
        dir = storage.storageDir + dir
-       marker := ""
-       for {
-           input := s3.ListObjectsInput{
-               Bucket:  aws.String(storage.bucket),
-               Prefix:  aws.String(dir),
-               MaxKeys: aws.Int64(1000),
-               Marker:  aws.String(marker),
-           }
+       input := s3.ListObjectsV2Input{
+           Bucket:  aws.String(storage.bucket),
+           Prefix:  aws.String(dir),
+           MaxKeys: aws.Int64(1000),
+       }

-           output, err := storage.client.ListObjects(&input)
-           if err != nil {
-               return nil, nil, err
-           }
-
-           for _, object := range output.Contents {
+       err := storage.client.ListObjectsV2Pages(&input, func(page *s3.ListObjectsV2Output, lastPage bool) bool {
+           for _, object := range page.Contents {
                files = append(files, (*object.Key)[len(dir):])
                sizes = append(sizes, *object.Size)
            }
-
-           if !*output.IsTruncated {
-               break
-           }
-
-           marker = *output.Contents[len(output.Contents)-1].Key
-       }
+           return true
+       })
+       if err != nil {
+           return nil, nil, err
+       }
        return files, sizes, nil
    }
```
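Both branches replace hand-rolled `ListObjects` pagination (a `for` loop tracking `Marker` and `IsTruncated`) with the SDK's `ListObjectsV2Pages`, which follows continuation tokens internally and hands each page to a callback; returning `true` requests the next page. A minimal standalone sketch of the pattern against aws-sdk-go v1 (the bucket name, prefix, and region here are placeholders, not values from the repo):

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	// Credentials come from the usual SDK config chain; region is a placeholder.
	sess := session.Must(session.NewSession(aws.NewConfig().WithRegion("us-east-1")))
	client := s3.New(sess)

	input := &s3.ListObjectsV2Input{
		Bucket:  aws.String("my-bucket"), // placeholder
		Prefix:  aws.String("chunks/"),   // placeholder
		MaxKeys: aws.Int64(1000),
	}

	// ListObjectsV2Pages drives the continuation token internally and
	// invokes the callback once per page of results.
	err := client.ListObjectsV2Pages(input, func(page *s3.ListObjectsV2Output, lastPage bool) bool {
		for _, obj := range page.Contents {
			fmt.Println(*obj.Key, *obj.Size)
		}
		return true // keep fetching pages
	})
	if err != nil {
		log.Fatal(err)
	}
}
```

Returning `false` from the callback stops pagination early, which is how the old loop's `break` would be expressed in this style.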