Compare commits

4 Commits

Author SHA1 Message Date
915776161f Use S3 ListObjectsV2 for listing files
ListObjects has been deprecated since 2016, and ListObjectsV2's explicit
pagination tokens also perform better for large listings.

This also mitigates an issue with iDrive E2, where the StartAfter/Marker key
is included in the output, leading to duplicate entries. Currently this
causes an exhaustive prune to delete chunks erroneously flagged as
duplicates, destroying the storage.
2023-09-23 22:17:22 -05:00

Gilbert Chen
4e9d2c4cca Allow two copy-compatible storages to have different compression levels
This is useful for upgrading an existing storage to zstd compression or other
algorithms. Chunks need to be decompressed and re-compressed during copy anyway.
Only the bit-identical option requires the same compression level.

Also fix a typo: compatiable -> compatible
2023-09-18 14:44:41 -04:00

gilbertchen
cc482beb95 Merge pull request #653 from gorbak25/fix-compression
Fix compression level check
2023-09-18 10:50:55 -04:00

Grzegorz Uriasz
bf3ea8a83c Fix compression level check 2023-09-11 23:03:34 +02:00
4 changed files with 23 additions and 34 deletions
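
A note on commit 915776161f: the sketch below is an illustrative reconstruction of the failure mode described in its message, written against a fake page type rather than the AWS SDK (all names here are hypothetical). When a server such as the iDrive E2 endpoint includes the StartAfter/Marker key in the page it returns, the old marker-based loop appends that key a second time, which is how an exhaustive prune comes to see "duplicate" chunks.

package main

import "fmt"

// fakePage stands in for one ListObjects response page (illustrative only).
type fakePage struct {
    Keys        []string
    IsTruncated bool
}

// listPage simulates a server that wrongly includes the Marker key itself in
// the returned page, matching the iDrive E2 behavior described in the commit.
func listPage(all []string, marker string) fakePage {
    start := 0
    for i, key := range all {
        if key == marker {
            start = i // a compliant server would resume at i+1, after the marker
            break
        }
    }
    end := start + 2 // page size of 2 for the demonstration
    if end > len(all) {
        end = len(all)
    }
    return fakePage{Keys: all[start:end], IsTruncated: end < len(all)}
}

func main() {
    all := []string{"chunks/a", "chunks/b", "chunks/c", "chunks/d"}
    var files []string
    marker := ""
    for {
        page := listPage(all, marker)
        files = append(files, page.Keys...)
        if !page.IsTruncated {
            break
        }
        marker = page.Keys[len(page.Keys)-1]
    }
    fmt.Println(files) // chunks/b and chunks/c each appear twice
}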

@@ -368,9 +368,6 @@ func configRepository(context *cli.Context, init bool) {
             "The storage '%s' has already been initialized", preference.StorageURL)
         if existingConfig.CompressionLevel >= -1 && existingConfig.CompressionLevel <= 9 {
             duplicacy.LOG_INFO("STORAGE_FORMAT", "This storage is configured to use the pre-1.2.0 format")
-        } else if existingConfig.CompressionLevel != 100 {
-            duplicacy.LOG_ERROR("STORAGE_COMPRESSION", "This storage is configured with an invalid compression level %d", existingConfig.CompressionLevel)
-            return
         } else if existingConfig.CompressionLevel != duplicacy.DEFAULT_COMPRESSION_LEVEL {
             duplicacy.LOG_INFO("STORAGE_COMPRESSION", "Compression level: %d", existingConfig.CompressionLevel)
         }
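
Reading this hunk together with PR #653: the removed branch treated any level outside -1..9 that was not exactly 100 as fatal, which would reject a storage initialized with a newer default level (presumably the motivation once zstd support entered the picture); the surviving branch only reports the level. A standalone sketch of the resulting decision, with defaultCompressionLevel standing in for duplicacy.DEFAULT_COMPRESSION_LEVEL, whose concrete value is not shown in this diff:

package main

import "fmt"

// describeCompressionLevel mirrors the branch logic above (sketch only).
func describeCompressionLevel(level, defaultCompressionLevel int) string {
    switch {
    case level >= -1 && level <= 9:
        return "pre-1.2.0 storage format"
    case level != defaultCompressionLevel:
        return fmt.Sprintf("compression level %d", level)
    default:
        return "default compression level"
    }
}

func main() {
    // 100 is used only as a sample default here. With the removed branch,
    // level 201 would have been a hard error; now it is merely reported.
    for _, level := range []int{3, 100, 201} {
        fmt.Printf("%d: %s\n", level, describeCompressionLevel(level, 100))
    }
}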

@@ -1558,7 +1558,7 @@ func (manager *BackupManager) RestoreFile(chunkDownloader *ChunkDownloader, chun
 func (manager *BackupManager) CopySnapshots(otherManager *BackupManager, snapshotID string,
     revisionsToBeCopied []int, uploadingThreads int, downloadingThreads int) bool {
 
-    if !manager.config.IsCompatiableWith(otherManager.config) {
+    if !manager.config.IsCompatibleWith(otherManager.config) {
         LOG_ERROR("CONFIG_INCOMPATIBLE", "Two storages are not compatible for the copy operation")
         return false
     }

@@ -169,10 +169,9 @@ func (config *Config) UnmarshalJSON(description []byte) (err error) {
     return nil
 }
 
-func (config *Config) IsCompatiableWith(otherConfig *Config) bool {
+func (config *Config) IsCompatibleWith(otherConfig *Config) bool {
-    return config.CompressionLevel == otherConfig.CompressionLevel &&
-        config.AverageChunkSize == otherConfig.AverageChunkSize &&
+    return config.AverageChunkSize == otherConfig.AverageChunkSize &&
         config.MaximumChunkSize == otherConfig.MaximumChunkSize &&
         config.MinimumChunkSize == otherConfig.MinimumChunkSize &&
         bytes.Equal(config.ChunkSeed, otherConfig.ChunkSeed) &&
@@ -255,7 +254,6 @@ func CreateConfigFromParameters(compressionLevel int, averageChunkSize int, maxi
     }
 
     if copyFrom != nil {
-        config.CompressionLevel = copyFrom.CompressionLevel
         config.AverageChunkSize = copyFrom.AverageChunkSize
         config.MaximumChunkSize = copyFrom.MaximumChunkSize
@@ -265,6 +263,8 @@ func CreateConfigFromParameters(compressionLevel int, averageChunkSize int, maxi
         config.HashKey = copyFrom.HashKey
 
         if bitCopy {
+            config.CompressionLevel = copyFrom.CompressionLevel
             config.IDKey = copyFrom.IDKey
             config.ChunkKey = copyFrom.ChunkKey
             config.FileKey = copyFrom.FileKey
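
With CompressionLevel dropped from IsCompatibleWith and only inherited under bitCopy, two storages stay copy-compatible as long as their chunking parameters agree; the chunks are re-encoded during copy anyway, as the commit message notes. A minimal restatement of the relaxed rule over the fields visible in this hunk (hypothetical types, not the project's API; the real method compares further fields beyond the lines shown):

package main

import (
    "bytes"
    "fmt"
)

// chunkingParams collects the fields that still have to match for a copy;
// compression level is intentionally absent.
type chunkingParams struct {
    AverageChunkSize int
    MaximumChunkSize int
    MinimumChunkSize int
    ChunkSeed        []byte
}

func copyCompatible(a, b chunkingParams) bool {
    return a.AverageChunkSize == b.AverageChunkSize &&
        a.MaximumChunkSize == b.MaximumChunkSize &&
        a.MinimumChunkSize == b.MinimumChunkSize &&
        bytes.Equal(a.ChunkSeed, b.ChunkSeed)
}

func main() {
    src := chunkingParams{4194304, 16777216, 1048576, []byte("seed")}
    dst := src // same chunking parameters, possibly a different compression level
    fmt.Println(copyCompatible(src, dst)) // true
}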

@@ -90,48 +90,40 @@ func (storage *S3Storage) ListFiles(threadIndex int, dir string) (files []string
     if dir == "snapshots/" {
         dir = storage.storageDir + dir
-        input := s3.ListObjectsInput{
+        input := s3.ListObjectsV2Input{
             Bucket:    aws.String(storage.bucket),
             Prefix:    aws.String(dir),
             Delimiter: aws.String("/"),
             MaxKeys:   aws.Int64(1000),
         }
-        output, err := storage.client.ListObjects(&input)
+        err := storage.client.ListObjectsV2Pages(&input, func(page *s3.ListObjectsV2Output, lastPage bool) bool {
+            for _, subDir := range page.CommonPrefixes {
+                files = append(files, (*subDir.Prefix)[len(dir):])
+            }
+            return true
+        })
         if err != nil {
             return nil, nil, err
         }
-        for _, subDir := range output.CommonPrefixes {
-            files = append(files, (*subDir.Prefix)[len(dir):])
-        }
         return files, nil, nil
     } else {
         dir = storage.storageDir + dir
-        marker := ""
-        for {
-            input := s3.ListObjectsInput{
-                Bucket:  aws.String(storage.bucket),
-                Prefix:  aws.String(dir),
-                MaxKeys: aws.Int64(1000),
-                Marker:  aws.String(marker),
-            }
+        input := s3.ListObjectsV2Input{
+            Bucket:  aws.String(storage.bucket),
+            Prefix:  aws.String(dir),
+            MaxKeys: aws.Int64(1000),
+        }
-            output, err := storage.client.ListObjects(&input)
-            if err != nil {
-                return nil, nil, err
-            }
-            for _, object := range output.Contents {
+        err := storage.client.ListObjectsV2Pages(&input, func(page *s3.ListObjectsV2Output, lastPage bool) bool {
+            for _, object := range page.Contents {
                 files = append(files, (*object.Key)[len(dir):])
                 sizes = append(sizes, *object.Size)
             }
-            if !*output.IsTruncated {
-                break
-            }
-            marker = *output.Contents[len(output.Contents)-1].Key
+            return true
+        })
+        if err != nil {
+            return nil, nil, err
+        }
         return files, sizes, nil
     }
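
For reference, a standalone sketch of the same ListObjectsV2Pages pattern used in the hunk above, against aws-sdk-go v1 with assumed bucket and prefix names: the SDK follows NextContinuationToken across pages internally, so there is no Marker bookkeeping left to go wrong on endpoints that echo the marker key back.

package main

import (
    "fmt"
    "log"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/session"
    "github.com/aws/aws-sdk-go/service/s3"
)

func main() {
    sess := session.Must(session.NewSession())
    client := s3.New(sess)

    input := &s3.ListObjectsV2Input{
        Bucket:  aws.String("my-bucket"), // assumption: example bucket name
        Prefix:  aws.String("chunks/"),   // assumption: example prefix
        MaxKeys: aws.Int64(1000),
    }
    // The callback runs once per page; returning true requests the next page.
    err := client.ListObjectsV2Pages(input, func(page *s3.ListObjectsV2Output, lastPage bool) bool {
        for _, object := range page.Contents {
            fmt.Println(*object.Key, *object.Size)
        }
        return true
    })
    if err != nil {
        log.Fatal(err)
    }
}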