Compare commits

..

1 Commits

Author SHA1 Message Date
f48630d8cc Use S3 ListObjectsV2 for listing files
ListObjects has been deprecated since 2016, and ListObjectsV2, with its
explicit pagination tokens, also performs better on large listings.

This also mitigates an issue with iDrive E2 where the StartAfter/Marker
is included in the output, leading to duplicate entries. Right now this
causes an exhaustive prune to delete chunks erroneously flagged as
duplicate, destroying the storage.
2023-09-29 20:33:17 -05:00
3 changed files with 19 additions and 29 deletions

View File

@@ -2262,7 +2262,7 @@ func main() {
app.Name = "duplicacy" app.Name = "duplicacy"
app.HelpName = "duplicacy" app.HelpName = "duplicacy"
app.Usage = "A new generation cloud backup tool based on lock-free deduplication" app.Usage = "A new generation cloud backup tool based on lock-free deduplication"
app.Version = "3.2.2" + " (" + GitCommit + ")" app.Version = "3.2.0" + " (" + GitCommit + ")"
// Exit with code 2 if an invalid command is provided // Exit with code 2 if an invalid command is provided
app.CommandNotFound = func(context *cli.Context, command string) { app.CommandNotFound = func(context *cli.Context, command string) {

View File

@@ -90,48 +90,40 @@ func (storage *S3Storage) ListFiles(threadIndex int, dir string) (files []string
if dir == "snapshots/" { if dir == "snapshots/" {
dir = storage.storageDir + dir dir = storage.storageDir + dir
input := s3.ListObjectsInput{ input := s3.ListObjectsV2Input{
Bucket: aws.String(storage.bucket), Bucket: aws.String(storage.bucket),
Prefix: aws.String(dir), Prefix: aws.String(dir),
Delimiter: aws.String("/"), Delimiter: aws.String("/"),
MaxKeys: aws.Int64(1000),
} }
output, err := storage.client.ListObjects(&input) err := storage.client.ListObjectsV2Pages(&input, func(page *s3.ListObjectsV2Output, lastPage bool) bool {
for _, subDir := range page.CommonPrefixes {
files = append(files, (*subDir.Prefix)[len(dir):])
}
return true
})
if err != nil { if err != nil {
return nil, nil, err return nil, nil, err
} }
for _, subDir := range output.CommonPrefixes {
files = append(files, (*subDir.Prefix)[len(dir):])
}
return files, nil, nil return files, nil, nil
} else { } else {
dir = storage.storageDir + dir dir = storage.storageDir + dir
marker := "" input := s3.ListObjectsV2Input{
for {
input := s3.ListObjectsInput{
Bucket: aws.String(storage.bucket), Bucket: aws.String(storage.bucket),
Prefix: aws.String(dir), Prefix: aws.String(dir),
MaxKeys: aws.Int64(1000), MaxKeys: aws.Int64(1000),
Marker: aws.String(marker),
} }
output, err := storage.client.ListObjects(&input) err := storage.client.ListObjectsV2Pages(&input, func(page *s3.ListObjectsV2Output, lastPage bool) bool {
if err != nil { for _, object := range page.Contents {
return nil, nil, err
}
for _, object := range output.Contents {
files = append(files, (*object.Key)[len(dir):]) files = append(files, (*object.Key)[len(dir):])
sizes = append(sizes, *object.Size) sizes = append(sizes, *object.Size)
} }
return true
if !*output.IsTruncated { })
break if err != nil {
} return nil, nil, err
marker = *output.Contents[len(output.Contents)-1].Key
} }
return files, sizes, nil return files, sizes, nil
} }

View File

@@ -756,8 +756,6 @@ func CreateStorage(preference Preference, resetPassword bool, threads int) (stor
LOG_ERROR("STORAGE_CREATE", "Failed to load the Storj storage at %s: %v", storageURL, err) LOG_ERROR("STORAGE_CREATE", "Failed to load the Storj storage at %s: %v", storageURL, err)
return nil return nil
} }
SavePassword(preference, "storj_key", apiKey)
SavePassword(preference, "storj_passphrase", passphrase)
return storjStorage return storjStorage
} else if matched[1] == "smb" { } else if matched[1] == "smb" {
server := matched[3] server := matched[3]