Compare commits

...

11 Commits

Author SHA1 Message Date
Gilbert Chen
8b489f04eb Bump version to 2.6.0 2020-07-05 21:27:44 -04:00
Gilbert Chen
089e19f8e6 Add a -key-passphrase option to pass in passphrase for RSA private key
This option is mainly for the web GUI which currently doesn't have a way to
specify the passphrase to decrypt the RSA private key.
2020-07-05 20:58:07 -04:00
Gilbert Chen
1da7e2b536 Fix a crash when a username is not specified with the WebDAV backend 2020-07-03 12:29:53 -04:00
Gilbert Chen
ed8b4393be Add a new backend for StorageMadeEasy's File Fabric storage.
The storage url is fabric://username@storagemadeeasy.com/path/to/storage.
2020-07-03 12:07:33 -04:00
Gilbert Chen
5e28dc4911 Update go-dropbox dependency to fix 0-byte file uploading.
Incorporate gilbertchen/go-dropbox/commit/0baa9015ac2547d8b69b2e88c709aa90cfb8fbc1.
2020-07-03 11:52:16 -04:00
Gilbert Chen
f2f07a120d Retry on "unexpected EOF" errors for the webdav backend. 2020-07-03 11:48:30 -04:00
Gilbert Chen
153f6a2d20 Use multiple threads to list the chunks directory for Google Drive 2020-06-15 12:49:13 -04:00
Gilbert Chen
5d45999077 Clear the loaded content after a snapshot has been verified
The snapshot content is loaded before verifying the snapshot, but after that
it isn't used anymore so it should be released to save memory.
2020-06-10 10:08:53 -04:00
Gilbert Chen
1adcf56890 Add an SFTP backend that supports more ciphers and kex algorithms.
"sftpc://" supports all algorithms implemented in golang.org/x/crypto/ssh,
especially including those weak ones that are excluded from the defaults.
2020-06-08 11:24:20 -04:00
Gilbert Chen
09e3cdfebf Ignore 0-byte chunks passed in by the chunk reader.
The fixed-size chunk reader may create 0-byte chunks from empty files.  This
may cause validation errors when preparing the snapshot file as the last step
of a backup.
2020-06-08 10:53:01 -04:00
Gilbert Chen
fe854d469d Error out in the check command if there are 0-size chunks. 2020-06-02 11:37:12 -04:00
10 changed files with 832 additions and 112 deletions

31
Gopkg.lock generated
View File

@@ -50,10 +50,9 @@
revision = "1de0a1836ce9c3ae1bf737a0869c4f04f28a7f98"
[[projects]]
branch = "master"
name = "github.com/gilbertchen/go-dropbox"
packages = ["."]
revision = "994e692c5061cefa14e4296600a773de7119aa15"
revision = "0baa9015ac2547d8b69b2e88c709aa90cfb8fbc1"
[[projects]]
name = "github.com/gilbertchen/go-ole"
@@ -99,7 +98,7 @@
[[projects]]
name = "github.com/golang/protobuf"
packages = ["proto","protoc-gen-go","protoc-gen-go/descriptor","protoc-gen-go/generator","protoc-gen-go/generator/internal/remap","protoc-gen-go/grpc","protoc-gen-go/plugin","ptypes","ptypes/any","ptypes/duration","ptypes/timestamp"]
packages = ["proto","protoc-gen-go/descriptor","ptypes","ptypes/any","ptypes/duration","ptypes/timestamp"]
revision = "84668698ea25b64748563aa20726db66a6b8d299"
version = "v1.3.5"
@@ -174,18 +173,6 @@
packages = ["blowfish","chacha20","curve25519","ed25519","ed25519/internal/edwards25519","internal/subtle","pbkdf2","poly1305","ssh","ssh/agent","ssh/internal/bcrypt_pbkdf","ssh/terminal"]
revision = "056763e48d71961566155f089ac0f02f1dda9b5a"
[[projects]]
branch = "master"
name = "golang.org/x/exp"
packages = ["apidiff","cmd/apidiff"]
revision = "e8c3332aa8e5b8e6acb4707c3a7e5979052b20aa"
[[projects]]
name = "golang.org/x/mod"
packages = ["module","semver"]
revision = "ed3ec21bb8e252814c380df79a80f366440ddb2d"
version = "v0.2.0"
[[projects]]
branch = "master"
name = "golang.org/x/net"
@@ -209,18 +196,6 @@
revision = "342b2e1fbaa52c93f31447ad2c6abc048c63e475"
version = "v0.3.2"
[[projects]]
branch = "master"
name = "golang.org/x/tools"
packages = ["cmd/goimports","go/ast/astutil","go/gcexportdata","go/internal/gcimporter","go/internal/packagesdriver","go/packages","go/types/typeutil","internal/fastwalk","internal/gocommand","internal/gopathwalk","internal/imports","internal/packagesinternal","internal/telemetry/event"]
revision = "700752c244080ed7ef6a61c3cfd73382cd334e57"
[[projects]]
branch = "master"
name = "golang.org/x/xerrors"
packages = [".","internal"]
revision = "9bdfabe68543c54f90421aeb9a60ef8061b5b544"
[[projects]]
name = "google.golang.org/api"
packages = ["drive/v3","googleapi","googleapi/transport","internal","internal/gensupport","internal/third_party/uritemplates","iterator","option","option/internaloption","storage/v1","transport/cert","transport/http","transport/http/internal/propagation"]
@@ -248,6 +223,6 @@
[solve-meta]
analyzer-name = "dep"
analyzer-version = 1
inputs-digest = "e124cf64f7f8770e51ae52ac89030d512da946e3fdc2666ebd3a604a624dd679"
inputs-digest = "e462352e0b0c726247078462e30a79330ac7a8b9dc62e9ed9d1e097b684224e9"
solver-name = "gps-cdcl"
solver-version = 1

View File

@@ -46,8 +46,8 @@
name = "github.com/gilbertchen/cli"
[[constraint]]
branch = "master"
name = "github.com/gilbertchen/go-dropbox"
revision = "0baa9015ac2547d8b69b2e88c709aa90cfb8fbc1"
[[constraint]]
name = "github.com/gilbertchen/go-ole"

View File

@@ -212,15 +212,20 @@ func runScript(context *cli.Context, storageName string, phase string) bool {
return true
}
func loadRSAPrivateKey(keyFile string, preference *duplicacy.Preference, backupManager *duplicacy.BackupManager, resetPasswords bool) {
func loadRSAPrivateKey(keyFile string, passphrase string, preference *duplicacy.Preference, backupManager *duplicacy.BackupManager, resetPasswords bool) {
if keyFile == "" {
return
}
prompt := fmt.Sprintf("Enter the passphrase for %s:", keyFile)
passphrase := duplicacy.GetPassword(*preference, "rsa_passphrase", prompt, false, resetPasswords)
backupManager.LoadRSAPrivateKey(keyFile, passphrase)
duplicacy.SavePassword(*preference, "rsa_passphrase", passphrase)
if passphrase == "" {
passphrase = duplicacy.GetPassword(*preference, "rsa_passphrase", prompt, false, resetPasswords)
backupManager.LoadRSAPrivateKey(keyFile, passphrase)
duplicacy.SavePassword(*preference, "rsa_passphrase", passphrase)
} else {
backupManager.LoadRSAPrivateKey(keyFile, passphrase)
}
}
func initRepository(context *cli.Context) {
@@ -821,7 +826,7 @@ func restoreRepository(context *cli.Context) {
backupManager := duplicacy.CreateBackupManager(preference.SnapshotID, storage, repository, password, preference.NobackupFile, preference.FiltersFile)
duplicacy.SavePassword(*preference, "password", password)
loadRSAPrivateKey(context.String("key"), preference, backupManager, false)
loadRSAPrivateKey(context.String("key"), context.String("key-passphrase"), preference, backupManager, false)
backupManager.SetupSnapshotCache(preference.Name)
backupManager.Restore(repository, revision, true, quickMode, threads, overwrite, deleteMode, setOwner, showStatistics, patterns)
@@ -874,7 +879,7 @@ func listSnapshots(context *cli.Context) {
showChunks := context.Bool("chunks")
// list doesn't need to decrypt file chunks; but we need -key here so we can reset the passphrase for the private key
loadRSAPrivateKey(context.String("key"), preference, backupManager, resetPassword)
loadRSAPrivateKey(context.String("key"), "", preference, backupManager, resetPassword)
backupManager.SetupSnapshotCache(preference.Name)
backupManager.SnapshotManager.ListSnapshots(id, revisions, tag, showFiles, showChunks)
@@ -919,7 +924,7 @@ func checkSnapshots(context *cli.Context) {
backupManager := duplicacy.CreateBackupManager(preference.SnapshotID, storage, repository, password, "", "")
duplicacy.SavePassword(*preference, "password", password)
loadRSAPrivateKey(context.String("key"), preference, backupManager, false)
loadRSAPrivateKey(context.String("key"), context.String("key-passphrase"), preference, backupManager, false)
id := preference.SnapshotID
if context.Bool("all") {
@@ -977,7 +982,7 @@ func printFile(context *cli.Context) {
backupManager := duplicacy.CreateBackupManager(preference.SnapshotID, storage, repository, password, "", "")
duplicacy.SavePassword(*preference, "password", password)
loadRSAPrivateKey(context.String("key"), preference, backupManager, false)
loadRSAPrivateKey(context.String("key"), context.String("key-passphrase"), preference, backupManager, false)
backupManager.SetupSnapshotCache(preference.Name)
@@ -1035,7 +1040,7 @@ func diff(context *cli.Context) {
backupManager := duplicacy.CreateBackupManager(preference.SnapshotID, storage, repository, password, "", "")
duplicacy.SavePassword(*preference, "password", password)
loadRSAPrivateKey(context.String("key"), preference, backupManager, false)
loadRSAPrivateKey(context.String("key"), context.String("key-passphrase"), preference, backupManager, false)
backupManager.SetupSnapshotCache(preference.Name)
backupManager.SnapshotManager.Diff(repository, snapshotID, revisions, path, compareByHash, preference.NobackupFile, preference.FiltersFile)
@@ -1184,7 +1189,7 @@ func copySnapshots(context *cli.Context) {
sourceManager.SetupSnapshotCache(source.Name)
duplicacy.SavePassword(*source, "password", sourcePassword)
loadRSAPrivateKey(context.String("key"), source, sourceManager, false)
loadRSAPrivateKey(context.String("key"), context.String("key-passphrase"), source, sourceManager, false)
_, destination := getRepositoryPreference(context, context.String("to"))
@@ -1510,6 +1515,11 @@ func main() {
Usage: "the RSA private key to decrypt file chunks",
Argument: "<private key>",
},
cli.StringFlag{
Name: "key-passphrase",
Usage: "the passphrase to decrypt the RSA private key",
Argument: "<private key passphrase>",
},
},
Usage: "Restore the repository to a previously saved snapshot",
ArgsUsage: "[--] [pattern] ...",
@@ -1621,6 +1631,11 @@ func main() {
Usage: "the RSA private key to decrypt file chunks",
Argument: "<private key>",
},
cli.StringFlag{
Name: "key-passphrase",
Usage: "the passphrase to decrypt the RSA private key",
Argument: "<private key passphrase>",
},
cli.IntFlag{
Name: "threads",
Value: 1,
@@ -1655,6 +1670,11 @@ func main() {
Usage: "the RSA private key to decrypt file chunks",
Argument: "<private key>",
},
cli.StringFlag{
Name: "key-passphrase",
Usage: "the passphrase to decrypt the RSA private key",
Argument: "<private key passphrase>",
},
},
Usage: "Print to stdout the specified file, or the snapshot content if no file is specified",
ArgsUsage: "[<file>]",
@@ -1688,6 +1708,11 @@ func main() {
Usage: "the RSA private key to decrypt file chunks",
Argument: "<private key>",
},
cli.StringFlag{
Name: "key-passphrase",
Usage: "the passphrase to decrypt the RSA private key",
Argument: "<private key passphrase>",
},
},
Usage: "Compare two snapshots or two revisions of a file",
ArgsUsage: "[<file>]",
@@ -1965,6 +1990,11 @@ func main() {
Usage: "the RSA private key to decrypt file chunks from the source storage",
Argument: "<private key>",
},
cli.StringFlag{
Name: "key-passphrase",
Usage: "the passphrase to decrypt the RSA private key",
Argument: "<private key passphrase>",
},
},
Usage: "Copy snapshots between compatible storages",
ArgsUsage: " ",
@@ -2084,7 +2114,7 @@ func main() {
app.Name = "duplicacy"
app.HelpName = "duplicacy"
app.Usage = "A new generation cloud backup tool based on lock-free deduplication"
app.Version = "2.5.2" + " (" + GitCommit + ")"
app.Version = "2.6.0" + " (" + GitCommit + ")"
// If the program is interrupted, call the RunAtError function.
c := make(chan os.Signal, 1)

View File

@@ -520,6 +520,11 @@ func (manager *BackupManager) Backup(top string, quickMode bool, threads int, ta
chunkID := chunk.GetID()
chunkSize := chunk.GetLength()
if chunkSize == 0 {
LOG_DEBUG("CHUNK_EMPTY", "Ignored chunk %s of size 0", chunkID)
return
}
chunkIndex++
_, found := chunkCache[chunkID]

View File

@@ -0,0 +1,618 @@
// Copyright (c) Storage Made Easy. All rights reserved.
//
// This storage backend is contributed by Storage Made Easy (https://storagemadeeasy.com/) to be used in
// Duplicacy and its derivative works.
//
package duplicacy
import (
"io"
"fmt"
"time"
"sync"
"bytes"
"errors"
"strings"
"net/url"
"net/http"
"math/rand"
"io/ioutil"
"encoding/xml"
"path/filepath"
"mime/multipart"
)
// The XML element representing a file returned by the File Fabric server
type FileFabricFile struct {
	XMLName xml.Name
	ID      string `xml:"fi_id"`   // server-assigned file id
	Path    string `xml:"path"`    // full path of the file
	Size    int64  `xml:"fi_size"` // size in bytes
	Type    int    `xml:"fi_type"` // 0: regular file, 1: directory (see ListFiles/getFileInfo)
}

// The XML element representing a file list returned by the server
type FileFabricFileList struct {
	XMLName xml.Name         `xml:"files"`
	Files   []FileFabricFile `xml:",any"`
}

// FileFabricStorage is the storage backend for StorageMadeEasy's File Fabric.
type FileFabricStorage struct {
	StorageBase

	endpoint     string // the server
	authToken    string // the authentication token
	accessToken  string // the access token (as returned by getTokenByAuthToken)
	storageDir   string // the path of the storage directory
	storageDirID string // the id of 'storageDir'

	client             *http.Client      // the default http client
	threads            int               // number of threads
	maxRetries         int               // maximum number of tries
	directoryCache     map[string]string // stores ids for directories known to this backend
	directoryCacheLock sync.Mutex        // lock for accessing directoryCache

	isAuthorized bool
	testMode     bool // when true, listing requests use a small page size to exercise paging
}

var (
	errFileFabricAuthorizationFailure = errors.New("Authentication failure")
	errFileFabricDirectoryExists      = errors.New("Directory exists")
)

// The general server response
type FileFabricResponse struct {
	Status  string `xml:"status"`        // "ok" on success
	Message string `xml:"statusmessage"` // human-readable status message ("Success" on success)
}
// checkFileFabricResponse examines a parsed server response and converts any
// failure it reports into a Go error.  'actionFormat'/'actionArguments' describe
// the action being attempted and are used to build the error message.
func checkFileFabricResponse(response FileFabricResponse, actionFormat string, actionArguments ...interface{}) error {
	if response.Status == "ok" && response.Message == "Success" {
		return nil
	}

	// A duplicate-folder error is reported as a distinguished sentinel so that
	// callers can treat it as a non-fatal condition.
	if response.Status == "error_data" && response.Message == "Folder with same name already exists." {
		return errFileFabricDirectoryExists
	}

	action := fmt.Sprintf(actionFormat, actionArguments...)
	return fmt.Errorf("Failed to %s (status: %s, message: %s)", action, response.Status, response.Message)
}
// Create a File Fabric storage backend
//
// 'endpoint' is the server host, 'token' is the auth token exchanged for an
// access token, 'storageDir' is the path of the storage directory on the server,
// and 'threads' is the number of upload/download threads.
func CreateFileFabricStorage(endpoint string, token string, storageDir string, threads int) (storage *FileFabricStorage, err error) {

	// Normalize the storage path to always end with '/'.
	if len(storageDir) > 0 && storageDir[len(storageDir)-1] != '/' {
		storageDir += "/"
	}

	storage = &FileFabricStorage{
		endpoint:       endpoint,
		authToken:      token,
		client:         http.DefaultClient,
		threads:        threads,
		directoryCache: make(map[string]string),
		maxRetries:     12,
	}

	// Exchange the auth token for an access token; this also validates the credentials.
	err = storage.getAccessToken()
	if err != nil {
		return nil, err
	}

	// The storage path must already exist on the server and be a directory.
	storageDirID, isDir, _, err := storage.getFileInfo(0, storageDir)
	if err != nil {
		return nil, err
	}
	if storageDirID == "" {
		return nil, fmt.Errorf("Storage path %s does not exist", storageDir)
	}
	if !isDir {
		return nil, fmt.Errorf("Storage path %s is not a directory", storageDir)
	}

	storage.storageDir = storageDir
	storage.storageDirID = storageDirID

	// Create the top-level directories; errors (e.g. they already exist) are
	// deliberately ignored.
	for _, dir := range []string{"snapshots", "chunks"} {
		storage.CreateDirectory(0, dir)
	}

	storage.DerivedStorage = storage
	storage.SetDefaultNestingLevels([]int{0}, 0)
	return storage, nil
}
// Retrieve the access token using an auth token
//
// The access token is saved in storage.accessToken and is embedded in every
// subsequent API URL (see getAPIURL).
func (storage *FileFabricStorage) getAccessToken() (error) {

	formData := url.Values { "authtoken": {storage.authToken},}

	readCloser, _, _, err := storage.sendRequest(0, http.MethodPost, storage.getAPIURL("getTokenByAuthToken"), nil, formData)
	if err != nil {
		return err
	}
	// Drain and close the body on return so the connection can be reused.
	defer readCloser.Close()
	defer io.Copy(ioutil.Discard, readCloser)

	var output struct {
		FileFabricResponse
		Token string `xml:"token"`
	}

	err = xml.NewDecoder(readCloser).Decode(&output)
	if err != nil {
		return err
	}

	err = checkFileFabricResponse(output.FileFabricResponse, "request the access token")
	if err != nil {
		return err
	}

	storage.accessToken = output.Token
	return nil
}
// Determine if we should retry based on the number of retries given by 'retry' and if so calculate the delay with exponential backoff
//
// Returns false (logging a warning) once 'retry' reaches storage.maxRetries;
// otherwise sleeps for a randomized backoff delay and returns true.
func (storage *FileFabricStorage) shouldRetry(retry int, messageFormat string, messageArguments ...interface{}) bool {
	message := fmt.Sprintf(messageFormat, messageArguments...)
	if retry >= storage.maxRetries {
		LOG_WARN("FILEFABRIC_REQUEST", "%s", message)
		return false
	}

	// Exponential backoff factor, capped at 60.
	backoff := 1 << uint(retry)
	if backoff > 60 {
		backoff = 60
	}
	// Sleep between 0.5*backoff and 1.0*backoff seconds (computed in milliseconds).
	delay := rand.Intn(backoff*500) + backoff*500
	LOG_INFO("FILEFABRIC_RETRY", "%s; retrying after %.1f seconds", message, float32(delay) / 1000.0)
	time.Sleep(time.Duration(delay) * time.Millisecond)
	return true
}
// sendRequest sends one API request to the File Fabric server, retrying with
// exponential backoff (via shouldRetry) on transport errors and on error
// responses from the server.
//
// 'input' may be url.Values (sent form-encoded) or a *RateLimitedReader (used
// for uploads); any other type is a programming error and aborts via LOG_FATAL.
//
// On success it returns the response body (which the caller must close), the
// response headers, and the content length.
func (storage *FileFabricStorage) sendRequest(threadIndex int, method string, requestURL string, requestHeaders map[string]string, input interface{}) ( io.ReadCloser, http.Header, int64, error) {

	var response *http.Response

	for retries := 0; ; retries++ {
		var inputReader io.Reader

		switch input.(type) {
		case url.Values:
			values := input.(url.Values)
			inputReader = strings.NewReader(values.Encode())
			if requestHeaders == nil {
				requestHeaders = make(map[string]string)
			}
			requestHeaders["Content-Type"] = "application/x-www-form-urlencoded"
		case *RateLimitedReader:
			// Rewind so the full content is re-sent when this attempt is a retry.
			rateLimitedReader := input.(*RateLimitedReader)
			rateLimitedReader.Reset()
			inputReader = rateLimitedReader
		default:
			LOG_FATAL("FILEFABRIC_REQUEST", "Input type is not supported")
			return nil, nil, 0, fmt.Errorf("Input type is not supported")
		}

		request, err := http.NewRequest(method, requestURL, inputReader)
		if err != nil {
			return nil, nil, 0, err
		}

		if requestHeaders != nil {
			for key, value := range requestHeaders {
				request.Header.Set(key, value)
			}
		}

		if _, ok := input.(*RateLimitedReader); ok {
			request.ContentLength = input.(*RateLimitedReader).Length()
		}

		response, err = storage.client.Do(request)
		if err != nil {
			if !storage.shouldRetry(retries, "[%d] %s %s returned an error: %v", threadIndex, method, requestURL, err) {
				return nil, nil, 0, err
			}
			continue
		}

		if response.StatusCode < 300 {
			return response.Body, response.Header, response.ContentLength, nil
		}

		// An error status: decode the server's error message, then drain and close
		// the body explicitly.  (The original used 'defer' here, which inside a
		// retry loop kept every failed response body open until the function
		// returned.)
		var output struct {
			Status  string `xml:"status"`
			Message string `xml:"statusmessage"`
		}

		decodeErr := xml.NewDecoder(response.Body).Decode(&output)
		io.Copy(ioutil.Discard, response.Body)
		response.Body.Close()

		if decodeErr != nil {
			if !storage.shouldRetry(retries, "[%d] %s %s returned an invalid response: %v", threadIndex, method, requestURL, decodeErr) {
				return nil, nil, 0, decodeErr
			}
			continue
		}

		if !storage.shouldRetry(retries, "[%d] %s %s returned status: %s, message: %s", threadIndex, method, requestURL, output.Status, output.Message) {
			// BUG FIX: the original returned 'err' here, which is nil after a
			// successful decode, so callers saw a nil error together with a nil
			// body.  Return an explicit error describing the server's response.
			return nil, nil, 0, fmt.Errorf("%s %s returned status: %s, message: %s", method, requestURL, output.Status, output.Message)
		}
	}
}
// getAPIURL builds the full URL for the named API function.  Before an access
// token has been obtained, the placeholder '*' takes its position in the URL
// (this is the form used by the initial getTokenByAuthToken call).
func (storage *FileFabricStorage) getAPIURL(function string) string {
	token := storage.accessToken
	if token == "" {
		token = "*"
	}
	return "https://" + storage.endpoint + "/api/" + token + "/" + function + "/"
}
// ListFiles returns the list of files and subdirectories under 'dir'.  Subdirectories
// returned have a trailing '/' and a size of 0.  If 'dir' is 'snapshots/', only
// subdirectories are returned; otherwise only regular files are returned.  Pages of
// results are fetched using the server's 'marker'/'truncated' paging protocol.
func (storage *FileFabricStorage) ListFiles(threadIndex int, dir string) (files []string, sizes []int64, err error) {
	if dir != "" && dir[len(dir)-1] != '/' {
		dir += "/"
	}

	dirID, _, _, err := storage.getFileInfo(threadIndex, dir)
	if err != nil {
		return nil, nil, err
	}
	if dirID == "" {
		// The directory doesn't exist; report an empty listing.
		return nil, nil, nil
	}

	lastID := ""
	for {
		formData := url.Values { "marker": {lastID}, "limit": {"1000"}, "includefolders": {"n"}, "fi_pid" : {dirID}}
		if dir == "snapshots/" {
			formData["includefolders"] = []string{"y"}
		}
		if storage.testMode {
			// Use a tiny page size so tests exercise the paging logic.
			formData["limit"] = []string{"5"}
		}

		var output struct {
			FileFabricResponse
			FileList  FileFabricFileList `xml:"files"`
			Truncated int                `xml:"truncated"`
		}

		// Fetch and decode one page inside a closure so the response body is
		// drained and closed at the end of each iteration.  BUG FIX: the original
		// used 'defer' directly in this loop, which kept one response body open
		// per page until the entire listing finished.
		err = func() error {
			readCloser, _, _, err := storage.sendRequest(threadIndex, http.MethodPost, storage.getAPIURL("getListOfFiles"), nil, formData)
			if err != nil {
				return err
			}
			defer readCloser.Close()
			defer io.Copy(ioutil.Discard, readCloser)

			if err = xml.NewDecoder(readCloser).Decode(&output); err != nil {
				return err
			}
			return checkFileFabricResponse(output.FileFabricResponse, "list the storage directory '%s'", dir)
		}()
		if err != nil {
			return nil, nil, err
		}

		if dir == "snapshots/" {
			// Only subdirectories (snapshot ids) are of interest here.
			for _, file := range output.FileList.Files {
				if file.Type == 1 {
					files = append(files, file.Path + "/")
				}
				lastID = file.ID
			}
		} else {
			// Only regular files are of interest.
			for _, file := range output.FileList.Files {
				if file.Type == 0 {
					files = append(files, file.Path)
					sizes = append(sizes, file.Size)
				}
				lastID = file.ID
			}
		}

		if output.Truncated != 1 {
			break
		}
	}

	return files, sizes, nil
}
// getFileInfo returns the information about the file or directory at 'filePath'.
//
// An empty 'fileID' together with a nil error means the path does not exist.
// When the path turns out to be a directory, its id is also recorded in the
// directory cache (keyed by the path without trailing slashes).
func (storage *FileFabricStorage) getFileInfo(threadIndex int, filePath string) (fileID string, isDir bool, size int64, err error) {
	// The API takes the full server-side path, so prepend the storage directory.
	formData := url.Values { "path" : {storage.storageDir + filePath}}

	readCloser, _, _, err := storage.sendRequest(threadIndex, http.MethodPost, storage.getAPIURL("checkPathExists"), nil, formData)
	if err != nil {
		return "", false, 0, err
	}
	// Drain and close the body on return so the connection can be reused.
	defer readCloser.Close()
	defer io.Copy(ioutil.Discard, readCloser)

	var output struct {
		FileFabricResponse
		File FileFabricFile `xml:"file"`
		Exists string `xml:"exists"`
	}

	err = xml.NewDecoder(readCloser).Decode(&output)
	if err != nil {
		return "", false, 0, err
	}

	err = checkFileFabricResponse(output.FileFabricResponse, "get the info on '%s'", filePath)
	if err != nil {
		return "", false, 0, err
	}

	if output.Exists != "y" {
		// The path does not exist; indicated to the caller by an empty fileID.
		return "", false, 0, nil
	} else {
		if output.File.Type == 1 {
			// A directory: cache its id under the path stripped of trailing slashes.
			for filePath != "" && filePath[len(filePath)-1] == '/' {
				filePath = filePath[:len(filePath)-1]
			}
			storage.directoryCacheLock.Lock()
			storage.directoryCache[filePath] = output.File.ID
			storage.directoryCacheLock.Unlock()
		}
		return output.File.ID, output.File.Type == 1, output.File.Size, nil
	}
}
// GetFileInfo returns the information about the file or directory at 'filePath'. This is a function required by the Storage interface.
func (storage *FileFabricStorage) GetFileInfo(threadIndex int, filePath string) (exist bool, isDir bool, size int64, err error) {
	// Delegate to the internal lookup; a non-empty id means the path exists.
	var fileID string
	fileID, isDir, size, err = storage.getFileInfo(threadIndex, filePath)
	exist = fileID != ""
	return exist, isDir, size, err
}
// DeleteFile deletes the file or directory at 'filePath'.
//
// NOTE(review): the error from getFileInfo is discarded, so a failed lookup is
// treated the same as a missing file and the delete silently returns nil —
// presumably intentional best-effort behavior; confirm.
func (storage *FileFabricStorage) DeleteFile(threadIndex int, filePath string) (err error) {
	fileID, _, _, _ := storage.getFileInfo(threadIndex, filePath)
	if fileID == "" {
		// Nothing to delete.
		return nil
	}

	formData := url.Values { "fi_id" : {fileID}}

	readCloser, _, _, err := storage.sendRequest(threadIndex, http.MethodPost, storage.getAPIURL("doDeleteFile"), nil, formData)
	if err != nil {
		return err
	}
	// Drain and close the body on return so the connection can be reused.
	defer readCloser.Close()
	defer io.Copy(ioutil.Discard, readCloser)

	var output FileFabricResponse

	err = xml.NewDecoder(readCloser).Decode(&output)
	if err != nil {
		return err
	}

	err = checkFileFabricResponse(output, "delete file '%s'", filePath)
	if err != nil {
		return err
	}
	return nil
}
// MoveFile renames the file.
//
// Only the base name of 'to' is sent to the 'doRenameFile' API, so this renames
// a file within its current directory; it does not appear to support moving
// across directories — confirm against the API if that is ever needed.
// NOTE(review): as in DeleteFile, a failed lookup of 'from' silently returns nil.
func (storage *FileFabricStorage) MoveFile(threadIndex int, from string, to string) (err error) {
	fileID, _, _, _ := storage.getFileInfo(threadIndex, from)
	if fileID == "" {
		// Source does not exist; nothing to rename.
		return nil
	}

	formData := url.Values { "fi_id" : {fileID}, "fi_name": {filepath.Base(to)},}

	readCloser, _, _, err := storage.sendRequest(threadIndex, http.MethodPost, storage.getAPIURL("doRenameFile"), nil, formData)
	if err != nil {
		return err
	}
	// Drain and close the body on return so the connection can be reused.
	defer readCloser.Close()
	defer io.Copy(ioutil.Discard, readCloser)

	var output FileFabricResponse

	err = xml.NewDecoder(readCloser).Decode(&output)
	if err != nil {
		return err
	}

	err = checkFileFabricResponse(output, "rename file '%s' to '%s'", from, to)
	if err != nil {
		return err
	}
	return nil
}
// createParentDirectory creates the parent directory if it doesn't exist in the cache.
//
// Returns the id of the parent directory of 'dir', creating it (recursively,
// via createDirectory) when its id is not already cached.
func (storage *FileFabricStorage) createParentDirectory(threadIndex int, dir string) (parentID string, err error) {
	found := strings.LastIndex(dir, "/")
	if found == -1 {
		// No parent component: the parent is the storage directory itself.
		return storage.storageDirID, nil
	}
	parent := dir[:found]

	// Fast path: the parent's id is already in the cache.
	storage.directoryCacheLock.Lock()
	parentID = storage.directoryCache[parent]
	storage.directoryCacheLock.Unlock()

	if parentID != "" {
		return parentID, nil
	}

	parentID, err = storage.createDirectory(threadIndex, parent)
	if err != nil {
		if err == errFileFabricDirectoryExists {
			// The directory already exists (possibly created concurrently);
			// look up its id and cache it instead.
			var isDir bool
			parentID, isDir, _, err = storage.getFileInfo(threadIndex, parent)
			if err != nil {
				return "", err
			}
			if isDir == false {
				return "", fmt.Errorf("'%s' in the storage is a file", parent)
			}
			storage.directoryCacheLock.Lock()
			storage.directoryCache[parent] = parentID
			storage.directoryCacheLock.Unlock()
			return parentID, nil
		} else {
			return "", err
		}
	}

	return parentID, nil
}
// createDirectory creates a new directory.
//
// Returns the id of the new directory and caches it.  If a folder with the same
// name already exists, the sentinel errFileFabricDirectoryExists is returned
// (via checkFileFabricResponse).  Missing parents are created first.
func (storage *FileFabricStorage) createDirectory(threadIndex int, dir string) (dirID string, err error) {
	// Strip trailing slashes so the cache key and base name are canonical.
	for dir != "" && dir[len(dir)-1] == '/' {
		dir = dir[:len(dir)-1]
	}

	parentID, err := storage.createParentDirectory(threadIndex, dir)
	if err != nil {
		return "", err
	}

	formData := url.Values { "fi_name": {filepath.Base(dir)}, "fi_pid" : {parentID}}

	readCloser, _, _, err := storage.sendRequest(threadIndex, http.MethodPost, storage.getAPIURL("doCreateNewFolder"), nil, formData)
	if err != nil {
		return "", err
	}
	// Drain and close the body on return so the connection can be reused.
	defer readCloser.Close()
	defer io.Copy(ioutil.Discard, readCloser)

	var output struct {
		FileFabricResponse
		File FileFabricFile `xml:"file"`
	}

	err = xml.NewDecoder(readCloser).Decode(&output)
	if err != nil {
		return "", err
	}

	err = checkFileFabricResponse(output.FileFabricResponse, "create directory '%s'", dir)
	if err != nil {
		return "", err
	}

	// Cache the id of the newly created directory.
	storage.directoryCacheLock.Lock()
	storage.directoryCache[dir] = output.File.ID
	storage.directoryCacheLock.Unlock()
	return output.File.ID, nil
}
// CreateDirectory creates a directory at 'dir'; a directory that already exists
// is not treated as an error.
func (storage *FileFabricStorage) CreateDirectory(threadIndex int, dir string) (err error) {
	if _, err = storage.createDirectory(threadIndex, dir); err == errFileFabricDirectoryExists {
		return nil
	}
	return err
}
// DownloadFile reads the file at 'filePath' into the chunk.
//
// NOTE(review): the 'fi_id' form field is populated with the full path
// (storage.storageDir + filePath) rather than a numeric file id as in other
// calls — presumably the 'getFile' API accepts a path here; confirm.
func (storage *FileFabricStorage) DownloadFile(threadIndex int, filePath string, chunk *Chunk) (err error) {
	formData := url.Values { "fi_id" : {storage.storageDir + filePath}}

	readCloser, _, _, err := storage.sendRequest(threadIndex, http.MethodPost, storage.getAPIURL("getFile"), nil, formData)
	if err != nil {
		return err
	}
	// Drain and close the body on return so the connection can be reused.
	defer readCloser.Close()
	defer io.Copy(ioutil.Discard, readCloser)

	// Copy the body into the chunk, subject to the per-thread download rate limit.
	_, err = RateLimitedCopy(chunk, readCloser, storage.DownloadRateLimit/storage.threads)
	return err
}
// UploadFile writes 'content' to the file at 'filePath', creating any missing
// parent directories first.  The content is sent as a multipart/form-data POST
// to the 'doUploadFiles' API, rate-limited by the storage's upload rate limit.
func (storage *FileFabricStorage) UploadFile(threadIndex int, filePath string, content []byte) (err error) {
	parentID, err := storage.createParentDirectory(threadIndex, filePath)
	if err != nil {
		return err
	}

	fileName := filepath.Base(filePath)

	// Build the multipart body: the file content plus the form fields naming the
	// target file and its parent directory.
	requestBody := &bytes.Buffer{}
	writer := multipart.NewWriter(requestBody)
	part, _ := writer.CreateFormFile("file_1", fileName)
	part.Write(content)
	writer.WriteField("file_name1", fileName)
	writer.WriteField("fi_pid", parentID)
	writer.WriteField("fi_structtype", "g")
	writer.Close()

	headers := make(map[string]string)
	headers["Content-Type"] = writer.FormDataContentType()

	rateLimitedReader := CreateRateLimitedReader(requestBody.Bytes(), storage.UploadRateLimit/storage.threads)

	readCloser, _, _, err := storage.sendRequest(threadIndex, http.MethodPost, storage.getAPIURL("doUploadFiles"), headers, rateLimitedReader)
	// BUG FIX: the error from sendRequest was previously ignored; on a failed
	// request 'readCloser' is nil and the deferred Close below would panic.
	if err != nil {
		return err
	}
	// Drain and close the body on return so the connection can be reused.
	defer readCloser.Close()
	defer io.Copy(ioutil.Discard, readCloser)

	var output FileFabricResponse

	err = xml.NewDecoder(readCloser).Decode(&output)
	if err != nil {
		return err
	}

	err = checkFileFabricResponse(output, "upload file '%s'", filePath)
	if err != nil {
		return err
	}
	return nil
}
// If a local snapshot cache is needed for the storage to avoid downloading/uploading chunks too often when
// managing snapshots.
func (storage *FileFabricStorage) IsCacheNeeded() bool { return true }

// If the 'MoveFile' method is implemented.
func (storage *FileFabricStorage) IsMoveFileImplemented() bool { return true }

// If the storage can guarantee strong consistency.
func (storage *FileFabricStorage) IsStrongConsistent() bool { return false }

// If the storage supports fast listing of files names.
func (storage *FileFabricStorage) IsFastListing() bool { return false }

// Enable the test mode.  In test mode, ListFiles uses a small page size
// so the paging logic is exercised.
func (storage *FileFabricStorage) EnableTestMode() { storage.testMode = true }

View File

@@ -86,6 +86,10 @@ func (storage *GCDStorage) shouldRetry(threadIndex int, err error) (bool, error)
// Request timeout
message = e.Message
retry = true
} else if e.Code == 400 && strings.Contains(e.Message, "failedPrecondition") {
// Daily quota exceeded
message = e.Message
retry = true
} else if e.Code == 401 {
// Only retry on authorization error when storage has been connected before
if storage.isConnected {
@@ -476,39 +480,76 @@ func (storage *GCDStorage) ListFiles(threadIndex int, dir string) ([]string, []i
}
return files, nil, nil
} else {
files := []string{}
sizes := []int64{}
lock := sync.Mutex {}
allFiles := []string{}
allSizes := []int64{}
errorChannel := make(chan error)
directoryChannel := make(chan string)
activeWorkers := 0
parents := []string{"chunks", "fossils"}
for i := 0; i < len(parents); i++ {
parent := parents[i]
pathID, ok := storage.findPathID(parent)
if !ok {
continue
}
entries, err := storage.listFiles(threadIndex, pathID, true, true)
if err != nil {
return nil, nil, err
}
for _, entry := range entries {
if entry.MimeType != GCDDirectoryMimeType {
name := entry.Name
if strings.HasPrefix(parent, "fossils") {
name = parent + "/" + name + ".fsl"
name = name[len("fossils/"):]
} else {
name = parent + "/" + name
name = name[len("chunks/"):]
for len(parents) > 0 || activeWorkers > 0 {
if len(parents) > 0 && activeWorkers < storage.numberOfThreads {
parent := parents[0]
parents = parents[1:]
activeWorkers++
go func(parent string) {
pathID, ok := storage.findPathID(parent)
if !ok {
return
}
entries, err := storage.listFiles(threadIndex, pathID, true, true)
if err != nil {
errorChannel <- err
return
}
LOG_DEBUG("GCD_STORAGE", "Listing %s; %d items returned", parent, len(entries))
files := []string {}
sizes := []int64 {}
for _, entry := range entries {
if entry.MimeType != GCDDirectoryMimeType {
name := entry.Name
if strings.HasPrefix(parent, "fossils") {
name = parent + "/" + name + ".fsl"
name = name[len("fossils/"):]
} else {
name = parent + "/" + name
name = name[len("chunks/"):]
}
files = append(files, name)
sizes = append(sizes, entry.Size)
} else {
directoryChannel <- parent+"/"+entry.Name
storage.savePathID(parent+"/"+entry.Name, entry.Id)
}
}
lock.Lock()
allFiles = append(allFiles, files...)
allSizes = append(allSizes, sizes...)
lock.Unlock()
directoryChannel <- ""
} (parent)
}
if activeWorkers > 0 {
select {
case err := <- errorChannel:
return nil, nil, err
case directory := <- directoryChannel:
if directory == "" {
activeWorkers--
} else {
parents = append(parents, directory)
}
files = append(files, name)
sizes = append(sizes, entry.Size)
} else {
parents = append(parents, parent+"/"+entry.Name)
storage.savePathID(parent+"/"+entry.Name, entry.Id)
}
}
}
return files, sizes, nil
return allFiles, allSizes, nil
}
}

View File

@@ -43,10 +43,10 @@ func CreateSFTPStorageWithPassword(server string, port int, username string, sto
return nil
}
return CreateSFTPStorage(server, port, username, storageDir, minimumNesting, authMethods, hostKeyCallback, threads)
return CreateSFTPStorage(false, server, port, username, storageDir, minimumNesting, authMethods, hostKeyCallback, threads)
}
func CreateSFTPStorage(server string, port int, username string, storageDir string, minimumNesting int,
func CreateSFTPStorage(compatibilityMode bool, server string, port int, username string, storageDir string, minimumNesting int,
authMethods []ssh.AuthMethod,
hostKeyCallback func(hostname string, remote net.Addr,
key ssh.PublicKey) error, threads int) (storage *SFTPStorage, err error) {
@@ -57,8 +57,21 @@ func CreateSFTPStorage(server string, port int, username string, storageDir stri
HostKeyCallback: hostKeyCallback,
}
if server == "sftp.hidrive.strato.com" {
sftpConfig.Ciphers = []string{"aes128-ctr", "aes256-ctr"}
if compatibilityMode {
sftpConfig.Ciphers = []string{
"aes128-ctr", "aes192-ctr", "aes256-ctr",
"aes128-gcm@openssh.com",
"chacha20-poly1305@openssh.com",
"arcfour256", "arcfour128", "arcfour",
"aes128-cbc",
"3des-cbc",
}
sftpConfig.KeyExchanges = [] string {
"curve25519-sha256@libssh.org",
"ecdh-sha2-nistp256", "ecdh-sha2-nistp384", "ecdh-sha2-nistp521",
"diffie-hellman-group1-sha1", "diffie-hellman-group14-sha1",
"diffie-hellman-group-exchange-sha1", "diffie-hellman-group-exchange-sha256",
}
}
serverAddress := fmt.Sprintf("%s:%d", server, port)

View File

@@ -381,6 +381,13 @@ func (manager *SnapshotManager) DownloadSnapshotContents(snapshot *Snapshot, pat
return true
}
// ClearSnapshotContents removes contents loaded by DownloadSnapshotContents
func (manager *SnapshotManager) ClearSnapshotContents(snapshot *Snapshot) {
snapshot.ChunkHashes = nil
snapshot.ChunkLengths = nil
snapshot.Files = nil
}
// CleanSnapshotCache removes all files not referenced by the specified 'snapshot' in the snapshot cache.
func (manager *SnapshotManager) CleanSnapshotCache(latestSnapshot *Snapshot, allSnapshots map[string][]*Snapshot) bool {
@@ -821,6 +828,8 @@ func (manager *SnapshotManager) CheckSnapshots(snapshotID string, revisionsToChe
// Store the index of the snapshot that references each chunk; if the chunk is shared by multiple chunks, the index is -1
chunkSnapshotMap := make(map[string]int)
emptyChunks := 0
LOG_INFO("SNAPSHOT_CHECK", "Listing all chunks")
allChunks, allSizes := manager.ListAllFiles(manager.storage, chunkDir)
@@ -835,6 +844,11 @@ func (manager *SnapshotManager) CheckSnapshots(snapshotID string, revisionsToChe
chunk = strings.Replace(chunk, "/", "", -1)
chunkSizeMap[chunk] = allSizes[i]
if allSizes[i] == 0 {
LOG_WARN("SNAPSHOT_CHECK", "Chunk %s has a size of 0", chunk)
emptyChunks++
}
}
if snapshotID == "" || showStatistics || showTabular {
@@ -899,6 +913,7 @@ func (manager *SnapshotManager) CheckSnapshots(snapshotID string, revisionsToChe
if checkFiles {
manager.DownloadSnapshotContents(snapshot, nil, false)
manager.VerifySnapshot(snapshot)
manager.ClearSnapshotContents(snapshot)
continue
}
@@ -991,6 +1006,11 @@ func (manager *SnapshotManager) CheckSnapshots(snapshotID string, revisionsToChe
return false
}
if emptyChunks > 0 {
LOG_ERROR("SNAPSHOT_CHECK", "%d chunks have a size of 0", emptyChunks)
return false
}
if showTabular {
manager.ShowStatisticsTabular(snapshotMap, chunkSizeMap, chunkUniqueMap, chunkSnapshotMap)
} else if showStatistics {

View File

@@ -268,7 +268,7 @@ func CreateStorage(preference Preference, resetPassword bool, threads int) (stor
if matched == nil {
LOG_ERROR("STORAGE_CREATE", "Unrecognizable storage URL: %s", storageURL)
return nil
} else if matched[1] == "sftp" {
} else if matched[1] == "sftp" || matched[1] == "sftpc" {
server := matched[3]
username := matched[2]
storageDir := matched[5]
@@ -440,7 +440,7 @@ func CreateStorage(preference Preference, resetPassword bool, threads int) (stor
return checkHostKey(hostname, remote, key)
}
sftpStorage, err := CreateSFTPStorage(server, port, username, storageDir, 2, authMethods, hostKeyChecker, threads)
sftpStorage, err := CreateSFTPStorage(matched[1] == "sftpc", server, port, username, storageDir, 2, authMethods, hostKeyChecker, threads)
if err != nil {
LOG_ERROR("STORAGE_CREATE", "Failed to load the SFTP storage at %s: %v", storageURL, err)
return nil
@@ -678,6 +678,10 @@ func CreateStorage(preference Preference, resetPassword bool, threads int) (stor
} else if matched[1] == "webdav" || matched[1] == "webdav-http" {
server := matched[3]
username := matched[2]
if username == "" {
LOG_ERROR("STORAGE_CREATE", "No username is provided to access the WebDAV storage")
return nil
}
username = username[:len(username)-1]
storageDir := matched[5]
port := 0
@@ -698,6 +702,18 @@ func CreateStorage(preference Preference, resetPassword bool, threads int) (stor
}
SavePassword(preference, "webdav_password", password)
return webDAVStorage
} else if matched[1] == "fabric" {
endpoint := matched[3]
storageDir := matched[5]
prompt := fmt.Sprintf("Enter the token for accessing the Storage Made Easy File Fabric storage:")
token := GetPassword(preference, "fabric_token", prompt, true, resetPassword)
smeStorage, err := CreateFileFabricStorage(endpoint, token, storageDir, threads)
if err != nil {
LOG_ERROR("STORAGE_CREATE", "Failed to load the File Fabric storage at %s: %v", storageURL, err)
return nil
}
SavePassword(preference, "fabric_token", token)
return smeStorage
} else {
LOG_ERROR("STORAGE_CREATE", "The storage type '%s' is not supported", matched[1])
return nil

View File

@@ -14,7 +14,6 @@ import (
"errors"
"fmt"
"io"
"io/ioutil"
"math/rand"
"net/http"
//"net/http/httputil"
@@ -214,53 +213,56 @@ type WebDAVMultiStatus struct {
func (storage *WebDAVStorage) getProperties(uri string, depth int, properties ...string) (map[string]WebDAVProperties, error) {
propfind := "<prop>"
for _, p := range properties {
propfind += fmt.Sprintf("<%s/>", p)
}
propfind += "</prop>"
maxTries := 3
for tries := 0; ; tries++ {
propfind := "<prop>"
for _, p := range properties {
propfind += fmt.Sprintf("<%s/>", p)
}
propfind += "</prop>"
body := fmt.Sprintf(`<?xml version="1.0" encoding="utf-8" ?><propfind xmlns="DAV:">%s</propfind>`, propfind)
body := fmt.Sprintf(`<?xml version="1.0" encoding="utf-8" ?><propfind xmlns="DAV:">%s</propfind>`, propfind)
readCloser, _, err := storage.sendRequest("PROPFIND", uri, depth, []byte(body))
if err != nil {
return nil, err
}
defer readCloser.Close()
content, err := ioutil.ReadAll(readCloser)
if err != nil {
return nil, err
}
readCloser, _, err := storage.sendRequest("PROPFIND", uri, depth, []byte(body))
if err != nil {
return nil, err
}
defer readCloser.Close()
object := WebDAVMultiStatus{}
err = xml.Unmarshal(content, &object)
if err != nil {
return nil, err
}
if object.Responses == nil || len(object.Responses) == 0 {
return nil, errors.New("no WebDAV responses")
}
responses := make(map[string]WebDAVProperties)
for _, responseTag := range object.Responses {
if responseTag.PropStat == nil || responseTag.PropStat.Prop == nil || responseTag.PropStat.Prop.PropList == nil {
return nil, errors.New("no WebDAV properties")
object := WebDAVMultiStatus{}
err = xml.NewDecoder(readCloser).Decode(&object)
if err != nil {
if strings.Contains(err.Error(), "unexpected EOF") && tries < maxTries {
LOG_WARN("WEBDAV_RETRY", "Retrying on %v", err)
continue
}
return nil, err
}
properties := make(WebDAVProperties)
for _, prop := range responseTag.PropStat.Prop.PropList {
properties[prop.XMLName.Local] = prop.Value
if object.Responses == nil || len(object.Responses) == 0 {
return nil, errors.New("no WebDAV responses")
}
responseKey := responseTag.Href
responses[responseKey] = properties
responses := make(map[string]WebDAVProperties)
for _, responseTag := range object.Responses {
if responseTag.PropStat == nil || responseTag.PropStat.Prop == nil || responseTag.PropStat.Prop.PropList == nil {
return nil, errors.New("no WebDAV properties")
}
properties := make(WebDAVProperties)
for _, prop := range responseTag.PropStat.Prop.PropList {
properties[prop.XMLName.Local] = prop.Value
}
responseKey := responseTag.Href
responses[responseKey] = properties
}
return responses, nil
}
return responses, nil
}
// ListFiles return the list of files and subdirectories under 'dir'. A subdirectories returned must have a trailing '/', with