Implement fast resume; refactor GetDuplicacyPreferencePath()

Gilbert Chen
2017-06-22 22:53:33 -04:00
parent 839be6094f
commit d0c376f593
12 changed files with 393 additions and 146 deletions

View File

@@ -13,6 +13,7 @@ import (
"path"
"time"
"sort"
"sync"
"sync/atomic"
"strings"
"strconv"
@@ -70,9 +71,9 @@ func CreateBackupManager(snapshotID string, storage Storage, top string, passwor
// SetupSnapshotCache creates the snapshot cache, which is merely a local storage under the default .duplicacy
// directory
func (manager *BackupManager) SetupSnapshotCache(top string, storageName string) bool {
func (manager *BackupManager) SetupSnapshotCache(storageName string) bool {
preferencePath := GetDuplicacyPreferencePath(top)
preferencePath := GetDuplicacyPreferencePath()
cacheDir := path.Join(preferencePath, "cache", storageName)
storage, err := CreateFileStorage(cacheDir, 1)
@@ -94,11 +95,19 @@ func (manager *BackupManager) SetupSnapshotCache(top string, storageName string)
return true
}
// setEntryContent sets the 4 content pointers for each entry in 'entries'. 'offset' indicates the value
// to be added to the StartChunk and EndChunk pointers, used when 'entries' are to be appended to the
// original unchanged entry list.
//
// This function assumes that the Size field of each entry equals the total length of the chunk content that
// belongs to the file.
func setEntryContent(entries[] *Entry, chunkLengths[]int, offset int) {
if len(entries) == 0 {
return
}
// The following code works by iterating over 'entries' and 'chunkLengths' while keeping track of the
// accumulated total file size and the accumulated total chunk size.
i := 0
totalChunkSize := int64(0)
totalFileSize := entries[i].Size
@@ -115,6 +124,8 @@ func setEntryContent(entries[] *Entry, chunkLengths[]int, offset int) {
break
}
// If the current file ends at the end of the current chunk, the next file will
// start at the next chunk
if totalChunkSize + int64(length) == totalFileSize {
entries[i].StartChunk = j + 1 + offset
entries[i].StartOffset = 0
@@ -126,6 +137,9 @@ func setEntryContent(entries[] *Entry, chunkLengths[]int, offset int) {
totalFileSize += entries[i].Size
}
if i >= len(entries) {
break
}
totalChunkSize += int64(length)
}
}
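
The mapping that setEntryContent computes is easier to see on concrete data. The following standalone sketch is not the duplicacy implementation; it derives the same four content pointers for files packed back to back into chunks with an independent byte-offset walk, using hypothetical type and function names, and prints the values one would expect for two files of 10 and 6 bytes split across two 8-byte chunks.

package main

import "fmt"

// span holds the four content pointers that duplicacy's Entry carries
// (StartChunk/StartOffset/EndChunk/EndOffset); the type and the locate
// function below are illustrative stand-ins, not the real implementation.
type span struct {
	StartChunk, StartOffset, EndChunk, EndOffset int
}

// locate maps files packed back to back into the given chunks onto spans.
// 'offset' plays the same role as in setEntryContent: it shifts chunk indices
// when these chunks are appended after an existing chunk list.
func locate(fileSizes []int64, chunkLengths []int, offset int) []span {
	spans := make([]span, len(fileSizes))
	chunk, pos := 0, int64(0)
	for i, size := range fileSizes {
		spans[i].StartChunk = chunk + offset
		spans[i].StartOffset = int(pos)
		for size > 0 { // advance 'size' bytes through the chunk sequence
			room := int64(chunkLengths[chunk]) - pos
			if size < room {
				pos += size
				size = 0
			} else {
				size -= room
				chunk++
				pos = 0
			}
		}
		if pos == 0 && chunk > 0 {
			// The file ends exactly at a chunk boundary: the end pointer stays
			// on the previous chunk, with the offset equal to its full length.
			spans[i].EndChunk = chunk - 1 + offset
			spans[i].EndOffset = chunkLengths[chunk-1]
		} else {
			spans[i].EndChunk = chunk + offset
			spans[i].EndOffset = int(pos)
		}
	}
	return spans
}

func main() {
	// Two files of 10 and 6 bytes packed into two chunks of 8 bytes each.
	for i, s := range locate([]int64{10, 6}, []int{8, 8}, 0) {
		fmt.Printf("file %d: start %d@%d end %d@%d\n",
			i, s.StartChunk, s.StartOffset, s.EndChunk, s.EndOffset)
	}
}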
@@ -150,7 +164,6 @@ func (manager *BackupManager) Backup(top string, quickMode bool, threads int, ta
remoteSnapshot := manager.SnapshotManager.downloadLatestSnapshot(manager.snapshotID)
if remoteSnapshot == nil {
quickMode = false
remoteSnapshot = CreateEmptySnapshot(manager.snapshotID)
LOG_INFO("BACKUP_START", "No previous backup found")
} else {
@@ -171,28 +184,72 @@ func (manager *BackupManager) Backup(top string, quickMode bool, threads int, ta
// UploadChunk.
chunkCache := make(map[string]bool)
var incompleteSnapshot *Snapshot
// A revision number of 0 means this is the initial backup
if remoteSnapshot.Revision > 0 {
// Add all chunks in the last snapshot to the
// Add all chunks in the last snapshot to the cache
for _, chunkID := range manager.SnapshotManager.GetSnapshotChunks(remoteSnapshot) {
chunkCache[chunkID] = true
}
} else if manager.storage.IsFastListing() {
// If the listing operation is fast, list all chunks and put them in the cache.
LOG_INFO("BACKUP_LIST", "Listing all chunks")
allChunks, _ := manager.SnapshotManager.ListAllFiles(manager.storage, "chunks/")
for _, chunk := range allChunks {
if len(chunk) == 0 || chunk[len(chunk) - 1] == '/' {
continue
}
if strings.HasSuffix(chunk, ".fsl") {
continue
}
chunk = strings.Replace(chunk, "/", "", -1)
chunkCache[chunk] = true
} else {
// In quick mode, attempt to load the incomplete snapshot left by the last failed backup, if there is one.
if quickMode {
incompleteSnapshot = LoadIncompleteSnapshot()
}
// If the listing operation is fast or there is an incomplete snapshot, list all chunks and
// put them in the cache.
if manager.storage.IsFastListing() || incompleteSnapshot != nil {
LOG_INFO("BACKUP_LIST", "Listing all chunks")
allChunks, _ := manager.SnapshotManager.ListAllFiles(manager.storage, "chunks/")
for _, chunk := range allChunks {
if len(chunk) == 0 || chunk[len(chunk) - 1] == '/' {
continue
}
if strings.HasSuffix(chunk, ".fsl") {
continue
}
chunk = strings.Replace(chunk, "/", "", -1)
chunkCache[chunk] = true
}
}
if incompleteSnapshot != nil {
// Find the index of the last chunk from the incomplete snapshot that can be found in the cache
lastCompleteChunk := -1
for i, chunkHash := range incompleteSnapshot.ChunkHashes {
chunkID := manager.config.GetChunkIDFromHash(chunkHash)
if _, ok := chunkCache[chunkID]; ok {
lastCompleteChunk = i
} else {
break
}
}
// Only keep those files whose chunks exist in the cache
var files []*Entry
for _, file := range incompleteSnapshot.Files {
if file.StartChunk <= lastCompleteChunk && file.EndChunk <= lastCompleteChunk {
files = append(files, file)
} else {
break
}
}
incompleteSnapshot.Files = files
// Remove incomplete chunks (they may not have been uploaded)
incompleteSnapshot.ChunkHashes = incompleteSnapshot.ChunkHashes[:lastCompleteChunk + 1]
incompleteSnapshot.ChunkLengths = incompleteSnapshot.ChunkLengths[:lastCompleteChunk + 1]
remoteSnapshot = incompleteSnapshot
}
}
var numberOfNewFileChunks int // number of new file chunks
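
The resume logic above is easiest to follow on concrete data. The toy sketch below (hypothetical chunk IDs and file spans, not duplicacy code) reproduces the two trimming steps: find the last chunk of the incomplete snapshot that, together with all of its predecessors, is present on the storage, then keep only the files that lie entirely within that prefix.

package main

import "fmt"

// entry keeps just the two chunk pointers needed for the trimming decision.
type entry struct{ StartChunk, EndChunk int }

func main() {
	// The incomplete snapshot recorded four chunks, but only the first three
	// were actually found on the storage (hypothetical IDs).
	chunkIDs := []string{"c0", "c1", "c2", "c3"}
	onStorage := map[string]bool{"c0": true, "c1": true, "c2": true}

	// Step 1: the last chunk that forms a complete prefix with its predecessors.
	lastCompleteChunk := -1
	for i, id := range chunkIDs {
		if !onStorage[id] {
			break
		}
		lastCompleteChunk = i
	}

	// Step 2: keep only files whose chunks all fall within that prefix.
	files := []entry{{0, 0}, {1, 2}, {2, 3}}
	var kept []entry
	for _, f := range files {
		if f.StartChunk <= lastCompleteChunk && f.EndChunk <= lastCompleteChunk {
			kept = append(kept, f)
		} else {
			break
		}
	}

	fmt.Println(lastCompleteChunk, kept) // 2 [{0 0} {1 2}]
}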
@@ -211,10 +268,11 @@ func (manager *BackupManager) Backup(top string, quickMode bool, threads int, ta
var modifiedEntries [] *Entry // Files that have been modified or newly created
var preservedEntries [] *Entry // Files that remain unchanged
// If the quick mode is enabled, we simply treat all files as if they were new, and break them into chunks.
// If the quick mode is disabled and there isn't an incomplete snapshot from the last (failed) backup,
// we simply treat all files as if they were new, and break them into chunks.
// Otherwise, we need to find those that are new or recently modified
if !quickMode {
if remoteSnapshot.Revision == 0 && incompleteSnapshot == nil {
modifiedEntries = localSnapshot.Files
for _, entry := range modifiedEntries {
totalModifiedFileSize += entry.Size
@@ -268,7 +326,7 @@ func (manager *BackupManager) Backup(top string, quickMode bool, threads int, ta
var preservedChunkHashes []string
var preservedChunkLengths []int
// For each preserved file, adjust the indices StartChunk and EndChunk. This is done by finding gaps
// For each preserved file, adjust the StartChunk and EndChunk pointers. This is done by finding gaps
// between these indices and subtracting the number of deleted chunks.
last := -1
deletedChunks := 0
@@ -295,6 +353,7 @@ func (manager *BackupManager) Backup(top string, quickMode bool, threads int, ta
var uploadedEntries [] *Entry
var uploadedChunkHashes []string
var uploadedChunkLengths []int
var uploadedChunkLock = &sync.Mutex{}
// The file reader implements the Reader interface. When an EOF is encountered, it opens the next file unless it
// is the last file.
@@ -318,6 +377,37 @@ func (manager *BackupManager) Backup(top string, quickMode bool, threads int, ta
chunkMaker := CreateChunkMaker(manager.config, false)
chunkUploader := CreateChunkUploader(manager.config, manager.storage, nil, threads, nil)
localSnapshotReady := false
var once sync.Once
if remoteSnapshot.Revision == 0 {
// In case an error occurs during the initial backup, save the incomplete snapshot
RunAtError = func() {
once.Do(
func() {
if !localSnapshotReady {
// Lock it to gain exclusive access to uploadedChunkHashes and uploadedChunkLengths
uploadedChunkLock.Lock()
for _, entry := range uploadedEntries {
entry.EndChunk = -1
}
setEntryContent(uploadedEntries, uploadedChunkLengths, len(preservedChunkHashes))
if len(preservedChunkHashes) > 0 {
localSnapshot.ChunkHashes = preservedChunkHashes
localSnapshot.ChunkHashes = append(localSnapshot.ChunkHashes, uploadedChunkHashes...)
localSnapshot.ChunkLengths = preservedChunkLengths
localSnapshot.ChunkLengths = append(localSnapshot.ChunkLengths, uploadedChunkLengths...)
} else {
localSnapshot.ChunkHashes = uploadedChunkHashes
localSnapshot.ChunkLengths = uploadedChunkLengths
}
uploadedChunkLock.Unlock()
}
SaveIncompleteSnapshot(localSnapshot)
})
}
}
if fileReader.CurrentFile != nil {
LOG_TRACE("PACK_START", "Packing %s", fileReader.CurrentEntry.Path)
@@ -398,8 +488,11 @@ func (manager *BackupManager) Backup(top string, quickMode bool, threads int, ta
chunkUploader.StartChunk(chunk, chunkIndex)
}
// Must lock it because the RunAtError function called by other threads may access these two slices
uploadedChunkLock.Lock()
uploadedChunkHashes = append(uploadedChunkHashes, hash)
uploadedChunkLengths = append(uploadedChunkLengths, chunkSize)
uploadedChunkLock.Unlock()
},
func (fileSize int64, hash string) (io.Reader, bool) {
@@ -445,6 +538,8 @@ func (manager *BackupManager) Backup(top string, quickMode bool, threads int, ta
localSnapshot.ChunkLengths = uploadedChunkLengths
}
localSnapshotReady = true
localSnapshot.EndTime = time.Now().Unix()
err = manager.SnapshotManager.CheckSnapshot(localSnapshot)
@@ -455,10 +550,15 @@ func (manager *BackupManager) Backup(top string, quickMode bool, threads int, ta
localSnapshot.Tag = tag
localSnapshot.Options = ""
if !quickMode {
if !quickMode || remoteSnapshot.Revision == 0 {
localSnapshot.Options = "-hash"
}
if _, found := os.LookupEnv("DUPLICACY_FAIL_SNAPSHOT"); found {
LOG_ERROR("SNAPSHOT_FAIL", "Artificially fail the backup for testing purposes")
return false
}
if shadowCopy {
if localSnapshot.Options == "" {
localSnapshot.Options = "-vss"
@@ -505,6 +605,8 @@ func (manager *BackupManager) Backup(top string, quickMode bool, threads int, ta
manager.SnapshotManager.CleanSnapshotCache(localSnapshot, nil)
LOG_INFO("BACKUP_END", "Backup for %s at revision %d completed", top, localSnapshot.Revision)
RunAtError = func() {}
RemoveIncompleteSnapshot()
totalSnapshotChunks := len(localSnapshot.FileSequence) + len(localSnapshot.ChunkSequence) +
len(localSnapshot.LengthSequence)
@@ -981,7 +1083,7 @@ func (manager *BackupManager) RestoreFile(chunkDownloader *ChunkDownloader, chun
var existingFile, newFile *os.File
var err error
preferencePath := GetDuplicacyPreferencePath(top)
preferencePath := GetDuplicacyPreferencePath()
temporaryPath := path.Join(preferencePath, "temporary")
fullPath := joinPath(top, entry.Path)

View File

@@ -215,8 +215,9 @@ func TestBackupManager(t *testing.T) {
time.Sleep(time.Duration(delay) * time.Second)
SetDuplicacyPreferencePath(testDir + "/repository1")
backupManager := CreateBackupManager("host1", storage, testDir, password)
backupManager.SetupSnapshotCache(testDir + "/repository1", "default")
backupManager.SetupSnapshotCache("default")
backupManager.Backup(testDir + "/repository1", /*quickMode=*/true, threads, "first", false, false)
time.Sleep(time.Duration(delay) * time.Second)

View File

@@ -160,6 +160,9 @@ const (
otherExitCode = 101
)
// This is the function to be called before exiting when an error occurs.
var RunAtError func() = func() {}
func CatchLogException() {
if r := recover(); r != nil {
switch e := r.(type) {
@@ -167,10 +170,12 @@ func CatchLogException() {
if printStackTrace {
debug.PrintStack()
}
RunAtError()
os.Exit(duplicacyExitCode)
default:
fmt.Fprintf(os.Stderr, "%v\n", e)
debug.PrintStack()
RunAtError()
os.Exit(otherExitCode)
}
}
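
Taken together with the backup changes above, the hook works as follows: the backup path installs a RunAtError closure guarded by sync.Once, and the logging layer invokes it from its recover handler before exiting. The sketch below is a stripped-down illustration of that pattern; it reuses the RunAtError name but none of the actual duplicacy code.

package main

import (
	"fmt"
	"os"
	"sync"
)

// RunAtError mirrors the package-level hook above: work that must run once
// before the process exits because of an error.
var RunAtError = func() {}

// catch plays the role of CatchLogException: it recovers from a panic,
// runs the hook, and exits with a non-zero code.
func catch() {
	if r := recover(); r != nil {
		fmt.Fprintf(os.Stderr, "%v\n", r)
		RunAtError()
		os.Exit(1)
	}
}

func main() {
	defer catch()

	var once sync.Once
	RunAtError = func() {
		// In the real backup this is where the incomplete snapshot is saved.
		once.Do(func() { fmt.Println("saving incomplete snapshot...") })
	}

	panic("simulated backup failure")
}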

View File

@@ -24,53 +24,38 @@ type Preference struct {
Keys map[string]string `json:"keys"`
}
var preferencePath string
var Preferences [] Preference
// Compute the .duplicacy directory path name:
// - if .duplicacy is a directory -> compute its absolute path name and return it
// - if .duplicacy is a file -> assume this file contains the real path name of the .duplicacy directory
// - if the directory it points to does not exist -> return an error
func GetDuplicacyPreferencePath( repository string) (preferencePath string){
func LoadPreferences(repository string) bool {
preferencePath = path.Join(repository, DUPLICACY_DIRECTORY) //TOKEEP
preferencePath = path.Join(repository, DUPLICACY_DIRECTORY)
stat, err := os.Stat(preferencePath)
if err != nil && !os.IsNotExist(err) {
LOG_ERROR("DOT_DUPLICACY_PATH", "Failed to retrieve the information about the directory %s: %v",
repository, err)
return ""
if err != nil {
LOG_ERROR("PREFERENCE_PATH", "Failed to retrieve the information about the directory %s: %v", repository, err)
return false
}
if stat != nil && stat.IsDir() {
// $repository/.duplicacy exists and is a directory --> we found the .duplicacy directory
return path.Clean(preferencePath)
}
if stat != nil && stat.Mode().IsRegular() {
b, err := ioutil.ReadFile(preferencePath) // just pass the file name
if !stat.IsDir() {
content, err := ioutil.ReadFile(preferencePath)
if err != nil {
LOG_ERROR("DOT_DUPLICACY_PATH", "Failed to read file %s: %v",
preferencePath, err)
return ""
LOG_ERROR("DOT_DUPLICACY_PATH", "Failed to locate the preference path: %v", err)
return false
}
dotDuplicacyContent := string(b) // convert content to a 'string'
stat, err := os.Stat(dotDuplicacyContent)
if err != nil && !os.IsNotExist(err) {
LOG_ERROR("DOT_DUPLICACY_PATH", "Failed to retrieve the information about the directory %s: %v",
repository, err)
return ""
realPreferencePath := string(content)
stat, err := os.Stat(realPreferencePath)
if err != nil {
LOG_ERROR("PREFERENCE_PATH", "Failed to retrieve the information about the directory %s: %v", content, err)
return false
}
if stat != nil && stat.IsDir() {
// If expression read from .duplicacy file is a directory --> we found the .duplicacy directory
return path.Clean(dotDuplicacyContent)
if !stat.IsDir() {
LOG_ERROR("PREFERENCE_PATH", "The preference path %s is not a directory", realPreferencePath)
}
}
return ""
}
func LoadPreferences(repository string) (bool) {
preferencePath = realPreferencePath
}
preferencePath := GetDuplicacyPreferencePath(repository)
description, err := ioutil.ReadFile(path.Join(preferencePath, "preferences"))
if err != nil {
LOG_ERROR("PREFERENCE_OPEN", "Failed to read the preference file from repository %s: %v", repository, err)
@@ -91,14 +76,27 @@ func LoadPreferences(repository string) (bool) {
return true
}
func SavePreferences(repository string) (bool) {
func GetDuplicacyPreferencePath() string {
if preferencePath == "" {
LOG_ERROR("PREFERENCE_PATH", "The preference path has not been set")
return ""
}
return preferencePath
}
// Normally 'preferencePath' is set in LoadPreferences; however, if LoadPreferences is not called, this function
// provides another chance to set 'preferencePath'
func SetDuplicacyPreferencePath(p string) {
preferencePath = p
}
func SavePreferences() (bool) {
description, err := json.MarshalIndent(Preferences, "", " ")
if err != nil {
LOG_ERROR("PREFERENCE_MARSHAL", "Failed to marshal the repository preferences: %v", err)
return false
}
preferencePath := GetDuplicacyPreferencePath(repository)
preferenceFile := path.Join(preferencePath, "/preferences")
preferenceFile := path.Join(GetDuplicacyPreferencePath(), "preferences")
err = ioutil.WriteFile(preferenceFile, description, 0644)
if err != nil {
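
The net effect of the refactor is a two-phase API: the preference path is established once (by LoadPreferences, or by SetDuplicacyPreferencePath when no preference file is involved, as the tests do), and every later caller retrieves it without threading the repository path through its signature. A minimal usage sketch of that pattern, with the package-level state reduced to what is needed here and a hypothetical repository path:

package main

import (
	"fmt"
	"path"
)

// Stand-ins for the package-level state and accessors introduced above.
var preferencePath string

func SetDuplicacyPreferencePath(p string) { preferencePath = p }

func GetDuplicacyPreferencePath() string {
	if preferencePath == "" {
		panic("the preference path has not been set")
	}
	return preferencePath
}

func main() {
	// Phase 1: establish the path once (LoadPreferences does this for real runs).
	SetDuplicacyPreferencePath("/repo/.duplicacy")

	// Phase 2: later consumers derive their locations from the stored path
	// instead of receiving the repository path as a parameter.
	fmt.Println(path.Join(GetDuplicacyPreferencePath(), "cache", "default"))
	fmt.Println(path.Join(GetDuplicacyPreferencePath(), "filters"))
	fmt.Println(path.Join(GetDuplicacyPreferencePath(), "incomplete"))
}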

View File

@@ -509,7 +509,7 @@ func CreateShadowCopy(top string, shadowCopy bool) (shadowTop string) {
snapshotPath := uint16ArrayToString(properties.SnapshotDeviceObject)
preferencePath := GetDuplicacyPreferencePath(top)
preferencePath := GetDuplicacyPreferencePath()
shadowLink = preferencePath + "\\shadow"
os.Remove(shadowLink)
err = os.Symlink(snapshotPath + "\\", shadowLink)

View File

@@ -68,8 +68,7 @@ func CreateSnapshotFromDirectory(id string, top string) (snapshot *Snapshot, ski
var patterns []string
preferencePath := GetDuplicacyPreferencePath(top)
patternFile, err := ioutil.ReadFile(path.Join(preferencePath, "filters"))
patternFile, err := ioutil.ReadFile(path.Join(GetDuplicacyPreferencePath(), "filters"))
if err == nil {
for _, pattern := range strings.Split(string(patternFile), "\n") {
pattern = strings.TrimSpace(pattern)
@@ -138,6 +137,96 @@ func CreateSnapshotFromDirectory(id string, top string) (snapshot *Snapshot, ski
return snapshot, skippedDirectories, skippedFiles, nil
}
// This is the struct used to save/load incomplete snapshots
type IncompleteSnapshot struct {
Files [] *Entry
ChunkHashes []string
ChunkLengths [] int
}
// LoadIncompleteSnapshot loads the incomplete snapshot if it exists
func LoadIncompleteSnapshot() (snapshot *Snapshot) {
snapshotFile := path.Join(GetDuplicacyPreferencePath(), "incomplete")
description, err := ioutil.ReadFile(snapshotFile)
if err != nil {
return nil
}
var incompleteSnapshot IncompleteSnapshot
err = json.Unmarshal(description, &incompleteSnapshot)
if err != nil {
return nil
}
var chunkHashes []string
for _, chunkHash := range incompleteSnapshot.ChunkHashes {
hash, err := hex.DecodeString(chunkHash)
if err != nil {
return nil
}
chunkHashes = append(chunkHashes, string(hash))
}
snapshot = &Snapshot {
Files: incompleteSnapshot.Files,
ChunkHashes: chunkHashes,
ChunkLengths: incompleteSnapshot.ChunkLengths,
}
LOG_INFO("INCOMPLETE_LOAD", "Incomplete snpashot loaded from %s", snapshotFile)
return snapshot
}
// SaveIncompleteSnapshot saves the incomplete snapshot under the preference directory
func SaveIncompleteSnapshot(snapshot *Snapshot) {
var files []*Entry
for _, file := range snapshot.Files {
if file.EndChunk >= 0 {
file.Attributes = nil
files = append(files, file)
} else {
break
}
}
var chunkHashes []string
for _, chunkHash := range snapshot.ChunkHashes {
chunkHashes = append(chunkHashes, hex.EncodeToString([]byte(chunkHash)))
}
incompleteSnapshot := IncompleteSnapshot {
Files: files,
ChunkHashes: chunkHashes,
ChunkLengths: snapshot.ChunkLengths,
}
description, err := json.Marshal(incompleteSnapshot)
if err != nil {
LOG_WARN("INCOMPLETE_ENCODE", "Failed to encode the incomplete snapshot: %v", err)
return
}
snapshotFile := path.Join(GetDuplicacyPreferencePath(), "incomplete")
err = ioutil.WriteFile(snapshotFile, description, 0644)
if err != nil {
LOG_WARN("INCOMPLETE_WRITE", "Failed to save the incomplete snapshot: %v", err)
return
}
LOG_INFO("INCOMPLETE_SAVE", "Incomplete snapshot saved to %s", snapshotFile)
}
func RemoveIncompleteSnapshot() {
snapshotFile := path.Join(GetDuplicacyPreferencePath(), "incomplete")
if stat, err := os.Stat(snapshotFile); err == nil && !stat.IsDir() {
err = os.Remove(snapshotFile)
if err != nil {
LOG_INFO("INCOMPLETE_SAVE", "Failed to remove ncomplete snapshot: %v", err)
} else {
LOG_INFO("INCOMPLETE_SAVE", "Removed incomplete snapshot %s", snapshotFile)
}
}
}
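
The ChunkHashes held in a Snapshot are raw binary strings, which appears to be why the save path hex-encodes them before JSON marshalling and the load path decodes them back. A small roundtrip sketch of that encoding choice, with hypothetical data and a trimmed-down struct:

package main

import (
	"encoding/hex"
	"encoding/json"
	"fmt"
)

// incomplete mirrors the shape of IncompleteSnapshot above, reduced to the
// two fields needed to show the encoding roundtrip.
type incomplete struct {
	ChunkHashes  []string
	ChunkLengths []int
}

func main() {
	rawHash := string([]byte{0xde, 0xad, 0xbe, 0xef}) // raw hash bytes, not printable

	// Save path: hex-encode each hash, then marshal the whole struct to JSON.
	saved := incomplete{
		ChunkHashes:  []string{hex.EncodeToString([]byte(rawHash))},
		ChunkLengths: []int{4194304},
	}
	data, _ := json.Marshal(saved)
	fmt.Println(string(data)) // {"ChunkHashes":["deadbeef"],"ChunkLengths":[4194304]}

	// Load path: unmarshal, then decode each hash back to its raw bytes.
	var loaded incomplete
	if err := json.Unmarshal(data, &loaded); err != nil {
		panic(err)
	}
	decoded, err := hex.DecodeString(loaded.ChunkHashes[0])
	if err != nil {
		panic(err)
	}
	fmt.Println(string(decoded) == rawHash) // true
}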
// CreateSnapshotFromDescription creates a snapshot from a json description.
func CreateSnapshotFromDescription(description []byte) (snapshot *Snapshot, err error) {

View File

@@ -1496,7 +1496,7 @@ func (manager *SnapshotManager) resurrectChunk(fossilPath string, chunkID string
// Note that a snapshot being created when step 2 is in progress may reference a fossil. To avoid this
// problem, never remove the latest revision (unless exclusive is true), and only cache chunks referenced
// by the latest revision.
func (manager *SnapshotManager) PruneSnapshots(top string, selfID string, snapshotID string, revisionsToBeDeleted []int,
func (manager *SnapshotManager) PruneSnapshots(selfID string, snapshotID string, revisionsToBeDeleted []int,
tags []string, retentions []string,
exhaustive bool, exclusive bool, ignoredIDs []string,
dryRun bool, deleteOnly bool, collectOnly bool) bool {
@@ -1511,7 +1511,7 @@ func (manager *SnapshotManager) PruneSnapshots(top string, selfID string, snapsh
LOG_WARN("DELETE_OPTIONS", "Tags or retention policy will be ignored if at least one revision is specified")
}
preferencePath := GetDuplicacyPreferencePath(top)
preferencePath := GetDuplicacyPreferencePath()
logDir := path.Join(preferencePath, "logs")
os.Mkdir(logDir, 0700)
logFileName := path.Join(logDir, time.Now().Format("prune-log-20060102-150405"))

View File

@@ -248,11 +248,11 @@ func TestSingleRepositoryPrune(t *testing.T) {
checkTestSnapshots(snapshotManager, 3, 0)
t.Logf("Removing snapshot repository1 revision 1 with --exclusive")
snapshotManager.PruneSnapshots(testDir, "repository1", "repository1", []int{1}, []string{}, []string{}, false, true, []string{}, false, false, false)
snapshotManager.PruneSnapshots("repository1", "repository1", []int{1}, []string{}, []string{}, false, true, []string{}, false, false, false)
checkTestSnapshots(snapshotManager, 2, 0)
t.Logf("Removing snapshot repository1 revision 2 without --exclusive")
snapshotManager.PruneSnapshots(testDir, "repository1", "repository1", []int{2}, []string{}, []string{}, false, false, []string{}, false, false, false)
snapshotManager.PruneSnapshots("repository1", "repository1", []int{2}, []string{}, []string{}, false, false, []string{}, false, false, false)
checkTestSnapshots(snapshotManager, 1, 2)
t.Logf("Creating 1 snapshot")
@@ -261,7 +261,7 @@ func TestSingleRepositoryPrune(t *testing.T) {
checkTestSnapshots(snapshotManager, 2, 2)
t.Logf("Prune without removing any snapshots -- fossils will be deleted")
snapshotManager.PruneSnapshots(testDir, "repository1", "repository1", []int{}, []string{}, []string{}, false, false, []string{}, false, false, false)
snapshotManager.PruneSnapshots("repository1", "repository1", []int{}, []string{}, []string{}, false, false, []string{}, false, false, false)
checkTestSnapshots(snapshotManager, 2, 0)
}
@@ -288,11 +288,11 @@ func TestSingleHostPrune(t *testing.T) {
checkTestSnapshots(snapshotManager, 3, 0)
t.Logf("Removing snapshot vm1@host1 revision 1 without --exclusive")
snapshotManager.PruneSnapshots(testDir, "vm1@host1", "vm1@host1", []int{1}, []string{}, []string{}, false, false, []string{}, false, false, false)
snapshotManager.PruneSnapshots("vm1@host1", "vm1@host1", []int{1}, []string{}, []string{}, false, false, []string{}, false, false, false)
checkTestSnapshots(snapshotManager, 2, 2)
t.Logf("Prune without removing any snapshots -- no fossils will be deleted")
snapshotManager.PruneSnapshots(testDir, "vm1@host1", "vm1@host1", []int{}, []string{}, []string{}, false, false, []string{}, false, false, false)
snapshotManager.PruneSnapshots("vm1@host1", "vm1@host1", []int{}, []string{}, []string{}, false, false, []string{}, false, false, false)
checkTestSnapshots(snapshotManager, 2, 2)
t.Logf("Creating 1 snapshot")
@@ -301,7 +301,7 @@ func TestSingleHostPrune(t *testing.T) {
checkTestSnapshots(snapshotManager, 3, 2)
t.Logf("Prune without removing any snapshots -- fossils will be deleted")
snapshotManager.PruneSnapshots(testDir, "vm1@host1", "vm1@host1", []int{}, []string{}, []string{}, false, false, []string{}, false, false, false)
snapshotManager.PruneSnapshots("vm1@host1", "vm1@host1", []int{}, []string{}, []string{}, false, false, []string{}, false, false, false)
checkTestSnapshots(snapshotManager, 3, 0)
}
@@ -329,11 +329,11 @@ func TestMultipleHostPrune(t *testing.T) {
checkTestSnapshots(snapshotManager, 3, 0)
t.Logf("Removing snapshot vm1@host1 revision 1 without --exclusive")
snapshotManager.PruneSnapshots(testDir, "vm1@host1", "vm1@host1", []int{1}, []string{}, []string{}, false, false, []string{}, false, false, false)
snapshotManager.PruneSnapshots("vm1@host1", "vm1@host1", []int{1}, []string{}, []string{}, false, false, []string{}, false, false, false)
checkTestSnapshots(snapshotManager, 2, 2)
t.Logf("Prune without removing any snapshots -- no fossils will be deleted")
snapshotManager.PruneSnapshots(testDir, "vm1@host1", "vm1@host1", []int{}, []string{}, []string{}, false, false, []string{}, false, false, false)
snapshotManager.PruneSnapshots("vm1@host1", "vm1@host1", []int{}, []string{}, []string{}, false, false, []string{}, false, false, false)
checkTestSnapshots(snapshotManager, 2, 2)
t.Logf("Creating 1 snapshot")
@@ -342,7 +342,7 @@ func TestMultipleHostPrune(t *testing.T) {
checkTestSnapshots(snapshotManager, 3, 2)
t.Logf("Prune without removing any snapshots -- no fossils will be deleted")
snapshotManager.PruneSnapshots(testDir, "vm1@host1", "vm1@host1", []int{}, []string{}, []string{}, false, false, []string{}, false, false, false)
snapshotManager.PruneSnapshots("vm1@host1", "vm1@host1", []int{}, []string{}, []string{}, false, false, []string{}, false, false, false)
checkTestSnapshots(snapshotManager, 3, 2)
t.Logf("Creating 1 snapshot")
@@ -351,7 +351,7 @@ func TestMultipleHostPrune(t *testing.T) {
checkTestSnapshots(snapshotManager, 4, 2)
t.Logf("Prune without removing any snapshots -- fossils will be deleted")
snapshotManager.PruneSnapshots(testDir, "vm1@host1", "vm1@host1", []int{}, []string{}, []string{}, false, false, []string{}, false, false, false)
snapshotManager.PruneSnapshots("vm1@host1", "vm1@host1", []int{}, []string{}, []string{}, false, false, []string{}, false, false, false)
checkTestSnapshots(snapshotManager, 4, 0)
}
@@ -376,7 +376,7 @@ func TestPruneAndResurrect(t *testing.T) {
checkTestSnapshots(snapshotManager, 2, 0)
t.Logf("Removing snapshot vm1@host1 revision 1 without --exclusive")
snapshotManager.PruneSnapshots(testDir, "vm1@host1", "vm1@host1", []int{1}, []string{}, []string{}, false, false, []string{}, false, false, false)
snapshotManager.PruneSnapshots("vm1@host1", "vm1@host1", []int{1}, []string{}, []string{}, false, false, []string{}, false, false, false)
checkTestSnapshots(snapshotManager, 1, 2)
t.Logf("Creating 1 snapshot")
@@ -385,7 +385,7 @@ func TestPruneAndResurrect(t *testing.T) {
checkTestSnapshots(snapshotManager, 2, 2)
t.Logf("Prune without removing any snapshots -- one fossil will be resurrected")
snapshotManager.PruneSnapshots(testDir, "vm1@host1", "vm1@host1", []int{}, []string{}, []string{}, false, false, []string{}, false, false, false)
snapshotManager.PruneSnapshots("vm1@host1", "vm1@host1", []int{}, []string{}, []string{}, false, false, []string{}, false, false, false)
checkTestSnapshots(snapshotManager, 2, 0)
}
@@ -413,11 +413,11 @@ func TestInactiveHostPrune(t *testing.T) {
checkTestSnapshots(snapshotManager, 3, 0)
t.Logf("Removing snapshot vm1@host1 revision 1")
snapshotManager.PruneSnapshots(testDir, "vm1@host1", "vm1@host1", []int{1}, []string{}, []string{}, false, false, []string{}, false, false, false)
snapshotManager.PruneSnapshots("vm1@host1", "vm1@host1", []int{1}, []string{}, []string{}, false, false, []string{}, false, false, false)
checkTestSnapshots(snapshotManager, 2, 2)
t.Logf("Prune without removing any snapshots -- no fossils will be deleted")
snapshotManager.PruneSnapshots(testDir, "vm1@host1", "vm1@host1", []int{}, []string{}, []string{}, false, false, []string{}, false, false, false)
snapshotManager.PruneSnapshots("vm1@host1", "vm1@host1", []int{}, []string{}, []string{}, false, false, []string{}, false, false, false)
checkTestSnapshots(snapshotManager, 2, 2)
t.Logf("Creating 1 snapshot")
@@ -426,7 +426,7 @@ func TestInactiveHostPrune(t *testing.T) {
checkTestSnapshots(snapshotManager, 3, 2)
t.Logf("Prune without removing any snapshots -- fossils will be deleted")
snapshotManager.PruneSnapshots(testDir, "vm1@host1", "vm1@host1", []int{}, []string{}, []string{}, false, false, []string{}, false, false, false)
snapshotManager.PruneSnapshots("vm1@host1", "vm1@host1", []int{}, []string{}, []string{}, false, false, []string{}, false, false, false)
checkTestSnapshots(snapshotManager, 3, 0)
}
@@ -454,14 +454,14 @@ func TestRetentionPolicy(t *testing.T) {
checkTestSnapshots(snapshotManager, 30, 0)
t.Logf("Removing snapshot vm1@host1 0:20 with --exclusive")
snapshotManager.PruneSnapshots(testDir, "vm1@host1", "vm1@host1", []int{}, []string{}, []string{"0:20"}, false, true, []string{}, false, false, false)
snapshotManager.PruneSnapshots("vm1@host1", "vm1@host1", []int{}, []string{}, []string{"0:20"}, false, true, []string{}, false, false, false)
checkTestSnapshots(snapshotManager, 19, 0)
t.Logf("Removing snapshot vm1@host1 -k 0:20 with --exclusive")
snapshotManager.PruneSnapshots(testDir, "vm1@host1", "vm1@host1", []int{}, []string{}, []string{"0:20"}, false, true, []string{}, false, false, false)
snapshotManager.PruneSnapshots("vm1@host1", "vm1@host1", []int{}, []string{}, []string{"0:20"}, false, true, []string{}, false, false, false)
checkTestSnapshots(snapshotManager, 19, 0)
t.Logf("Removing snapshot vm1@host1 -k 3:14 -k 2:7 with --exclusive")
snapshotManager.PruneSnapshots(testDir, "vm1@host1", "vm1@host1", []int{}, []string{}, []string{"3:14", "2:7"}, false, true, []string{}, false, false, false)
snapshotManager.PruneSnapshots("vm1@host1", "vm1@host1", []int{}, []string{}, []string{"3:14", "2:7"}, false, true, []string{}, false, false, false)
checkTestSnapshots(snapshotManager, 12, 0)
}

View File

@@ -75,13 +75,9 @@ func (storage *RateLimitedStorage) SetRateLimits(downloadRateLimit int, uploadRa
storage.UploadRateLimit = uploadRateLimit
}
func checkHostKey(repository string, hostname string, remote net.Addr, key ssh.PublicKey) error {
if len(repository) == 0 {
return nil
}
func checkHostKey(hostname string, remote net.Addr, key ssh.PublicKey) error {
preferencePath := GetDuplicacyPreferencePath(repository)
preferencePath := GetDuplicacyPreferencePath()
hostFile := path.Join(preferencePath, "known_hosts")
file, err := os.OpenFile(hostFile, os.O_RDWR | os.O_CREATE, 0600)
if err != nil {
@@ -126,7 +122,7 @@ func checkHostKey(repository string, hostname string, remote net.Addr, key ssh.P
}
// CreateStorage creates a storage object based on the provided storage URL.
func CreateStorage(repository string, preference Preference, resetPassword bool, threads int) (storage Storage) {
func CreateStorage(preference Preference, resetPassword bool, threads int) (storage Storage) {
storageURL := preference.StorageURL
@@ -282,7 +278,7 @@ func CreateStorage(repository string, preference Preference, resetPassword bool,
}
hostKeyChecker := func(hostname string, remote net.Addr, key ssh.PublicKey) error {
return checkHostKey(repository, hostname, remote, key)
return checkHostKey(hostname, remote, key)
}
sftpStorage, err := CreateSFTPStorage(server, port, username, storageDir, authMethods, hostKeyChecker, threads)