add -persist in check and restore mode (for PR)
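The change threads a new allowFailures flag (surfaced as -persist, per the commit title) through Restore, RestoreFile and the chunk downloader, so a missing or corrupt chunk fails only the affected file instead of aborting the whole restore. RestoreFile's return type changes from bool to (bool, error), and failed files are collected and reported at the end of the run. Below is a minimal, self-contained sketch of that calling convention; it uses generic names and a hypothetical restoreOne helper, not duplicacy's actual API:

package main

import (
	"errors"
	"fmt"
)

// restoreOne stands in for RestoreFile's new contract: (restored, err).
// (true, nil)  - file was restored
// (false, nil) - file was skipped
// (false, err) - file failed; only surfaced to the caller when failures are allowed
func restoreOne(name string, corrupt bool) (bool, error) {
	if corrupt {
		return false, errors.New("file corrupt (hash mismatch)")
	}
	return true, nil
}

func main() {
	allowFailures := true // what -persist turns on
	files := []struct {
		name    string
		corrupt bool
	}{{"a.txt", false}, {"b.txt", true}, {"c.txt", false}}

	var failed []string
	for _, f := range files {
		restored, err := restoreOne(f.name, f.corrupt)
		if err != nil {
			if !allowFailures {
				panic(err) // pre-patch behaviour: abort on the first error
			}
			failed = append(failed, fmt.Sprintf("%s: %v", f.name, err))
			continue
		}
		if restored {
			fmt.Println("restored", f.name)
		} else {
			fmt.Println("skipped", f.name)
		}
	}
	for _, f := range failed {
		fmt.Println("failed:", f)
	}
}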
@@ -8,6 +8,7 @@ import (
 	"bytes"
 	"encoding/hex"
 	"encoding/json"
+	"errors"
 	"fmt"
 	"io"
 	"os"
@@ -741,7 +742,7 @@ func (manager *BackupManager) Backup(top string, quickMode bool, threads int, ta
 // the same as 'top'. 'quickMode' will bypass files with unchanged sizes and timestamps. 'deleteMode' will
 // remove local files that don't exist in the snapshot. 'patterns' is used to include/exclude certain files.
 func (manager *BackupManager) Restore(top string, revision int, inPlace bool, quickMode bool, threads int, overwrite bool,
-	deleteMode bool, setOwner bool, showStatistics bool, patterns []string) bool {
+	deleteMode bool, setOwner bool, showStatistics bool, patterns []string, allowFailures bool) bool {
 
 	startTime := time.Now().Unix()
 
@@ -809,6 +810,18 @@ func (manager *BackupManager) Restore(top string, revision int, inPlace bool, qu
 
 	var totalFileSize int64
 	var downloadedFileSize int64
+	var failedFileSize int64
+	var skippedFileSize int64
+
+	var downloadedFiles []*Entry
+	var skippedFiles []*Entry
+
+	// for storing failed files and reason for failure
+	type FailedEntry struct {
+		entry *Entry
+		failReason string
+	}
+	var failedFiles []*FailedEntry
 
 	i := 0
 	for _, entry := range remoteSnapshot.Files {
@@ -827,6 +840,8 @@ func (manager *BackupManager) Restore(top string, revision int, inPlace bool, qu
 			i++
 			if quickMode && local.IsSameAs(entry) {
 				LOG_TRACE("RESTORE_SKIP", "File %s unchanged (by size and timestamp)", local.Path)
+				skippedFileSize += entry.Size
+				skippedFiles = append(skippedFiles, entry)
 				skipped = true
 			}
 		}
@@ -892,14 +907,13 @@ func (manager *BackupManager) Restore(top string, revision int, inPlace bool, qu
 	// Sort entries by their starting chunks in order to linearize the access to the chunk chain.
 	sort.Sort(ByChunk(fileEntries))
 
-	chunkDownloader := CreateChunkDownloader(manager.config, manager.storage, nil, showStatistics, threads)
+	chunkDownloader := CreateChunkDownloader(manager.config, manager.storage, nil, showStatistics, threads, allowFailures)
 	chunkDownloader.AddFiles(remoteSnapshot, fileEntries)
 
 	chunkMaker := CreateChunkMaker(manager.config, true)
 
 	startDownloadingTime := time.Now().Unix()
 
-	var downloadedFiles []*Entry
 	// Now download files one by one
 	for _, file := range fileEntries {
 
@@ -909,12 +923,16 @@ func (manager *BackupManager) Restore(top string, revision int, inPlace bool, qu
 			if quickMode {
 				if file.IsSameAsFileInfo(stat) {
 					LOG_TRACE("RESTORE_SKIP", "File %s unchanged (by size and timestamp)", file.Path)
+					skippedFileSize += file.Size
+					skippedFiles = append(skippedFiles, file)
 					continue
 				}
 			}
 
 			if file.Size == 0 && file.IsSameAsFileInfo(stat) {
 				LOG_TRACE("RESTORE_SKIP", "File %s unchanged (size 0)", file.Path)
+				skippedFileSize += file.Size
+				skippedFiles = append(skippedFiles, file)
 				continue
 			}
 		} else {
@@ -937,17 +955,33 @@ func (manager *BackupManager) Restore(top string, revision int, inPlace bool, qu
 			file.RestoreMetadata(fullPath, nil, setOwner)
 			if !showStatistics {
 				LOG_INFO("DOWNLOAD_DONE", "Downloaded %s (0)", file.Path)
+				downloadedFileSize += file.Size
+				downloadedFiles = append(downloadedFiles, file)
 			}
 
 			continue
 		}
 
-		if manager.RestoreFile(chunkDownloader, chunkMaker, file, top, inPlace, overwrite, showStatistics,
-			totalFileSize, downloadedFileSize, startDownloadingTime) {
+		downloaded, err := manager.RestoreFile(chunkDownloader, chunkMaker, file, top, inPlace, overwrite, showStatistics,
+			totalFileSize, downloadedFileSize, startDownloadingTime, allowFailures)
+		if err != nil {
+			// RestoreFile produced an error
+			failedFileSize += file.Size
+			failedFiles = append(failedFiles, &FailedEntry{file, err.Error()})
+			continue
+		}
+
+		// No error
+		if downloaded {
+			// No error, file was restored
 			downloadedFileSize += file.Size
 			downloadedFiles = append(downloadedFiles, file)
+			file.RestoreMetadata(fullPath, nil, setOwner)
+		} else {
+			// No error, file was skipped
+			skippedFileSize += file.Size
+			skippedFiles = append(skippedFiles, file)
 		}
-		file.RestoreMetadata(fullPath, nil, setOwner)
 	}
 
 	if deleteMode && len(patterns) == 0 {
@@ -971,6 +1005,16 @@ func (manager *BackupManager) Restore(top string, revision int, inPlace bool, qu
 		for _, file := range downloadedFiles {
 			LOG_INFO("DOWNLOAD_DONE", "Downloaded %s (%d)", file.Path, file.Size)
 		}
+		for _, file := range skippedFiles {
+			LOG_INFO("DOWNLOAD_SKIP", "Skipped %s (%d)", file.Path, file.Size)
+		}
 	}
 
+	if len(failedFiles) > 0 {
+		for _, failed := range failedFiles {
+			file := failed.entry
+			LOG_WARN("RESTORE_STATS", "Restore failed %s (%d): %s", file.Path, file.Size, failed.failReason)
+		}
+	}
+
 	LOG_INFO("RESTORE_END", "Restored %s to revision %d", top, revision)
@@ -978,6 +1022,13 @@ func (manager *BackupManager) Restore(top string, revision int, inPlace bool, qu
 		LOG_INFO("RESTORE_STATS", "Files: %d total, %s bytes", len(fileEntries), PrettySize(totalFileSize))
 		LOG_INFO("RESTORE_STATS", "Downloaded %d file, %s bytes, %d chunks",
 			len(downloadedFiles), PrettySize(downloadedFileSize), chunkDownloader.numberOfDownloadedChunks)
+		LOG_INFO("RESTORE_STATS", "Skipped %d file, %s bytes",
+			len(skippedFiles), PrettySize(skippedFileSize))
+		LOG_INFO("RESTORE_STATS", "Failed %d file, %s bytes",
+			len(failedFiles), PrettySize(failedFileSize))
 	}
+	if len(failedFiles) > 0 {
+		LOG_WARN("RESTORE_STATS", "Some files could not be restored")
+	}
 
 	runningTime := time.Now().Unix() - startTime
@@ -1149,8 +1200,11 @@ func (manager *BackupManager) UploadSnapshot(chunkMaker *ChunkMaker, uploader *C
 // Restore downloads a file from the storage. If 'inPlace' is false, the download file is saved first to a temporary
 // file under the .duplicacy directory and then replaces the existing one. Otherwise, the existing file will be
 // overwritten directly.
+// Return: true, nil: Restored file;
+// false, nil: Skipped file;
+// false, error: Failure to restore file (only if allowFailures == true)
 func (manager *BackupManager) RestoreFile(chunkDownloader *ChunkDownloader, chunkMaker *ChunkMaker, entry *Entry, top string, inPlace bool, overwrite bool,
-	showStatistics bool, totalFileSize int64, downloadedFileSize int64, startTime int64) bool {
+	showStatistics bool, totalFileSize int64, downloadedFileSize int64, startTime int64, allowFailures bool) (bool, error) {
 
 	LOG_TRACE("DOWNLOAD_START", "Downloading %s", entry.Path)
 
@@ -1161,6 +1215,14 @@ func (manager *BackupManager) RestoreFile(chunkDownloader *ChunkDownloader, chun
 	temporaryPath := path.Join(preferencePath, "temporary")
 	fullPath := joinPath(top, entry.Path)
 
+	// Function to call on errors ignored by allowFailures
+	var onFailure LogFunc
+	if allowFailures {
+		onFailure = LOG_WARN
+	} else {
+		onFailure = LOG_ERROR
+	}
+
 	defer func() {
 		if existingFile != nil {
 			existingFile.Close()
@@ -1195,7 +1257,7 @@ func (manager *BackupManager) RestoreFile(chunkDownloader *ChunkDownloader, chun
 		existingFile, err = os.OpenFile(fullPath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600)
 		if err != nil {
 			LOG_ERROR("DOWNLOAD_CREATE", "Failed to create the file %s for in-place writing: %v", fullPath, err)
-			return false
+			return false, nil
 		}
 
 		n := int64(1)
@@ -1207,18 +1269,18 @@ func (manager *BackupManager) RestoreFile(chunkDownloader *ChunkDownloader, chun
 			_, err = existingFile.Seek(entry.Size-n, 0)
 			if err != nil {
 				LOG_ERROR("DOWNLOAD_CREATE", "Failed to resize the initial file %s for in-place writing: %v", fullPath, err)
-				return false
+				return false, nil
 			}
 			_, err = existingFile.Write([]byte("\x00\x00")[:n])
 			if err != nil {
 				LOG_ERROR("DOWNLOAD_CREATE", "Failed to initialize the sparse file %s for in-place writing: %v", fullPath, err)
-				return false
+				return false, nil
 			}
 			existingFile.Close()
 			existingFile, err = os.Open(fullPath)
 			if err != nil {
 				LOG_ERROR("DOWNLOAD_OPEN", "Can't reopen the initial file just created: %v", err)
-				return false
+				return false, nil
 			}
 			isNewFile = true
 		}
@@ -1226,10 +1288,43 @@ func (manager *BackupManager) RestoreFile(chunkDownloader *ChunkDownloader, chun
 			LOG_TRACE("DOWNLOAD_OPEN", "Can't open the existing file: %v", err)
 		}
 	} else {
+		// File already exists. Read file and hash entire contents. If fileHash == entry.Hash, skip file.
+		// This is done before additional processing so that any identical files can be skipped regardless of the
+		// -overwrite option
+		fileHasher := manager.config.NewFileHasher()
+		buffer := make([]byte, 64*1024)
+
+		for {
+			n, err := existingFile.Read(buffer)
+			if err == io.EOF {
+				break
+			}
+			if err != nil {
+				LOG_ERROR("DOWNLOAD_OPEN", "Failed to read existing file: %v", err)
+				return false, nil
+			}
+			if n > 0 {
+				fileHasher.Write(buffer[:n])
+			}
+		}
+
+		fileHash := hex.EncodeToString(fileHasher.Sum(nil))
+
+		if fileHash == entry.Hash && fileHash != "" {
+			LOG_TRACE("DOWNLOAD_SKIP", "File %s unchanged (by hash)", entry.Path)
+			return false, nil
+		}
+
+		// fileHash != entry.Hash, warn/error depending on -overwrite option
 		if !overwrite {
-			LOG_ERROR("DOWNLOAD_OVERWRITE",
-				"File %s already exists. Please specify the -overwrite option to continue", entry.Path)
-			return false
+			var msg string
+			if allowFailures {
+				msg = "File %s already exists. Please specify the -overwrite option to overwrite"
+			} else {
+				msg = "File %s already exists. Please specify the -overwrite option to continue"
+			}
+			onFailure("DOWNLOAD_OVERWRITE", msg, entry.Path) // will exit program here if allowFailures = false
+			return false, errors.New("file exists")
 		}
 	}
 
@@ -1299,7 +1394,7 @@ func (manager *BackupManager) RestoreFile(chunkDownloader *ChunkDownloader, chun
 				}
 				if err != nil {
 					LOG_ERROR("DOWNLOAD_SPLIT", "Failed to read existing file: %v", err)
-					return false
+					return false, nil
 				}
 			}
 			if count > 0 {
@@ -1339,9 +1434,11 @@ func (manager *BackupManager) RestoreFile(chunkDownloader *ChunkDownloader, chun
 				return nil, false
 			})
 		}
+
+		// This is an additional check comparing fileHash to entry.Hash above, so this should no longer occur
 		if fileHash == entry.Hash && fileHash != "" {
 			LOG_TRACE("DOWNLOAD_SKIP", "File %s unchanged (by hash)", entry.Path)
-			return false
+			return false, nil
 		}
 	}
 
@@ -1369,7 +1466,7 @@ func (manager *BackupManager) RestoreFile(chunkDownloader *ChunkDownloader, chun
 		existingFile, err = os.OpenFile(fullPath, os.O_RDWR, 0)
 		if err != nil {
 			LOG_ERROR("DOWNLOAD_OPEN", "Failed to open the file %s for in-place writing", fullPath)
-			return false
+			return false, nil
 		}
 	}
 
@@ -1401,7 +1498,7 @@ func (manager *BackupManager) RestoreFile(chunkDownloader *ChunkDownloader, chun
 			_, err = existingFile.Seek(offset, 0)
 			if err != nil {
 				LOG_ERROR("DOWNLOAD_SEEK", "Failed to set the offset to %d for file %s: %v", offset, fullPath, err)
-				return false
+				return false, nil
 			}
 
 			// Check if the chunk is available in the existing file
@@ -1411,19 +1508,21 @@ func (manager *BackupManager) RestoreFile(chunkDownloader *ChunkDownloader, chun
 				_, err := io.CopyN(hasher, existingFile, int64(existingLengths[j]))
 				if err != nil {
 					LOG_ERROR("DOWNLOAD_READ", "Failed to read the existing chunk %s: %v", hash, err)
-					return false
+					return false, nil
 				}
 				if IsDebugging() {
 					LOG_DEBUG("DOWNLOAD_UNCHANGED", "Chunk %s is unchanged", manager.config.GetChunkIDFromHash(hash))
 				}
 			} else {
 				chunk := chunkDownloader.WaitForChunk(i)
-				_, err = existingFile.Write(chunk.GetBytes()[start:end])
-				if err != nil {
-					LOG_ERROR("DOWNLOAD_WRITE", "Failed to write to the file: %v", err)
-					return false
+				if !chunk.isBroken { // only write if chunk downloaded correctly
+					_, err = existingFile.Write(chunk.GetBytes()[start:end])
+					if err != nil {
+						LOG_ERROR("DOWNLOAD_WRITE", "Failed to write to the file: %v", err)
+						return false, nil
+					}
+					hasher.Write(chunk.GetBytes()[start:end])
 				}
-				hasher.Write(chunk.GetBytes()[start:end])
 			}
 
 			offset += int64(end - start)
@@ -1432,15 +1531,15 @@ func (manager *BackupManager) RestoreFile(chunkDownloader *ChunkDownloader, chun
 		// Must truncate the file if the new size is smaller
 		if err = existingFile.Truncate(offset); err != nil {
 			LOG_ERROR("DOWNLOAD_TRUNCATE", "Failed to truncate the file at %d: %v", offset, err)
-			return false
+			return false, nil
 		}
 
 		// Verify the download by hash
 		hash := hex.EncodeToString(hasher.Sum(nil))
 		if hash != entry.Hash && hash != "" && entry.Hash != "" && !strings.HasPrefix(entry.Hash, "#") {
-			LOG_ERROR("DOWNLOAD_HASH", "File %s has a mismatched hash: %s instead of %s (in-place)",
+			onFailure("DOWNLOAD_HASH", "File %s has a mismatched hash: %s instead of %s (in-place)",
 				fullPath, "", entry.Hash)
-			return false
+			return false, errors.New("file corrupt (hash mismatch)")
 		}
 
 	} else {
@@ -1449,7 +1548,7 @@ func (manager *BackupManager) RestoreFile(chunkDownloader *ChunkDownloader, chun
 		newFile, err = os.OpenFile(temporaryPath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600)
 		if err != nil {
 			LOG_ERROR("DOWNLOAD_OPEN", "Failed to open file for writing: %v", err)
-			return false
+			return false, nil
 		}
 
 		hasher := manager.config.NewFileHasher()
@@ -1488,21 +1587,23 @@ func (manager *BackupManager) RestoreFile(chunkDownloader *ChunkDownloader, chun
 			if !hasLocalCopy {
 				chunk := chunkDownloader.WaitForChunk(i)
 				// If the chunk was downloaded from the storage, we may still need a portion of it.
-				start := 0
-				if i == entry.StartChunk {
-					start = entry.StartOffset
+				if !chunk.isBroken { // only get data if chunk downloaded correctly
+					start := 0
+					if i == entry.StartChunk {
+						start = entry.StartOffset
+					}
+					end := chunk.GetLength()
+					if i == entry.EndChunk {
+						end = entry.EndOffset
+					}
+					data = chunk.GetBytes()[start:end]
 				}
-				end := chunk.GetLength()
-				if i == entry.EndChunk {
-					end = entry.EndOffset
-				}
-				data = chunk.GetBytes()[start:end]
 			}
 
 			_, err = newFile.Write(data)
 			if err != nil {
 				LOG_ERROR("DOWNLOAD_WRITE", "Failed to write file: %v", err)
-				return false
+				return false, nil
 			}
 
 			hasher.Write(data)
@@ -1511,9 +1612,9 @@ func (manager *BackupManager) RestoreFile(chunkDownloader *ChunkDownloader, chun
 
 		hash := hex.EncodeToString(hasher.Sum(nil))
 		if hash != entry.Hash && hash != "" && entry.Hash != "" && !strings.HasPrefix(entry.Hash, "#") {
-			LOG_ERROR("DOWNLOAD_HASH", "File %s has a mismatched hash: %s instead of %s",
+			onFailure("DOWNLOAD_HASH", "File %s has a mismatched hash: %s instead of %s",
 				entry.Path, hash, entry.Hash)
-			return false
+			return false, errors.New("file corrupt (hash mismatch)")
 		}
 
 		if existingFile != nil {
@@ -1527,20 +1628,20 @@ func (manager *BackupManager) RestoreFile(chunkDownloader *ChunkDownloader, chun
 		err = os.Remove(fullPath)
 		if err != nil && !os.IsNotExist(err) {
 			LOG_ERROR("DOWNLOAD_REMOVE", "Failed to remove the old file: %v", err)
-			return false
+			return false, nil
 		}
 
 		err = os.Rename(temporaryPath, fullPath)
 		if err != nil {
 			LOG_ERROR("DOWNLOAD_RENAME", "Failed to rename the file %s to %s: %v", temporaryPath, fullPath, err)
-			return false
+			return false, nil
 		}
 	}
 
 	if !showStatistics {
 		LOG_INFO("DOWNLOAD_DONE", "Downloaded %s (%d)", entry.Path, entry.Size)
 	}
-	return true
+	return true, nil
 }
 
 // CopySnapshots copies the specified snapshots from one storage to the other.
@@ -1705,7 +1806,7 @@ func (manager *BackupManager) CopySnapshots(otherManager *BackupManager, snapsho
 	LOG_DEBUG("SNAPSHOT_COPY", "Chunks to copy = %d, to skip = %d, total = %d", chunksToCopy, chunksToSkip, chunksToCopy+chunksToSkip)
 	LOG_DEBUG("SNAPSHOT_COPY", "Total chunks in source snapshot revisions = %d\n", len(chunks))
 
-	chunkDownloader := CreateChunkDownloader(manager.config, manager.storage, nil, false, threads)
+	chunkDownloader := CreateChunkDownloader(manager.config, manager.storage, nil, false, threads, false)
 
 	chunkUploader := CreateChunkUploader(otherManager.config, otherManager.storage, nil, threads,
 		func(chunk *Chunk, chunkIndex int, skipped bool, chunkSize int, uploadSize int) {
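Note that CreateChunkDownloader also gains a trailing allowFailures parameter, so every existing call site has to supply a value. Restore forwards the flag it received, while call sites that must keep the old fail-fast behaviour, such as the CopySnapshots hunk above, simply pass false. A sketch of the two touched call sites, with argument names taken from the diff:

	// Restore: honour -persist by forwarding allowFailures to the downloader.
	chunkDownloader := CreateChunkDownloader(manager.config, manager.storage, nil, showStatistics, threads, allowFailures)
	// CopySnapshots: keep the pre-patch fail-fast behaviour.
	chunkDownloader = CreateChunkDownloader(manager.config, manager.storage, nil, false, threads, false)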