github.com/duplicati/duplicati.git
author     Kenneth Skovhede <kenneth@hexad.dk>   2020-01-23 15:52:43 +0300
committer  GitHub <noreply@github.com>           2020-01-23 15:52:43 +0300
commit     a64671a0c65673e49d413c07a03feda945eea94b (patch)
tree       267f7cd1386814529fd6a02d8430c5442bb3a3a1
parent     cb6af50a63ede85473e15c02c0888b8a902f9b4a (diff)
parent     35ea890f96c3099f7b93e709a906b361bb88f030 (diff)
Merge pull request #4018 from warwickmm/refactor/fileset_removal
Refactor fileset deletion code and add tests
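
In short, this patch extracts the fileset-selection logic out of DeleteHandler into four small classes sharing a FilesetRemover base (SpecificVersionsRemover, KeepTimeRemover, KeepVersionsRemover and RetentionPolicyRemover), adds a FilesetsWithBackupVersion property to LocalDeleteDatabase so the removers can work with user-facing version numbers, and introduces a DeleteHandler unit-test category that the CI scripts now run. The snippet below is only an illustrative condensation of how DeleteHandler.DoRun composes the removers; the authoritative code is in the DeleteHandler.cs hunk further down.

    // Condensed from the DeleteHandler.cs hunk below; backend verification,
    // transaction plumbing and compaction are omitted here.
    IListResultFileset[] filesets = db.FilesetsWithBackupVersion.ToArray();

    var versionsToDelete = new List<IListResultFileset>();
    versionsToDelete.AddRange(new SpecificVersionsRemover(m_options).GetFilesetsToDelete(filesets));
    versionsToDelete.AddRange(new KeepTimeRemover(m_options).GetFilesetsToDelete(filesets));
    versionsToDelete.AddRange(new RetentionPolicyRemover(m_options).GetFilesetsToDelete(filesets));

    // KeepVersionsRemover runs against the filesets not already marked for removal,
    // so that those do not count towards the number of full versions to keep.
    versionsToDelete.AddRange(new KeepVersionsRemover(m_options).GetFilesetsToDelete(filesets.Except(versionsToDelete)));
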
-rw-r--r--  .travis.yml                                              |   2
-rw-r--r--  Duplicati/Library/Main/Database/LocalDeleteDatabase.cs   |  31
-rw-r--r--  Duplicati/Library/Main/Operation/DeleteHandler.cs        | 262
-rw-r--r--  Duplicati/UnitTest/DeleteHandlerTests.cs                 | 205
-rw-r--r--  Duplicati/UnitTest/Duplicati.UnitTest.csproj             |   1
-rwxr-xr-x  pipeline/start.sh                                        |   3
6 files changed, 376 insertions, 128 deletions
diff --git a/.travis.yml b/.travis.yml
index 20d9211aa..4412311a5 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -69,7 +69,7 @@ jobs:
directories:
- $BUILD_DIR
script:
- - ${ROOT_DIR}/pipeline/jobs/unittest_job.sh --testcategories Filter,Targeted,Purge,Serialization,WebApi,Utility,UriUtility,IO,ImportExport,Disruption,RestoreHandler,RepairHandler
+ - ${ROOT_DIR}/pipeline/jobs/unittest_job.sh --testcategories Filter,Targeted,Purge,Serialization,WebApi,Utility,UriUtility,IO,ImportExport,Disruption,RestoreHandler,RepairHandler,DeleteHandler
- stage: tests
cache:
diff --git a/Duplicati/Library/Main/Database/LocalDeleteDatabase.cs b/Duplicati/Library/Main/Database/LocalDeleteDatabase.cs
index 57f34080a..397f4a17a 100644
--- a/Duplicati/Library/Main/Database/LocalDeleteDatabase.cs
+++ b/Duplicati/Library/Main/Database/LocalDeleteDatabase.cs
@@ -17,9 +17,11 @@
// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
using System;
using System.Collections.Generic;
+using System.Data;
using System.Linq;
using System.Text;
using System.IO;
+using Duplicati.Library.Interface;
namespace Duplicati.Library.Main.Database
{
@@ -107,6 +109,35 @@ namespace Duplicati.Library.Main.Database
}
}
+ /// <summary>
+ /// Returns a collection of IListResultFilesets, where the Version is the backup version number
+ /// exposed to the user. This is in contrast to other cases where the Version is the ID in the
+ /// Fileset table.
+ /// </summary>
+ internal IEnumerable<IListResultFileset> FilesetsWithBackupVersion
+ {
+ get
+ {
+ List<IListResultFileset> filesets = new List<IListResultFileset>();
+ using (IDbCommand cmd = this.m_connection.CreateCommand())
+ {
+ // We can also use the ROW_NUMBER() window function to generate the backup versions,
+ // but this requires at least SQLite 3.25, which is not available in some common
+ // distributions (e.g., Debian) currently.
+ using (IDataReader reader = cmd.ExecuteReader(@"SELECT ""IsFullBackup"", ""Timestamp"" FROM ""Fileset"" ORDER BY ""Timestamp"" DESC"))
+ {
+ int version = 0;
+ while (reader.Read())
+ {
+ filesets.Add(new ListResultFileset(version++, reader.GetInt32(0), ParseFromEpochSeconds(reader.GetInt64(1)).ToLocalTime(), -1L, -1L));
+ }
+ }
+ }
+
+ return filesets;
+ }
+ }
+
private struct VolumeUsage
{
public readonly string Name;
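
The FilesetsWithBackupVersion property added above is what lets the refactored deletion code reason about the version numbers users see (version 0 is the most recent backup) rather than Fileset table IDs. A minimal, self-contained sketch of that numbering, using hypothetical in-memory timestamps instead of the database:

    using System;
    using System.Linq;

    class VersionNumberingSketch
    {
        static void Main()
        {
            // Hypothetical fileset timestamps; the real property reads these from the Fileset table.
            DateTime[] filesetTimes =
            {
                new DateTime(2020, 1, 1),
                new DateTime(2020, 1, 3),
                new DateTime(2020, 1, 2),
            };

            // Same idea as the ORDER BY "Timestamp" DESC query above: order newest-first
            // and number from zero, so Version 0 is always the most recent backup.
            var numbered = filesetTimes
                .OrderByDescending(time => time)
                .Select((time, index) => new { Version = index, Time = time });

            foreach (var fileset in numbered)
                Console.WriteLine($"Version {fileset.Version}: {fileset.Time:yyyy-MM-dd}");
        }
    }
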
diff --git a/Duplicati/Library/Main/Operation/DeleteHandler.cs b/Duplicati/Library/Main/Operation/DeleteHandler.cs
index e67f450bd..1f059a9e3 100644
--- a/Duplicati/Library/Main/Operation/DeleteHandler.cs
+++ b/Duplicati/Library/Main/Operation/DeleteHandler.cs
@@ -20,8 +20,8 @@
using System;
using System.Linq;
using System.Collections.Generic;
-using System.Globalization;
using Duplicati.Library.Interface;
+using Duplicati.Library.Main.Database;
namespace Duplicati.Library.Main.Operation
{
@@ -30,11 +30,7 @@ namespace Duplicati.Library.Main.Operation
/// <summary>
/// The tag used for logging
/// </summary>
- private static readonly string LOGTAG = Logging.Log.LogTagFromType<DeleteHandler>();
- /// <summary>
- /// The tag used for logging retention policy messages
- /// </summary>
- private static readonly string LOGTAG_RETENTION = LOGTAG + ":RetentionPolicy";
+ internal static readonly string LOGTAG = Logging.Log.LogTagFromType<DeleteHandler>();
private readonly DeleteResults m_result;
protected readonly string m_backendurl;
@@ -92,22 +88,27 @@ namespace Duplicati.Library.Main.Operation
var backend = bk ?? sharedManager;
if (!hasVerifiedBacked && !m_options.NoBackendverification)
- FilelistProcessor.VerifyRemoteList(backend, m_options, db, m_result.BackendWriter);
-
- var filesetNumbers = db.FilesetTimes.Zip(Enumerable.Range(0, db.FilesetTimes.Count()), (a, b) => new Tuple<long, DateTime>(b, a.Value)).ToList();
- var sets = db.FilesetTimes.Select(x => x.Value).ToArray();
- var toDelete = GetFilesetsToDelete(db, sets);
+ FilelistProcessor.VerifyRemoteList(backend, m_options, db, m_result.BackendWriter);
+
+ IListResultFileset[] filesets = db.FilesetsWithBackupVersion.ToArray();
+ List<IListResultFileset> versionsToDelete = new List<IListResultFileset>();
+ versionsToDelete.AddRange(new SpecificVersionsRemover(this.m_options).GetFilesetsToDelete(filesets));
+ versionsToDelete.AddRange(new KeepTimeRemover(this.m_options).GetFilesetsToDelete(filesets));
+ versionsToDelete.AddRange(new RetentionPolicyRemover(this.m_options).GetFilesetsToDelete(filesets));
- if (!m_options.AllowFullRemoval && sets.Length == toDelete.Length)
+ // When determining the number of full versions to keep, we need to ignore the versions already marked for removal.
+ versionsToDelete.AddRange(new KeepVersionsRemover(this.m_options).GetFilesetsToDelete(filesets.Except(versionsToDelete)));
+
+ if (!m_options.AllowFullRemoval && filesets.Length == versionsToDelete.Count)
{
Logging.Log.WriteInformationMessage(LOGTAG, "PreventingLastFilesetRemoval", "Preventing removal of last fileset, use --{0} to allow removal ...", "allow-full-removal");
- toDelete = toDelete.Skip(1).ToArray();
+ versionsToDelete = versionsToDelete.OrderBy(x => x.Version).Skip(1).ToList();
}
- if (toDelete != null && toDelete.Length > 0)
- Logging.Log.WriteInformationMessage(LOGTAG, "DeleteRemoteFileset", "Deleting {0} remote fileset(s) ...", toDelete.Length);
+ if (versionsToDelete.Count > 0)
+ Logging.Log.WriteInformationMessage(LOGTAG, "DeleteRemoteFileset", "Deleting {0} remote fileset(s) ...", versionsToDelete.Count);
- var lst = db.DropFilesetsFromTable(toDelete, transaction).ToArray();
+ var lst = db.DropFilesetsFromTable(versionsToDelete.Select(x => x.Time).ToArray(), transaction).ToArray();
foreach(var f in lst)
db.UpdateRemoteVolume(f.Key, RemoteVolumeState.Deleting, f.Value, null, transaction);
@@ -156,222 +157,233 @@ namespace Duplicati.Library.Main.Operation
Logging.Log.WriteDryrunMessage(LOGTAG, "WouldDeleteHelp", "Remove --dry-run to actually delete files");
}
- if (!m_options.NoAutoCompact && (forceCompact || (toDelete != null && toDelete.Length > 0)))
+ if (!m_options.NoAutoCompact && (forceCompact || versionsToDelete.Count > 0))
{
m_result.CompactResults = new CompactResults(m_result);
new CompactHandler(m_backendurl, m_options, (CompactResults)m_result.CompactResults).DoCompact(db, true, ref transaction, sharedManager);
}
-
- m_result.SetResults(
- from n in filesetNumbers
- where toDelete.Contains(n.Item2)
- select n,
- m_options.Dryrun);
+
+ m_result.SetResults(versionsToDelete.Select(v => new Tuple<long, DateTime>(v.Version, v.Time)), m_options.Dryrun);
}
}
+ }
- /// <summary>
- /// Gets the filesets selected for deletion
- /// </summary>
- /// <returns>The filesets to delete</returns>
- /// <param name="allBackups">The list of backups that can be deleted</param>
- private DateTime[] GetFilesetsToDelete(Database.LocalDeleteDatabase db, DateTime[] allBackups)
+ public abstract class FilesetRemover
+ {
+ protected readonly Options Options;
+
+ protected FilesetRemover(Options options)
{
- if (allBackups.Length == 0)
- {
- return allBackups;
- }
+ this.Options = options;
+ }
- DateTime[] sortedAllBackups = allBackups.OrderByDescending(x => x.ToUniversalTime()).ToArray();
+ public abstract IEnumerable<IListResultFileset> GetFilesetsToDelete(IEnumerable<IListResultFileset> filesets);
+ }
- if (sortedAllBackups.Select(x => x.ToUniversalTime()).Distinct().Count() != sortedAllBackups.Length)
- {
- throw new Exception($"List of backup timestamps contains duplicates: {string.Join(", ", sortedAllBackups.Select(x => x.ToString()))}");
- }
+ /// <summary>
+ /// Remove versions specified by the --version option.
+ /// </summary>
+ public class SpecificVersionsRemover : FilesetRemover
+ {
+ public SpecificVersionsRemover(Options options) : base(options)
+ {
+ }
- List<DateTime> toDelete = new List<DateTime>();
+ public override IEnumerable<IListResultFileset> GetFilesetsToDelete(IEnumerable<IListResultFileset> filesets)
+ {
+ ISet<long> versionsToDelete = new HashSet<long>(this.Options.Version ?? new long[0]);
+ return filesets.Where(x => versionsToDelete.Contains(x.Version));
+ }
+ }
- // Remove backups explicitly specified via option
- var versions = m_options.Version;
- if (versions != null && versions.Length > 0)
- {
- foreach (var ix in versions.Distinct())
- {
- if (ix >= 0 && ix < sortedAllBackups.Length)
- {
- toDelete.Add(sortedAllBackups[ix]);
- }
- }
- }
+ /// <summary>
+ /// Keep backups that are newer than the date specified by the --keep-time option.
+ /// If none of the retained versions are full backups, then continue to keep versions
+ /// until we have a full backup.
+ /// </summary>
+ public class KeepTimeRemover : FilesetRemover
+ {
+ public KeepTimeRemover(Options options) : base(options)
+ {
+ }
+
+ public override IEnumerable<IListResultFileset> GetFilesetsToDelete(IEnumerable<IListResultFileset> filesets)
+ {
+ IListResultFileset[] sortedFilesets = filesets.OrderByDescending(x => x.Time).ToArray();
+ List<IListResultFileset> versionsToDelete = new List<IListResultFileset>();
- // Remove backups that are older than date specified via option while ensuring
- // that we always have at least one full backup.
- var keepTime = m_options.KeepTime;
- if (keepTime.Ticks > 0)
+ DateTime earliestTime = this.Options.KeepTime;
+ if (earliestTime.Ticks > 0)
{
bool haveFullBackup = false;
- toDelete.AddRange(sortedAllBackups.SkipWhile(x =>
+ versionsToDelete.AddRange(sortedFilesets.SkipWhile(x =>
{
- bool keepBackup = (x >= keepTime) || !haveFullBackup;
- haveFullBackup = haveFullBackup || db.IsFilesetFullBackup(x);
+ bool keepBackup = (x.Time >= earliestTime) || !haveFullBackup;
+ haveFullBackup = haveFullBackup || (x.IsFullBackup == BackupType.FULL_BACKUP);
return keepBackup;
}));
}
- // Remove backups via retention policy option
- toDelete.AddRange(ApplyRetentionPolicy(db, sortedAllBackups));
+ return versionsToDelete;
+ }
+ }
+
+ /// <summary>
+ /// Keep a number of recent full backups as specified by the --keep-versions option.
+ /// Partial backups that are surrounded by full backups will also be removed.
+ /// </summary>
+ public class KeepVersionsRemover : FilesetRemover
+ {
+ public KeepVersionsRemover(Options options) : base(options)
+ {
+ }
+
+ public override IEnumerable<IListResultFileset> GetFilesetsToDelete(IEnumerable<IListResultFileset> filesets)
+ {
+ IListResultFileset[] sortedFilesets = filesets.OrderByDescending(x => x.Time).ToArray();
+ List<IListResultFileset> versionsToDelete = new List<IListResultFileset>();
// Check how many full backups will be remaining after the previous steps
// and remove oldest backups while there are still more backups than should be kept as specified via option
- var backupsRemaining = sortedAllBackups.Except(toDelete).ToList();
- var fullVersionsToKeep = m_options.KeepVersions;
- if (fullVersionsToKeep > 0 && fullVersionsToKeep < backupsRemaining.Count)
+ int fullVersionsToKeep = this.Options.KeepVersions;
+ if (fullVersionsToKeep > 0 && fullVersionsToKeep < sortedFilesets.Length)
{
int fullVersionsKept = 0;
- ISet<DateTime> intermediatePartials = new HashSet<DateTime>();
+ ISet<IListResultFileset> intermediatePartials = new HashSet<IListResultFileset>();
// Enumerate the collection starting from the most recent full backup.
- foreach (DateTime backup in backupsRemaining.SkipWhile(x => !db.IsFilesetFullBackup(x)))
+ foreach (IListResultFileset fileset in sortedFilesets.SkipWhile(x => x.IsFullBackup == BackupType.PARTIAL_BACKUP))
{
if (fullVersionsKept >= fullVersionsToKeep)
{
// If we have enough full backups, delete all older backups.
- toDelete.Add(backup);
+ versionsToDelete.Add(fileset);
}
- else if (db.IsFilesetFullBackup(backup))
+ else if (fileset.IsFullBackup == BackupType.FULL_BACKUP)
{
// We can delete partial backups that are surrounded by full backups.
- toDelete.AddRange(intermediatePartials);
+ versionsToDelete.AddRange(intermediatePartials);
intermediatePartials.Clear();
fullVersionsKept++;
}
else
{
- intermediatePartials.Add(backup);
+ intermediatePartials.Add(fileset);
}
}
}
- var toDeleteDistinct = toDelete.Distinct().OrderByDescending(x => x.ToUniversalTime()).ToArray();
- var removeCount = toDeleteDistinct.Length;
- if (removeCount > sortedAllBackups.Length)
- {
- throw new Exception($"Too many entries {removeCount} vs {sortedAllBackups.Length}, lists: {string.Join(", ", toDeleteDistinct.Select(x => x.ToString(CultureInfo.InvariantCulture)))} vs {string.Join(", ", sortedAllBackups.Select(x => x.ToString(CultureInfo.InvariantCulture)))}");
- }
+ return versionsToDelete;
+ }
+ }
+
+ /// <summary>
+ /// Remove backups according to the --retention-policy option.
+ /// Backups that are not within any of the specified time frames will NOT be deleted.
+ /// Partial backups are not removed.
+ /// </summary>
+ public class RetentionPolicyRemover : FilesetRemover
+ {
+ private static readonly string LOGTAG_RETENTION = DeleteHandler.LOGTAG + ":RetentionPolicy";
- return toDeleteDistinct;
+ public RetentionPolicyRemover(Options options) : base(options)
+ {
}
- /// <summary>
- /// Deletes backups according to the retention policy configuration.
- /// Backups that are not within any of the specified time frames will will NOT be deleted.
- /// </summary>
- /// <returns>The filesets to delete</returns>
- /// <param name="backups">The list of backups that can be deleted</param>
- private List<DateTime> ApplyRetentionPolicy(Database.LocalDeleteDatabase db, DateTime[] backups)
+ public override IEnumerable<IListResultFileset> GetFilesetsToDelete(IEnumerable<IListResultFileset> filesets)
{
- // Any work to do?
- var retentionPolicyOptionValues = m_options.RetentionPolicy;
- if (retentionPolicyOptionValues.Count == 0 || backups.Length == 0)
+ IListResultFileset[] sortedFilesets = filesets.OrderByDescending(x => x.Time).ToArray();
+ List<IListResultFileset> versionsToDelete = new List<IListResultFileset>();
+
+ List<Options.RetentionPolicyValue> retentionPolicyOptionValues = this.Options.RetentionPolicy;
+ if (retentionPolicyOptionValues.Count == 0 || sortedFilesets.Length == 0)
{
- return new List<DateTime>(); // don't delete any backups
+ return versionsToDelete;
}
Logging.Log.WriteInformationMessage(LOGTAG_RETENTION, "StartCheck", "Start checking if backups can be removed");
// Work with a copy to not modify the enumeration that the caller passed
- List<DateTime> clonedBackupList = new List<DateTime>(backups);
-
- // Make sure the backups are in descending order (newest backup in the beginning)
- clonedBackupList = clonedBackupList.OrderByDescending(x => x).ToList();
+ List<IListResultFileset> clonedBackupList = new List<IListResultFileset>(sortedFilesets);
// Most recent backup usually should never get deleted in this process, so exclude it for now,
// but keep a reference to potential delete it when allow-full-removal is set
- var mostRecentBackup = clonedBackupList.ElementAt(0);
+ IListResultFileset mostRecentBackup = clonedBackupList.ElementAt(0);
clonedBackupList.RemoveAt(0);
- var deleteMostRecentBackup = m_options.AllowFullRemoval;
-
- Logging.Log.WriteInformationMessage(LOGTAG_RETENTION, "FramesAndIntervals", "Time frames and intervals pairs: {0}",
- string.Join(", ", retentionPolicyOptionValues));
+ bool deleteMostRecentBackup = this.Options.AllowFullRemoval;
- Logging.Log.WriteInformationMessage(LOGTAG_RETENTION, "BackupList", "Backups to consider: {0}",
- string.Join(", ", clonedBackupList));
+ Logging.Log.WriteInformationMessage(LOGTAG_RETENTION, "FramesAndIntervals", "Time frames and intervals pairs: {0}", string.Join(", ", retentionPolicyOptionValues));
+ Logging.Log.WriteInformationMessage(LOGTAG_RETENTION, "BackupList", "Backups to consider: {0}", string.Join(", ", clonedBackupList));
// Collect all potential backups in each time frame and thin out according to the specified interval,
// starting with the oldest backup in that time frame.
// The order in which the time frames values are checked has to be from the smallest to the largest.
- List<DateTime> backupsToDelete = new List<DateTime>();
- var now = DateTime.Now;
- foreach (var singleRetentionPolicyOptionValue in retentionPolicyOptionValues.OrderBy(x => x.Timeframe))
+ DateTime now = DateTime.Now;
+ foreach (Options.RetentionPolicyValue singleRetentionPolicyOptionValue in retentionPolicyOptionValues.OrderBy(x => x.Timeframe))
{
// The timeframe in the retention policy option is only a timespan which has to be applied to the current DateTime to get the actual lower bound
DateTime timeFrame = (singleRetentionPolicyOptionValue.IsUnlimtedTimeframe()) ? DateTime.MinValue : (now - singleRetentionPolicyOptionValue.Timeframe);
Logging.Log.WriteProfilingMessage(LOGTAG_RETENTION, "NextTimeAndFrame", "Next time frame and interval pair: {0}", singleRetentionPolicyOptionValue.ToString());
- List<DateTime> backupsInTimeFrame = new List<DateTime>();
- while (clonedBackupList.Count > 0 && clonedBackupList[0] >= timeFrame)
+ List<IListResultFileset> backupsInTimeFrame = new List<IListResultFileset>();
+ while (clonedBackupList.Count > 0 && clonedBackupList[0].Time >= timeFrame)
{
backupsInTimeFrame.Insert(0, clonedBackupList[0]); // Insert at beginning to reverse order, which is necessary for next step
clonedBackupList.RemoveAt(0); // remove from here to not handle the same backup in two time frames
}
- Logging.Log.WriteProfilingMessage(LOGTAG_RETENTION, "BackupsInFrame", "Backups in this time frame: {0}",
- string.Join(", ", backupsInTimeFrame));
+ Logging.Log.WriteProfilingMessage(LOGTAG_RETENTION, "BackupsInFrame", "Backups in this time frame: {0}", string.Join(", ", backupsInTimeFrame));
// Run through backups in this time frame
- DateTime? lastKept = null;
- foreach (DateTime backup in backupsInTimeFrame)
+ IListResultFileset lastKept = null;
+ foreach (IListResultFileset fileset in backupsInTimeFrame)
{
- var isFullBackup = db.IsFilesetFullBackup(backup);
+ bool isFullBackup = fileset.IsFullBackup == BackupType.FULL_BACKUP;
// Keep this backup if
// - no backup has yet been added to the time frame (keeps at least the oldest backup in a time frame)
// - difference between last added backup and this backup is bigger than the specified interval
- if (lastKept == null || singleRetentionPolicyOptionValue.IsKeepAllVersions() || (backup - lastKept.Value) >= singleRetentionPolicyOptionValue.Interval)
+ if (lastKept == null || singleRetentionPolicyOptionValue.IsKeepAllVersions() || (fileset.Time - lastKept.Time) >= singleRetentionPolicyOptionValue.Interval)
{
- Logging.Log.WriteProfilingMessage(LOGTAG_RETENTION, "KeepBackups", $"Keeping {(isFullBackup ? "" : "partial")} backup: {backup}", Logging.LogMessageType.Profiling);
+ Logging.Log.WriteProfilingMessage(LOGTAG_RETENTION, "KeepBackups", $"Keeping {(isFullBackup ? "" : "partial")} backup: {fileset}", Logging.LogMessageType.Profiling);
if (isFullBackup)
{
- lastKept = backup;
+ lastKept = fileset;
}
}
else
{
if (isFullBackup)
{
- Logging.Log.WriteProfilingMessage(LOGTAG_RETENTION, "DeletingBackups",
- "Deleting backup: {0}", backup);
- backupsToDelete.Add(backup);
+ Logging.Log.WriteProfilingMessage(LOGTAG_RETENTION, "DeletingBackups", "Deleting backup: {0}", fileset);
+ versionsToDelete.Add(fileset);
}
else
{
- Logging.Log.WriteProfilingMessage(LOGTAG_RETENTION, "KeepBackups", $"Keeping partial backup: {backup}", Logging.LogMessageType.Profiling);
+ Logging.Log.WriteProfilingMessage(LOGTAG_RETENTION, "KeepBackups", $"Keeping partial backup: {fileset}", Logging.LogMessageType.Profiling);
}
}
}
// Check if most recent backup is outside of this time frame (meaning older/smaller)
- deleteMostRecentBackup &= (mostRecentBackup < timeFrame);
+ deleteMostRecentBackup &= (mostRecentBackup.Time < timeFrame);
}
// Delete all remaining backups
- backupsToDelete.AddRange(clonedBackupList);
- Logging.Log.WriteInformationMessage(LOGTAG_RETENTION, "BackupsToDelete", "Backups outside of all time frames and thus getting deleted: {0}",
- string.Join(", ", clonedBackupList));
+ versionsToDelete.AddRange(clonedBackupList);
+ Logging.Log.WriteInformationMessage(LOGTAG_RETENTION, "BackupsToDelete", "Backups outside of all time frames and thus getting deleted: {0}", string.Join(", ", clonedBackupList));
// Delete most recent backup if allow-full-removal is set and the most current backup is outside of any time frame
if (deleteMostRecentBackup)
{
- backupsToDelete.Add(mostRecentBackup);
- Logging.Log.WriteInformationMessage(LOGTAG_RETENTION, "DeleteMostRecent", "Deleting most recent backup: {0}",
- mostRecentBackup);
+ versionsToDelete.Add(mostRecentBackup);
+ Logging.Log.WriteInformationMessage(LOGTAG_RETENTION, "DeleteMostRecent", "Deleting most recent backup: {0}", mostRecentBackup);
}
- Logging.Log.WriteInformationMessage(LOGTAG_RETENTION, "AllBackupsToDelete", "All backups to delete: {0}",
- string.Join(", ", backupsToDelete.OrderByDescending(x => x)));
+ Logging.Log.WriteInformationMessage(LOGTAG_RETENTION, "AllBackupsToDelete", "All backups to delete: {0}", string.Join(", ", versionsToDelete.OrderByDescending(x => x.Time)));
- return backupsToDelete;
+ return versionsToDelete;
}
}
}
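
Because the selection logic now lives in public classes, it can be exercised without a database or a backend, which is exactly what the new unit tests below do. A minimal usage sketch, assuming a project that references Duplicati.Library.Main and Duplicati.Library.Interface (the KeepTimeSketch class and its method name are illustrative, not part of the patch):

    using System.Collections.Generic;
    using Duplicati.Library.Interface;
    using Duplicati.Library.Main;
    using Duplicati.Library.Main.Operation;

    static class KeepTimeSketch
    {
        // Returns the filesets the keep-time rule would delete: everything older than the
        // cutoff, except that the remover keeps older versions until at least one full
        // backup is retained.
        public static IEnumerable<IListResultFileset> SelectForDeletion(IEnumerable<IListResultFileset> filesets)
        {
            // 30 days expressed in seconds, mirroring how the KeepTimeRemover test below builds the option.
            var options = new Options(new Dictionary<string, string> { { "keep-time", "2592000s" } });
            return new KeepTimeRemover(options).GetFilesetsToDelete(filesets);
        }
    }
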
diff --git a/Duplicati/UnitTest/DeleteHandlerTests.cs b/Duplicati/UnitTest/DeleteHandlerTests.cs
new file mode 100644
index 000000000..e472ca401
--- /dev/null
+++ b/Duplicati/UnitTest/DeleteHandlerTests.cs
@@ -0,0 +1,205 @@
+using System;
+using System.Collections.Generic;
+using System.Linq;
+using Duplicati.Library.Interface;
+using Duplicati.Library.Main;
+using Duplicati.Library.Main.Database;
+using Duplicati.Library.Main.Operation;
+using NUnit.Framework;
+
+namespace Duplicati.UnitTest
+{
+ public class DeleteHandlerTests
+ {
+ private class Fileset : IListResultFileset, IEquatable<Fileset>
+ {
+ public long Version { get; }
+ public int IsFullBackup { get; }
+ public DateTime Time { get; }
+ public long FileCount { get; }
+ public long FileSizes { get; }
+
+ public Fileset(int version, int backupType, DateTime time)
+ {
+ this.Version = version;
+ this.IsFullBackup = backupType;
+ this.Time = time;
+ this.FileCount = 0;
+ this.FileSizes = 0;
+ }
+
+ public bool Equals(Fileset other)
+ {
+ if (ReferenceEquals(null, other))
+ {
+ return false;
+ }
+ if (ReferenceEquals(this, other))
+ {
+ return true;
+ }
+ return this.Version == other.Version && this.IsFullBackup == other.IsFullBackup && this.Time.Equals(other.Time) && this.FileCount == other.FileCount && this.FileSizes == other.FileSizes;
+ }
+ }
+
+ [Test]
+ [Category("DeleteHandler")]
+ public void KeepTimeRemover()
+ {
+ DateTime today = DateTime.Today;
+ List<IListResultFileset> filesets = new List<IListResultFileset>
+ {
+ new Fileset(0, BackupType.FULL_BACKUP, today.AddDays(-2)),
+ new Fileset(1, BackupType.FULL_BACKUP, today.AddDays(-1)),
+ new Fileset(2, BackupType.PARTIAL_BACKUP, today),
+ new Fileset(3, BackupType.PARTIAL_BACKUP, today.AddDays(1)),
+ };
+
+ // Although version 1 is older than the specified TimeSpan, we do not delete
+ // it because it is the most recent full backup.
+ TimeSpan timeSpan = new TimeSpan(1, 0, 0, 0);
+ IListResultFileset[] expectedFilesetsToRemove = {filesets[0]};
+
+ Random random = new Random();
+ Options options = new Options(new Dictionary<string, string> {{"keep-time", $"{(int) timeSpan.TotalSeconds}s"}});
+ KeepTimeRemover remover = new KeepTimeRemover(options);
+ IListResultFileset[] filesetsToRemove = remover.GetFilesetsToDelete(filesets.OrderBy(x => random.Next())).ToArray();
+ CollectionAssert.AreEquivalent(expectedFilesetsToRemove, filesetsToRemove);
+
+ // If there is a full backup within the specified TimeSpan, we can respect
+ // the TimeSpan strictly.
+ filesets.Add(new Fileset(4, BackupType.FULL_BACKUP, today.AddDays(2)));
+ CollectionAssert.AreEquivalent(expectedFilesetsToRemove, filesetsToRemove);
+ expectedFilesetsToRemove = filesets.Where(x => x.Time < options.KeepTime).ToArray();
+ filesetsToRemove = remover.GetFilesetsToDelete(filesets).ToArray();
+
+ CollectionAssert.AreEquivalent(expectedFilesetsToRemove, filesetsToRemove);
+ }
+
+ [Test]
+ [Category("DeleteHandler")]
+ public void KeepVersionsRemover()
+ {
+ IListResultFileset[] filesets =
+ {
+ new Fileset(0, BackupType.FULL_BACKUP, new DateTime(2000, 1, 1)),
+ new Fileset(1, BackupType.PARTIAL_BACKUP, new DateTime(2000, 1, 2)),
+ new Fileset(2, BackupType.FULL_BACKUP, new DateTime(2000, 1, 3)),
+ new Fileset(3, BackupType.PARTIAL_BACKUP, new DateTime(2000, 1, 4)),
+ new Fileset(4, BackupType.FULL_BACKUP, new DateTime(2000, 1, 5)),
+ new Fileset(5, BackupType.PARTIAL_BACKUP, new DateTime(2000, 1, 6)),
+ };
+
+ Options options = new Options(new Dictionary<string, string> {{"keep-versions", "2"}});
+ IListResultFileset[] expectedFilesetsToRemove =
+ {
+ filesets[0], // Delete; third oldest full backup.
+ filesets[1], // Delete; intermediate partial backup.
+ filesets[3] // Delete; intermediate partial backup.
+ };
+
+ Random random = new Random();
+ KeepVersionsRemover remover = new KeepVersionsRemover(options);
+ IListResultFileset[] filesetsToRemove = remover.GetFilesetsToDelete(filesets.OrderBy(x => random.Next())).ToArray();
+
+ CollectionAssert.AreEquivalent(expectedFilesetsToRemove, filesetsToRemove);
+ }
+
+ [Test]
+ [Category("DeleteHandler")]
+ public void RetentionPolicyRemover()
+ {
+ Options options = new Options(new Dictionary<string, string> {{"retention-policy", "1W:U,3M:1D,1Y:1W,U:1M"}});
+
+ DateTime now = DateTime.Now;
+ IListResultFileset[] filesets =
+ {
+ // Past week. These should all be retained.
+ new Fileset(0, BackupType.PARTIAL_BACKUP, now),
+ new Fileset(1, BackupType.FULL_BACKUP, now.AddMilliseconds(-1)),
+ new Fileset(2, BackupType.FULL_BACKUP, now.AddSeconds(-1)),
+ new Fileset(3, BackupType.PARTIAL_BACKUP, now.AddMinutes(-1)),
+ new Fileset(4, BackupType.PARTIAL_BACKUP, now.AddHours(-1)),
+ new Fileset(5, BackupType.PARTIAL_BACKUP, now.AddDays(-1)),
+ new Fileset(6, BackupType.FULL_BACKUP, now.AddDays(-6)),
+
+ // Past 3 months.
+ new Fileset(7, BackupType.FULL_BACKUP, now.AddDays(-8)), // Keep; first in interval.
+ new Fileset(8, BackupType.FULL_BACKUP, now.AddDays(-8).AddHours(1)), // Delete; second in interval.
+ new Fileset(9, BackupType.FULL_BACKUP, now.AddDays(-8).AddHours(2)), // Delete; third in interval
+ new Fileset(10, BackupType.PARTIAL_BACKUP, now.AddMonths(-1)), // Keep; partial
+ new Fileset(11, BackupType.PARTIAL_BACKUP, now.AddMonths(-1).AddHours(1)), // Keep; partial
+ new Fileset(12, BackupType.FULL_BACKUP, now.AddMonths(-1).AddHours(2)), // Keep; first full in interval. Do not discard full in favor of partial.
+ new Fileset(13, BackupType.FULL_BACKUP, now.AddMonths(-2)), // Keep; first in interval
+ new Fileset(14, BackupType.FULL_BACKUP, now.AddMonths(-2).AddHours(1)), // Delete; second in interval.
+ new Fileset(15, BackupType.FULL_BACKUP, now.AddDays(-89).AddHours(1)), // Keep; first in interval.
+ new Fileset(16, BackupType.FULL_BACKUP, now.AddDays(-89).AddHours(2)), // Delete; second in interval.
+
+ // Past year.
+ new Fileset(17, BackupType.FULL_BACKUP, now.AddDays(-92)), // Keep; first in interval.
+ new Fileset(18, BackupType.FULL_BACKUP, now.AddDays(-91)), // Delete; second in interval.
+ new Fileset(19, BackupType.PARTIAL_BACKUP, now.AddDays(-(90 + 73))), // Keep; partial
+ new Fileset(20, BackupType.PARTIAL_BACKUP, now.AddDays(-(90 + 72))), // Keep; partial.
+ new Fileset(21, BackupType.PARTIAL_BACKUP, now.AddDays(-(90 + 71))), // Keep; first full in interval. Do not discard full in favor of partial.
+ new Fileset(22, BackupType.FULL_BACKUP, now.AddDays(-(90 + 142))), // Keep; first in interval.
+ new Fileset(23, BackupType.FULL_BACKUP, now.AddDays(-(90 + 141))), // Delete; second in interval.
+
+ // Unlimited.
+ new Fileset(24, BackupType.FULL_BACKUP, now.AddYears(-1).AddMonths(-1)), // Keep; first in interval.
+ new Fileset(25, BackupType.FULL_BACKUP, now.AddYears(-1).AddMonths(-1).AddDays(1)), // Delete; second in interval.
+ new Fileset(26, BackupType.PARTIAL_BACKUP, new DateTime(1, 1, 1)), // Keep; partial
+ new Fileset(27, BackupType.FULL_BACKUP, new DateTime(1, 1, 30)), // Keep; first full in interval. Do not discard full in favor of partial.
+ };
+
+ IListResultFileset[] expectedFilesetsToRemove =
+ {
+ // 3M:1D
+ filesets[8],
+ filesets[9],
+ filesets[14],
+ filesets[16],
+
+ // 1Y:1W
+ filesets[18],
+ filesets[23],
+
+ // U:1M
+ filesets[25],
+ };
+
+ Random random = new Random();
+ RetentionPolicyRemover remover = new RetentionPolicyRemover(options);
+ IListResultFileset[] filesetsToRemove = remover.GetFilesetsToDelete(filesets.OrderBy(x => random.Next())).ToArray();
+
+ CollectionAssert.AreEquivalent(expectedFilesetsToRemove, filesetsToRemove);
+ }
+
+ [Test]
+ [Category("DeleteHandler")]
+ public void SpecificVersionsRemover()
+ {
+ IListResultFileset[] filesets =
+ {
+ new Fileset(0, BackupType.FULL_BACKUP, new DateTime(2000, 1, 1)),
+ new Fileset(1, BackupType.PARTIAL_BACKUP, new DateTime(2000, 1, 2)),
+ new Fileset(2, BackupType.FULL_BACKUP, new DateTime(2000, 1, 3)),
+ new Fileset(3, BackupType.PARTIAL_BACKUP, new DateTime(2000, 1, 4)),
+ new Fileset(4, BackupType.FULL_BACKUP, new DateTime(2000, 1, 5))
+ };
+
+ Options options = new Options(new Dictionary<string, string> {{"version", "0,3,4"}});
+ IListResultFileset[] expectedFilesetsToRemove =
+ {
+ filesets[0],
+ filesets[3],
+ filesets[4]
+ };
+
+ Random random = new Random();
+ SpecificVersionsRemover remover = new SpecificVersionsRemover(options);
+ IListResultFileset[] filesetsToRemove = remover.GetFilesetsToDelete(filesets.OrderBy(x => random.Next())).ToArray();
+
+ CollectionAssert.AreEquivalent(expectedFilesetsToRemove, filesetsToRemove);
+ }
+ }
+}
\ No newline at end of file
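
For readers unfamiliar with the retention-policy syntax exercised in RetentionPolicyRemover() above: the value is a comma-separated list of timeframe:interval pairs, checked from the smallest time frame to the largest. The sketch below spells out one reading of the test's policy string; the pair semantics are inferred from the test's inline comments and from the IsUnlimtedTimeframe/IsKeepAllVersions checks in DeleteHandler.cs, and the RetentionPolicySketch helper is a hypothetical name:

    using System.Collections.Generic;
    using Duplicati.Library.Interface;
    using Duplicati.Library.Main;
    using Duplicati.Library.Main.Operation;

    static class RetentionPolicySketch
    {
        // "1W:U,3M:1D,1Y:1W,U:1M" reads roughly as:
        //   1W:U   keep every backup made within the last week (U interval = keep all)
        //   3M:1D  within the last three months, keep at most one backup per day
        //   1Y:1W  within the last year, keep at most one backup per week
        //   U:1M   for everything older (U time frame = unlimited), keep at most one per month
        // Partial backups are never selected for deletion by RetentionPolicyRemover.
        public static IEnumerable<IListResultFileset> SelectForDeletion(IEnumerable<IListResultFileset> filesets)
        {
            var options = new Options(new Dictionary<string, string> { { "retention-policy", "1W:U,3M:1D,1Y:1W,U:1M" } });
            return new RetentionPolicyRemover(options).GetFilesetsToDelete(filesets);
        }
    }
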
diff --git a/Duplicati/UnitTest/Duplicati.UnitTest.csproj b/Duplicati/UnitTest/Duplicati.UnitTest.csproj
index 6d7899dc7..49b64f60c 100644
--- a/Duplicati/UnitTest/Duplicati.UnitTest.csproj
+++ b/Duplicati/UnitTest/Duplicati.UnitTest.csproj
@@ -43,6 +43,7 @@
<Reference Include="System.IO.Compression.FileSystem" />
</ItemGroup>
<ItemGroup>
+ <Compile Include="DeleteHandlerTests.cs" />
<Compile Include="DisruptionTests.cs" />
<Compile Include="RecoveryToolTests.cs" />
<Compile Include="RestoreHandlerTests.cs" />
diff --git a/pipeline/start.sh b/pipeline/start.sh
index d052abd94..9267229be 100755
--- a/pipeline/start.sh
+++ b/pipeline/start.sh
@@ -8,5 +8,4 @@ ${ROOT_DIR}/pipeline/jobs/unittest_job.sh --testcategories BulkNormal
${ROOT_DIR}/pipeline/jobs/unittest_job.sh --testcategories BulkNoSize
${ROOT_DIR}/pipeline/jobs/unittest_job.sh --testcategories SVNDataLong,SVNData,RecoveryTool
${ROOT_DIR}/pipeline/jobs/unittest_job.sh --testcategories Border
-${ROOT_DIR}/pipeline/jobs/unittest_job.sh --testcategories Filter,Targeted,Purge,Serialization,WebApi,Utility,UriUtility,IO,ImportExport,Disruption,RestoreHandler,RepairHandler
-
+${ROOT_DIR}/pipeline/jobs/unittest_job.sh --testcategories Filter,Targeted,Purge,Serialization,WebApi,Utility,UriUtility,IO,ImportExport,Disruption,RestoreHandler,RepairHandler,DeleteHandler