
github.com/duplicati/duplicati.git
author     Kenneth Skovhede <kenneth@hexad.dk>  2016-03-17 23:51:42 +0300
committer  Kenneth Skovhede <kenneth@hexad.dk>  2016-03-17 23:51:42 +0300
commit     0ebbef0b9749425c1fa2dc85a24563994affe782 (patch)
tree       5720c106f22e3ff66d11519ea549d9f97c4b93ec /Duplicati
parent     f3477cac3a04b342607adccff3b09dcd11d5aef7 (diff)
Implemented a feature that does not delete extra index files if their content matches the local database.
Diffstat (limited to 'Duplicati')
-rw-r--r--  Duplicati/Library/Main/Database/LocalRepairDatabase.cs | 81
-rw-r--r--  Duplicati/Library/Main/Operation/RepairHandler.cs      | 52
2 files changed, 133 insertions, 0 deletions
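
In short, the repair pass previously treated any remote index file it did not recognize as extra data to be removed. With this commit, an unrecognized index file is first downloaded and cross-checked against the local database, and it is only kept and registered if everything matches. A minimal sketch of that decision, with Duplicati's option check and verification steps replaced by hypothetical placeholders:

using System;

static class RepairDecision
{
    // Illustrative only: "indexFilePolicyEnabled" stands in for the
    // m_options.IndexfilePolicy != IndexFileStrategy.None test, and
    // "matchesLocalDatabase" for the download-and-verify steps in the diff below.
    public static bool ShouldKeepUnknownRemoteFile(
        bool isIndexFile, bool indexFilePolicyEnabled, Func<bool> matchesLocalDatabase)
    {
        if (!isIndexFile || !indexFilePolicyEnabled)
            return false;                  // handled as before: treated as an extra remote file

        try
        {
            return matchesLocalDatabase(); // verify referenced volumes, blocks and blocklists
        }
        catch
        {
            return false;                  // any verification failure falls back to the old path
        }
    }
}
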
diff --git a/Duplicati/Library/Main/Database/LocalRepairDatabase.cs b/Duplicati/Library/Main/Database/LocalRepairDatabase.cs
index 195c8efe7..32f3ab1cc 100644
--- a/Duplicati/Library/Main/Database/LocalRepairDatabase.cs
+++ b/Duplicati/Library/Main/Database/LocalRepairDatabase.cs
@@ -490,6 +490,87 @@ namespace Duplicati.Library.Main.Database
}
}
}
+
+ public void CheckAllBlocksAreInVolume(string filename, IEnumerable<KeyValuePair<string, long>> blocks)
+ {
+ using(var tr = m_connection.BeginTransaction())
+ using(var cmd = m_connection.CreateCommand(tr))
+ {
+ var tablename = "ProbeBlocks-" + Library.Utility.Utility.ByteArrayAsHexString(Guid.NewGuid().ToByteArray());
+
+ cmd.ExecuteNonQuery(string.Format(@"CREATE TEMPORARY TABLE ""{0}"" (""Hash"" TEXT NOT NULL, ""Size"" INTEGER NOT NULL)", tablename));
+ cmd.CommandText = string.Format(@"INSERT INTO ""{0}"" (""Hash"", ""Size"") VALUES (?, ?)", tablename);
+ cmd.AddParameters(2);
+
+ foreach(var kp in blocks)
+ {
+ cmd.SetParameterValue(0, kp.Key);
+ cmd.SetParameterValue(1, kp.Value);
+ cmd.ExecuteNonQuery();
+ }
+
+ var id = cmd.ExecuteScalarInt64(@"SELECT ""ID"" FROM ""RemoteVolume"" WHERE ""Name"" = ?", -1, filename);
+ var aliens = cmd.ExecuteScalarInt64(string.Format(@"SELECT COUNT(*) FROM (SELECT ""A"".""VolumeID"" FROM ""{0}"" B LEFT OUTER JOIN ""Block"" A ON ""A"".""Hash"" = ""B"".""Hash"" AND ""A"".""Size"" = ""B"".""Size"") WHERE ""VolumeID"" != ? ", tablename), 0, id);
+
+ cmd.ExecuteNonQuery(string.Format(@"DROP TABLE IF EXISTS ""{0}"" ", tablename));
+
+ if (aliens != 0)
+ throw new Exception(string.Format("Not all blocks were found in {0}", filename));
+ }
+ }
+
+ public void CheckBlocklistCorrect(string hash, long length, IEnumerable<string> blocklist, long blocksize, long blockhashlength)
+ {
+ using(var cmd = m_connection.CreateCommand())
+ {
+ var query = string.Format(@"
+SELECT
+ ""C"".""Hash"",
+ ""C"".""Size""
+FROM
+ ""BlocksetEntry"" A,
+ (
+ SELECT
+ ""Y"".""BlocksetID"",
+ ""Y"".""Hash"" AS ""BlocklistHash"",
+ ""Y"".""Index"" AS ""BlocklistHashIndex"",
+ ""Z"".""Size"" AS ""BlocklistSize"",
+ ""Z"".""ID"" AS ""BlocklistHashBlockID""
+ FROM
+ ""BlocklistHash"" Y,
+ ""Block"" Z
+ WHERE
+ ""Y"".""Hash"" = ""Z"".""Hash"" AND ""Y"".""Hash"" = ? AND ""Z"".""Size"" = ?
+ LIMIT 1
+ ) B,
+ ""Block"" C
+WHERE
+ ""A"".""BlocksetID"" = ""B"".""BlocksetID""
+ AND
+ ""A"".""BlockID"" = ""C"".""ID""
+ AND
+ ""A"".""Index"" >= ""B"".""BlocklistHashIndex"" * ({0} / {1})
+ AND
+ ""A"".""Index"" < (""B"".""BlocklistHashIndex"" + 1) * ({0} / {1})
+ORDER BY
+ ""A"".""Index""
+
+"
+ ,blocksize, blockhashlength);
+
+ var en = blocklist.GetEnumerator();
+ foreach(var r in cmd.ExecuteReaderEnumerable(query, hash, length))
+ {
+ if (!en.MoveNext())
+ throw new Exception(string.Format("Too few entries in source blocklist with hash {0}", hash));
+ if (en.Current != r.GetString(0))
+ throw new Exception(string.Format("Mismatch in blocklist with hash {0}", hash));
+ }
+
+ if (en.MoveNext())
+ throw new Exception(string.Format("Too many source blocklist entries in {0}", hash));
+ }
+ }
}
}
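
The two methods added above verify an index file's claims against the local database: CheckAllBlocksAreInVolume loads the (hash, size) pairs listed for a dblock volume into a temporary table and joins them against the Block table to confirm every block is recorded as belonging to that volume, while CheckBlocklistCorrect recomputes the expected blocklist entries from BlocksetEntry and compares them, in order, against the entries stored in the index file. Below is a self-contained sketch of the temporary-table probe, written against Microsoft.Data.Sqlite rather than Duplicati's command helpers (ExecuteScalarInt64, AddParameters). The sketch also adds an IS NULL test so that blocks absent from the Block table entirely are flagged; the committed query compares only "VolumeID" != ?, which skips NULL rows under SQL's three-valued logic.

using System;
using System.Collections.Generic;
using Microsoft.Data.Sqlite;

static class BlockProbe
{
    // Sketch only: assumes an open connection to a database containing Duplicati's
    // "Block" and "RemoteVolume" tables as referenced by the queries above.
    public static void CheckAllBlocksAreInVolume(
        SqliteConnection con, string volumeName, IEnumerable<KeyValuePair<string, long>> blocks)
    {
        var tablename = "ProbeBlocks-" + Guid.NewGuid().ToString("N");

        using (var tr = con.BeginTransaction())
        using (var cmd = con.CreateCommand())
        {
            cmd.Transaction = tr;

            // Stage the (hash, size) pairs claimed by the index file in a temp table.
            cmd.CommandText = $@"CREATE TEMPORARY TABLE ""{tablename}"" (""Hash"" TEXT NOT NULL, ""Size"" INTEGER NOT NULL)";
            cmd.ExecuteNonQuery();

            cmd.CommandText = $@"INSERT INTO ""{tablename}"" (""Hash"", ""Size"") VALUES (@hash, @size)";
            var pHash = cmd.Parameters.Add("@hash", SqliteType.Text);
            var pSize = cmd.Parameters.Add("@size", SqliteType.Integer);
            foreach (var kp in blocks)
            {
                pHash.Value = kp.Key;
                pSize.Value = kp.Value;
                cmd.ExecuteNonQuery();
            }

            // Resolve the volume name to its local ID (-1 if unknown).
            cmd.CommandText = @"SELECT ""ID"" FROM ""RemoteVolume"" WHERE ""Name"" = @name";
            cmd.Parameters.Clear();
            cmd.Parameters.AddWithValue("@name", volumeName);
            var id = Convert.ToInt64(cmd.ExecuteScalar() ?? -1L);

            // Count probed blocks that are unknown (NULL) or recorded in a different volume.
            cmd.CommandText = $@"SELECT COUNT(*) FROM ""{tablename}"" B
LEFT OUTER JOIN ""Block"" A ON A.""Hash"" = B.""Hash"" AND A.""Size"" = B.""Size""
WHERE A.""VolumeID"" IS NULL OR A.""VolumeID"" != @id";
            cmd.Parameters.Clear();
            cmd.Parameters.AddWithValue("@id", id);
            var aliens = Convert.ToInt64(cmd.ExecuteScalar());

            // Clean up the probe table before reporting the result.
            cmd.CommandText = $@"DROP TABLE IF EXISTS ""{tablename}""";
            cmd.Parameters.Clear();
            cmd.ExecuteNonQuery();

            if (aliens != 0)
                throw new Exception($"Not all blocks were found in {volumeName}");
        }
    }
}
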
diff --git a/Duplicati/Library/Main/Operation/RepairHandler.cs b/Duplicati/Library/Main/Operation/RepairHandler.cs
index 3f316aa8d..1448bd9f3 100644
--- a/Duplicati/Library/Main/Operation/RepairHandler.cs
+++ b/Duplicati/Library/Main/Operation/RepairHandler.cs
@@ -185,6 +185,56 @@ namespace Duplicati.Library.Main.Operation
progress++;
m_result.OperationProgressUpdater.UpdateProgress((float)progress / targetProgess);
+
+ // If this is a new index file, we can accept it if it matches our local data
+ // This makes it possible to augment the remote store with new index data
+ if (n.FileType == RemoteVolumeType.Index && m_options.IndexfilePolicy != Options.IndexFileStrategy.None)
+ {
+ try
+ {
+ string hash;
+ long size;
+ using(var tf = backend.GetWithInfo(n.File.Name, out size, out hash))
+ using(var ifr = new IndexVolumeReader(n.CompressionModule, tf, m_options, m_options.BlockhashSize))
+ {
+ foreach(var rv in ifr.Volumes)
+ {
+ string cmphash;
+ long cmpsize;
+ RemoteVolumeType cmptype;
+ RemoteVolumeState cmpstate;
+ if (!db.GetRemoteVolume(rv.Filename, out cmphash, out cmpsize, out cmptype, out cmpstate))
+ throw new Exception(string.Format("Unknown remote file {0} detected", rv.Filename));
+
+ if (!new [] { RemoteVolumeState.Uploading, RemoteVolumeState.Uploaded, RemoteVolumeState.Verified }.Contains(cmpstate))
+ throw new Exception(string.Format("Volume {0} has local state {1}", rv.Filename, cmpstate));
+
+ if (cmphash != rv.Hash || cmpsize != rv.Length || ! new [] { RemoteVolumeState.Uploading, RemoteVolumeState.Uploaded, RemoteVolumeState.Verified }.Contains(cmpstate))
+ throw new Exception(string.Format("Volume {0} hash/size mismatch ({1} - {2}) vs ({3} - {4})", rv.Filename, cmphash, cmpsize, rv.Hash, rv.Length));
+
+ db.CheckAllBlocksAreInVolume(rv.Filename, rv.Blocks);
+ }
+
+ var blocksize = m_options.Blocksize;
+ foreach(var ixb in ifr.BlockLists)
+ db.CheckBlocklistCorrect(ixb.Hash, ixb.Length, ixb.Blocklist, blocksize, hashsize);
+
+ var selfid = db.GetRemoteVolumeID(n.File.Name);
+ foreach(var rv in ifr.Volumes)
+ db.AddIndexBlockLink(selfid, db.GetRemoteVolumeID(rv.Filename), null);
+ }
+
+ // All checks fine, we accept the new index file
+ m_result.AddMessage(string.Format("Accepting new index file {0}", n.File.Name));
+ db.RegisterRemoteVolume(n.File.Name, RemoteVolumeType.Index, size, RemoteVolumeState.Uploading);
+ db.UpdateRemoteVolume(n.File.Name, RemoteVolumeState.Verified, size, hash);
+ continue;
+ }
+ catch (Exception rex)
+ {
+ m_result.AddError(string.Format("Failed to accept new index file: {0}, message: {1}", n.File.Name, rex.Message), rex);
+ }
+ }
if (!m_options.Dryrun)
{
@@ -399,6 +449,8 @@ namespace Duplicati.Library.Main.Operation
using(var db = new LocalRepairDatabase(m_options.Dbpath))
{
db.SetResult(m_result);
+ Utility.UpdateOptionsFromDb(db, m_options);
+
db.FixDuplicateMetahash();
db.FixDuplicateFileentries();
db.FixDuplicateBlocklistHashes(m_options.Blocksize, m_options.BlockhashSize);
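
In RepairHandler, the new branch applies only when the remote file is an index volume and the index-file policy is not None. The file is downloaded, and for every dblock volume it references the local database must already hold a matching record: the volume must be known, be in the Uploading, Uploaded or Verified state, and carry the same hash and size; every referenced block must be registered in that volume, and every blocklist in the index file must reproduce the locally stored blocklist. Only then is the index file registered and marked Verified; any failure is logged and the file is handled as before. The final hunk additionally loads stored option values from the database before the duplicate-fix routines run, presumably so that Blocksize and BlockhashSize match the values the backup was created with. A compact sketch of the per-volume acceptance test, using hypothetical stand-in types rather than Duplicati's:

using System;
using System.Linq;

// Stand-ins for Duplicati's RemoteVolumeState and the row returned by db.GetRemoteVolume().
enum VolumeState { Uploading, Uploaded, Verified, Other }

sealed record LocalVolumeRecord(string Hash, long Size, VolumeState State);

static class IndexFileChecks
{
    static readonly VolumeState[] AcceptedStates =
        { VolumeState.Uploading, VolumeState.Uploaded, VolumeState.Verified };

    // Throws if the local record does not match what the candidate index file claims,
    // mirroring the three checks in the loop over ifr.Volumes above.
    public static void VerifyReferencedVolume(
        string filename, string claimedHash, long claimedSize, LocalVolumeRecord local)
    {
        if (local == null)
            throw new Exception($"Unknown remote file {filename} detected");

        if (!AcceptedStates.Contains(local.State))
            throw new Exception($"Volume {filename} has local state {local.State}");

        if (local.Hash != claimedHash || local.Size != claimedSize)
            throw new Exception(
                $"Volume {filename} hash/size mismatch ({local.Hash} - {local.Size}) vs ({claimedHash} - {claimedSize})");
    }
}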