#region Disclaimer / License
// Copyright (C) 2019, The Duplicati Team
// http://www.duplicati.com, info@duplicati.com
//
// This library is free software; you can redistribute it and/or
// modify it under the terms of the GNU Lesser General Public
// License as published by the Free Software Foundation; either
// version 2.1 of the License, or (at your option) any later version.
//
// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
// Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public
// License along with this library; if not, write to the Free Software
// Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA
//
#endregion
using System;
using System.Collections.Generic;
using System.Linq;
using Duplicati.Library.Main.Database;
using Duplicati.Library.Main.Volumes;
using Duplicati.Library.Interface;
using System.Threading.Tasks;
using CoCoL;
using System.Threading;
using Duplicati.Library.Snapshots;
using Duplicati.Library.Utility;
using Duplicati.Library.Common.IO;
using Duplicati.Library.Common;
using Duplicati.Library.Logging;
using Duplicati.Library.Main.Operation.Backup;
using Duplicati.Library.Main.Operation.Common;

namespace Duplicati.Library.Main.Operation
{
    /// <summary>
    /// The backup handler is the primary operation;
    /// it performs a backup of the given sources
    /// to the chosen destination
    /// </summary>
    internal class BackupHandler : IDisposable
    {   
        /// <summary>
        /// The tag used for logging
        /// </summary>
        private static readonly string LOGTAG = Logging.Log.LogTagFromType<BackupHandler>();

        /// <summary>The options used for the backup</summary>
        private readonly Options m_options;
        /// <summary>The destination backend URL</summary>
        private readonly string m_backendurl;

        /// <summary>The local database for the backup</summary>
        private LocalBackupDatabase m_database;
        /// <summary>The current database transaction, if any</summary>
        private System.Data.IDbTransaction m_transaction;

        /// <summary>The filter applied to the source paths</summary>
        private Library.Utility.IFilter m_filter;
        /// <summary>The filter that ensures the source paths themselves are included</summary>
        private Library.Utility.IFilter m_sourceFilter;

        /// <summary>The result instance to report progress and results to</summary>
        private readonly BackupResults m_result;

        /// <summary>The token source used to signal cancellation of the backup</summary>
        public readonly CancellationTokenSource cancellationTokenSource = new CancellationTokenSource();

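        /// <summary>
        /// Creates a new backup handler
        /// </summary>
        /// <param name="backendurl">The destination backend URL</param>
        /// <param name="options">The options to use for the backup</param>
        /// <param name="results">The result instance to report progress and results to</param>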
        public BackupHandler(string backendurl, Options options, BackupResults results)
        {
            m_options = options;
            m_result = results;
            m_backendurl = backendurl;
                            
            if (options.AllowPassphraseChange)
                throw new UserInformationException(Strings.Common.PassphraseChangeUnsupported, "PassphraseChangeUnsupported");
        }
        
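        /// <summary>
        /// Creates the snapshot service according to the snapshot strategy option,
        /// falling back to a no-snapshot implementation if snapshots are disabled
        /// or cannot be created
        /// </summary>
        /// <param name="sources">The source paths that must be part of the snapshot</param>
        /// <param name="options">The options to use</param>
        /// <returns>The snapshot service</returns>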
        public static Snapshots.ISnapshotService GetSnapshot(string[] sources, Options options)
        {
            try
            {
                if (options.SnapShotStrategy != Options.OptimizationStrategy.Off)
                    return Duplicati.Library.Snapshots.SnapshotUtility.CreateSnapshot(sources, options.RawOptions);
            }
            catch (Exception ex)
            {
                if (options.SnapShotStrategy == Options.OptimizationStrategy.Required)
                    throw new UserInformationException(Strings.Common.SnapshotFailedError(ex.Message), "SnapshotFailed", ex);
                else if (options.SnapShotStrategy == Options.OptimizationStrategy.On)
                    Logging.Log.WriteWarningMessage(LOGTAG, "SnapshotFailed", ex, Strings.Common.SnapshotFailedError(ex.Message));
                else if (options.SnapShotStrategy == Options.OptimizationStrategy.Auto)
                    Logging.Log.WriteInformationMessage(LOGTAG, "SnapshotFailed", Strings.Common.SnapshotFailedError(ex.Message));
            }

            return Platform.IsClientPosix
                ? (Library.Snapshots.ISnapshotService)new Duplicati.Library.Snapshots.NoSnapshotLinux()
                : new Duplicati.Library.Snapshots.NoSnapshotWindows();
        }

        /// <summary>
        /// Creates an instance of the USN journal service, if the USN strategy is enabled
        /// </summary>
        /// <param name="sources">The source paths to scan</param>
        /// <param name="snapshot">The snapshot service to read from</param>
        /// <param name="filter">The filter to apply to the source paths</param>
        /// <param name="lastfilesetid">The ID of the previous fileset, used to look up the stored journal data</param>
        /// <returns>The USN journal service, or null if the USN strategy is off</returns>
        private UsnJournalService GetJournalService(IEnumerable<string> sources, ISnapshotService snapshot, IFilter filter, long lastfilesetid)
        {
            if (m_options.UsnStrategy == Options.OptimizationStrategy.Off) return null;

            var journalData = m_database.GetChangeJournalData(lastfilesetid);
            var service = new UsnJournalService(sources, snapshot, filter, m_options.FileAttributeFilter, m_options.SkipFilesLargerThan,
                journalData, cancellationTokenSource.Token);

            foreach (var volumeData in service.VolumeDataList)
            {
                if (volumeData.IsFullScan)
                {
                    if (volumeData.Exception == null || volumeData.Exception is UsnJournalSoftFailureException)
                    {
                        // Soft failure: fall back to a full scan for this volume
                        Logging.Log.WriteInformationMessage(LOGTAG, "SkipUsnForVolume",
                            "Performing full scan for volume \"{0}\": {1}", volumeData.Volume, volumeData.Exception?.Message);
                    }
                    else
                    {
                        if (m_options.UsnStrategy == Options.OptimizationStrategy.Auto)
                        {
                            Logging.Log.WriteInformationMessage(LOGTAG, "FailedToUseChangeJournal",
                                "Failed to use change journal for volume \"{0}\": {1}", volumeData.Volume, volumeData.Exception.Message);
                        }
                        else if (m_options.UsnStrategy == Options.OptimizationStrategy.On)
                        {
                            Logging.Log.WriteWarningMessage(LOGTAG, "FailedToUseChangeJournal", volumeData.Exception,
                                "Failed to use change journal for volume \"{0}\": {1}", volumeData.Volume, volumeData.Exception.Message);
                        }
                        else
                            throw volumeData.Exception;
                    }
                }
            }

            return service;
        }

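        /// <summary>
        /// Verifies the remote storage before the backup starts,
        /// optionally running a repair if verification fails and auto-cleanup is enabled
        /// </summary>
        /// <param name="backend">The backend manager to verify with</param>
        /// <param name="protectedfile">The name of a remote file that the verification must leave untouched</param>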
        private void PreBackupVerify(BackendManager backend, string protectedfile)
        {
            m_result.OperationProgressUpdater.UpdatePhase(OperationPhase.Backup_PreBackupVerify);
            using(new Logging.Timer(LOGTAG, "PreBackupVerify", "PreBackupVerify"))
            {
                try
                {
                    if (m_options.NoBackendverification)
                    {
                        FilelistProcessor.VerifyLocalList(backend, m_database);
                        UpdateStorageStatsFromDatabase();
                    }
                    else
                        FilelistProcessor.VerifyRemoteList(backend, m_options, m_database, m_result.BackendWriter, new string[] { protectedfile });
                }
                catch (Exception ex)
                {
                    if (m_options.AutoCleanup)
                    {
                        Logging.Log.WriteWarningMessage(LOGTAG, "BackendVerifyFailedAttemptingCleanup", ex, "Backend verification failed, attempting automatic cleanup");
                        m_result.RepairResults = new RepairResults(m_result);
                        new RepairHandler(backend.BackendUrl, m_options, (RepairResults)m_result.RepairResults).Run();

                        Logging.Log.WriteInformationMessage(LOGTAG, "BackendCleanupFinished", "Backend cleanup finished, retrying verification");
                        FilelistProcessor.VerifyRemoteList(backend, m_options, m_database, m_result.BackendWriter, new string[] { protectedfile });
                    }
                    else
                        throw;
                }
            }
        }

        /// <summary>
        /// Performs the bulk of the work by starting all relevant processes
        /// and waiting for them to complete
        /// </summary>
        private static async Task RunMainOperation(IEnumerable<string> sources, Snapshots.ISnapshotService snapshot, UsnJournalService journalService, Backup.BackupDatabase database, Backup.BackupStatsCollector stats, Options options, IFilter sourcefilter, IFilter filter, BackupResults result, Common.ITaskReader taskreader, long filesetid, long lastfilesetid, CancellationToken token)
        {
            using (new Logging.Timer(LOGTAG, "BackupMainOperation", "BackupMainOperation"))
            {
                // Make sure the CompressionHints table is initialized, otherwise all workers will initialize it
                var unused = options.CompressionHints.Count;

                Task all;
                using (new ChannelScope())
                {
                    all = Task.WhenAll(
                        new[]
                            {
                                    Backup.DataBlockProcessor.Run(database, options, taskreader),
                                    Backup.FileBlockProcessor.Run(snapshot, options, database, stats, taskreader, token),
                                    Backup.StreamBlockSplitter.Run(options, database, taskreader),
                                    Backup.FileEnumerationProcess.Run(sources, snapshot, journalService,
                                        options.FileAttributeFilter, sourcefilter, filter, options.SymlinkPolicy,
                                        options.HardlinkPolicy, options.ExcludeEmptyFolders, options.IgnoreFilenames,
                                        options.ChangedFilelist, taskreader, token),
                                    Backup.FilePreFilterProcess.Run(snapshot, options, stats, database),
                                    Backup.MetadataPreProcess.Run(snapshot, options, database, lastfilesetid, token),
                                    Backup.SpillCollectorProcess.Run(options, database, taskreader),
                                    Backup.ProgressHandler.Run(result)
                            }
                            // Spawn additional block hashers
                            .Union(
                                Enumerable.Range(0, options.ConcurrencyBlockHashers - 1).Select(x =>
                                    Backup.StreamBlockSplitter.Run(options, database, taskreader))
                            )
                            // Spawn additional compressors
                            .Union(
                                Enumerable.Range(0, options.ConcurrencyCompressors - 1).Select(x =>
                                    Backup.DataBlockProcessor.Run(database, options, taskreader))
                            )
                    );
                }

                await all.ConfigureAwait(false);

                if (options.ChangedFilelist != null && options.ChangedFilelist.Length >= 1)
                {
                    await database.AppendFilesFromPreviousSetAsync(options.DeletedFilelist);
                }
                else if (journalService != null)
                {
                    // Append files from the previous fileset, unless they were just enumerated by the journal scan
                    await database.AppendFilesFromPreviousSetWithPredicateAsync((path, fileSize) =>
                    {
                        if (journalService.IsPathEnumerated(path))
                            return true;

                        if (fileSize >= 0)
                        {
                            stats.AddExaminedFile(fileSize);
                        }
                        return false;
                    });

                    // store journal data in database, unless job is being canceled
                    if (!token.IsCancellationRequested)
                    {
                        var data = journalService.VolumeDataList.Where(p => p.JournalData != null).Select(p => p.JournalData).ToList();
                        if (data.Any())
                        {
                            // always record change journal data for current fileset (entry may be dropped later if nothing is uploaded)
                            await database.CreateChangeJournalDataAsync(data);

                            // update the previous fileset's change journal entry to resume at this point in case nothing was backed up
                            await database.UpdateChangeJournalDataAsync(data, lastfilesetid);
                        }
                    }
                }

                if (token.IsCancellationRequested)
                {
                    result.PartialBackup = true;
                    Log.WriteWarningMessage(LOGTAG, "CancellationRequested", null, "Cancellation was requested by user.");
                }
                else
                {
                    result.PartialBackup = false;
                    await database.UpdateFilesetAndMarkAsFullBackupAsync(filesetid);
                }

                result.OperationProgressUpdater.UpdatefileCount(result.ExaminedFiles, result.SizeOfExaminedFiles, true);
            }
        }

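        /// <summary>
        /// Applies the retention policy by deleting old filesets,
        /// or compacts the remote volumes if the last uploaded volume was small
        /// </summary>
        /// <param name="backend">The backend manager to use</param>
        /// <param name="lastVolumeSize">The size of the last uploaded volume, or -1 if no volume was uploaded</param>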
        private void CompactIfRequired(BackendManager backend, long lastVolumeSize)
        {
            var currentIsSmall = lastVolumeSize != -1 && lastVolumeSize <= m_options.SmallFileSize;

            if (m_options.KeepTime.Ticks > 0 || m_options.KeepVersions != 0 || m_options.RetentionPolicy.Count > 0)
            {
                m_result.OperationProgressUpdater.UpdatePhase(OperationPhase.Backup_Delete);
                m_result.DeleteResults = new DeleteResults(m_result);
                using(var db = new LocalDeleteDatabase(m_database))
                    new DeleteHandler(backend.BackendUrl, m_options, (DeleteResults)m_result.DeleteResults).DoRun(db, ref m_transaction, true, currentIsSmall, backend);

            }
            else if (currentIsSmall && !m_options.NoAutoCompact)
            {
                m_result.OperationProgressUpdater.UpdatePhase(OperationPhase.Backup_Compact);
                m_result.CompactResults = new CompactResults(m_result);
                using(var db = new LocalDeleteDatabase(m_database))
                    new CompactHandler(backend.BackendUrl, m_options, (CompactResults)m_result.CompactResults).DoCompact(db, true, ref m_transaction, backend);
            }
        }

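        /// <summary>
        /// Verifies the remote list after the backup completes,
        /// then tests a sample of the uploaded volumes if configured to do so
        /// </summary>
        /// <param name="currentFilelistVolume">The remote filename of the fileset that was just uploaded</param>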
        private void PostBackupVerification(string currentFilelistVolume)
        {
            m_result.OperationProgressUpdater.UpdatePhase(OperationPhase.Backup_PostBackupVerify);
            using(var backend = new BackendManager(m_backendurl, m_options, m_result.BackendWriter, m_database))
            {
                using (new Logging.Timer(LOGTAG, "AfterBackupVerify", "AfterBackupVerify"))
                    FilelistProcessor.VerifyRemoteList(backend, m_options, m_database, m_result.BackendWriter, new string[] { currentFilelistVolume });
                backend.WaitForComplete(m_database, null);
            }

            long remoteVolumeCount = m_database.GetRemoteVolumes().LongCount(x => x.State == RemoteVolumeState.Verified);
            long samplesToTest = Math.Max(m_options.BackupTestSampleCount, (long)Math.Round(remoteVolumeCount * (m_options.BackupTestPercentage / 100D), MidpointRounding.AwayFromZero));
            if (samplesToTest > 0 && remoteVolumeCount > 0)
            {
                m_result.OperationProgressUpdater.UpdatePhase(OperationPhase.Backup_PostBackupTest);
                m_result.TestResults = new TestResults(m_result);

                using(var testdb = new LocalTestDatabase(m_database))
                using(var backend = new BackendManager(m_backendurl, m_options, m_result.BackendWriter, testdb))
                    new TestHandler(m_backendurl, m_options, (TestResults)m_result.TestResults)
                        .DoRun(samplesToTest, testdb, backend);
            }
        }

        /// <summary>
        /// Handler for computing backend statistics, without relying on a remote folder listing
        /// </summary>
        private void UpdateStorageStatsFromDatabase()
        {
            if (m_result.BackendWriter != null)
            {
                m_result.BackendWriter.KnownFileCount = m_database.GetRemoteVolumes().Count();
                m_result.BackendWriter.KnownFileSize = m_database.GetRemoteVolumes().Select(x => Math.Max(0, x.Size)).Sum();

                m_result.BackendWriter.UnknownFileCount = 0;
                m_result.BackendWriter.UnknownFileSize = 0;

                m_result.BackendWriter.BackupListCount = m_database.FilesetTimes.Count();
                m_result.BackendWriter.LastBackupDate = m_database.FilesetTimes.FirstOrDefault().Value.ToLocalTime();

                // TODO: If we have a BackendManager, we should query through that
                using (var backend = DynamicLoader.BackendLoader.GetBackend(m_backendurl, m_options.RawOptions))
                {
                    if (backend is IQuotaEnabledBackend enabledBackend)
                    {
                        Library.Interface.IQuotaInfo quota = enabledBackend.Quota;
                        if (quota != null)
                        {
                            m_result.BackendWriter.TotalQuotaSpace = quota.TotalQuotaSpace;
                            m_result.BackendWriter.FreeQuotaSpace = quota.FreeQuotaSpace;
                        }
                    }
                }
                
                m_result.BackendWriter.AssignedQuotaSpace = m_options.QuotaSize;
            }
        }

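        /// <summary>
        /// Runs the backup synchronously by waiting for the async implementation
        /// </summary>
        /// <param name="sources">The source paths to back up</param>
        /// <param name="filter">The filter to apply to the source paths</param>
        /// <param name="token">The cancellation token</param>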
        public void Run(string[] sources, Library.Utility.IFilter filter, CancellationToken token)
        {
            RunAsync(sources, filter, token).WaitForTaskOrThrow();
        }

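        /// <summary>
        /// Combines the source exception with any exceptions from the given tasks
        /// </summary>
        /// <param name="source">The exception that caused the failure</param>
        /// <param name="tasks">The tasks to inspect for additional exceptions</param>
        /// <returns>The source exception if no task faulted, otherwise an AggregateException</returns>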
        private static Exception BuildException(Exception source, params Task[] tasks)
        {
            if (tasks == null || tasks.Length == 0)
                return source;

            var ex = new List<Exception>();
            ex.Add(source);

            foreach(var t in tasks)
                if (t != null)
                {
                    if (!t.IsCompleted && !t.IsFaulted && !t.IsCanceled)
                        t.Wait(500);

                    if (t.IsFaulted && t.Exception != null)
                        ex.Add(t.Exception);
                }

            if (ex.Count == 1)
                return ex.First();
            else
                return new AggregateException(ex.First().Message, ex);
        }

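        /// <summary>
        /// Sends a flush request to the uploader and waits for all uploads to complete
        /// </summary>
        /// <param name="result">The result instance used to report progress</param>
        /// <param name="uploadtarget">The channel used to communicate with the uploader</param>
        /// <param name="uploader">The uploader task</param>
        /// <returns>The size of the last uploaded volume, or -1 if the channel was retired</returns>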
        private static async Task<long> FlushBackend(BackupResults result, IWriteChannel<Backup.IUploadRequest> uploadtarget, Task uploader)
        {
            // Wait for upload completion
            result.OperationProgressUpdater.UpdatePhase(OperationPhase.Backup_WaitForUpload);

            if (await uploadtarget.IsRetiredAsync)
            {
                await uploader.ConfigureAwait(false);
                return -1;
            }

            var flushReq = new Backup.FlushRequest();

            await uploadtarget.WriteAsync(flushReq).ConfigureAwait(false);
            await uploader.ConfigureAwait(false);

            // Grab the size of the last uploaded volume
            return await flushReq.LastWriteSizeAsync;
        }

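        /// <summary>
        /// Performs the backup by setting up the database and channels,
        /// then running the processes that scan, block, and upload the data
        /// </summary>
        /// <param name="sources">The source paths to back up</param>
        /// <param name="filter">The filter to apply to the source paths</param>
        /// <param name="token">The cancellation token</param>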
        private async Task RunAsync(string[] sources, Library.Utility.IFilter filter, CancellationToken token)
        {
            m_result.OperationProgressUpdater.UpdatePhase(OperationPhase.Backup_Begin);                        
            
            // New isolated scope for each operation
            using(new IsolatedChannelScope())
            using(m_database = new LocalBackupDatabase(m_options.Dbpath, m_options))
            {
                m_result.SetDatabase(m_database);
                m_result.Dryrun = m_options.Dryrun;

                // Check that the database and options are consistent
                Utility.UpdateOptionsFromDb(m_database, m_options);
                Utility.VerifyParameters(m_database, m_options);

                var probe_path = m_database.GetFirstPath();
                if (probe_path != null && Util.GuessDirSeparator(probe_path) != Util.DirectorySeparatorString)
                    throw new UserInformationException(string.Format("The backup contains files that belong to another operating system. Proceeding with a backup would cause the database to contain paths from two different operating systems, which is not supported. To proceed without losing remote data, delete all filesets and make sure the --{0} option is set, then run the backup again to re-use the existing data on the remote store.", "no-auto-compact"), "CrossOsDatabaseReuseNotSupported");

                if (m_database.PartiallyRecreated)
                    throw new UserInformationException("The database was only partially recreated. This database may be incomplete and the repair process is not allowed to alter remote files as that could result in data loss.", "DatabaseIsPartiallyRecreated");
                
                if (m_database.RepairInProgress)
                    throw new UserInformationException("A repair of the database was attempted but did not complete. The database may be incomplete and the backup process cannot continue. You may delete the local database and attempt to repair it again.", "DatabaseRepairInProgress");

                // If there is no filter, we set an empty filter to simplify the code
                // If there is a filter, we make sure that the sources are included
                m_filter = filter ?? new Library.Utility.FilterExpression();
                m_sourceFilter = new Library.Utility.FilterExpression(sources, true);

                Task parallelScanner = null;
                Task uploaderTask = null;
                try
                {
                    // Setup runners and instances here
                    using(var db = new Backup.BackupDatabase(m_database, m_options))
                    using(var backendManager = new BackendManager(m_backendurl, m_options, m_result.BackendWriter, m_database))
                    using(var filesetvolume = new FilesetVolumeWriter(m_options, m_database.OperationTimestamp))
                    using(var stats = new Backup.BackupStatsCollector(m_result))
                    // Keep a reference to the channel to prevent it from being shut down
                    using(var uploadtarget = ChannelManager.GetChannel(Backup.Channels.BackendRequest.ForWrite))
                    {
                        long filesetid;
                        var counterToken = new CancellationTokenSource();
                        var uploader = new Backup.BackendUploader(() => DynamicLoader.BackendLoader.GetBackend(m_backendurl, m_options.RawOptions), m_options, db, m_result.TaskReader, stats);
                        using (var snapshot = GetSnapshot(sources, m_options))
                        {
                            try
                            {
                                // Make sure the database is sane
                                await db.VerifyConsistencyAsync(m_options.Blocksize, m_options.BlockhashSize, !m_options.DisableFilelistConsistencyChecks);

                                // Start the uploader process
                                uploaderTask = uploader.Run();

                                // If we have an interrupted backup, grab the fileset
                                string lastTempFilelist = null;
                                long lastTempFilesetId = -1;
                                if (!m_options.DisableSyntheticFilelist)
                                {
                                    var candidates = (await db.GetIncompleteFilesetsAsync()).OrderBy(x => x.Value).ToArray();
                                    if (candidates.Any())
                                    {
                                        lastTempFilesetId = candidates.Last().Key;
                                        lastTempFilelist = m_database.GetRemoteVolumeFromFilesetID(lastTempFilesetId).Name;
                                    }
                                }

                                // TODO: Rewrite to use the uploader process, or the BackendHandler interface
                                // Do a remote verification, unless disabled
                                PreBackupVerify(backendManager, lastTempFilelist);

                                // If the previous backup was interrupted, send a synthetic list
                                await Backup.UploadSyntheticFilelist.Run(db, m_options, m_result, m_result.TaskReader, lastTempFilelist, lastTempFilesetId);

                                // Grab the previous backup ID, if any
                                var prevfileset = m_database.FilesetTimes.FirstOrDefault();
                                if (prevfileset.Value.ToUniversalTime() > m_database.OperationTimestamp.ToUniversalTime())
                                    throw new Exception(string.Format("The previous backup has time {0}, but this backup has time {1}. Something is wrong with the clock.", prevfileset.Value.ToLocalTime(), m_database.OperationTimestamp.ToLocalTime()));
                                
                                var lastfilesetid = prevfileset.Value.Ticks == 0 ? -1 : prevfileset.Key;

                                // Rebuild any index files that are missing
                                await Backup.RecreateMissingIndexFiles.Run(db, m_options, m_result.TaskReader);

                                // Prepare the operation by registering the filelist
                                m_result.OperationProgressUpdater.UpdatePhase(OperationPhase.Backup_ProcessingFiles);

                                var repcnt = 0;
                                while(repcnt < 100 && await db.GetRemoteVolumeIDAsync(filesetvolume.RemoteFilename) >= 0)
                                    filesetvolume.ResetRemoteFilename(m_options, m_database.OperationTimestamp.AddSeconds(repcnt++));

                                if (await db.GetRemoteVolumeIDAsync(filesetvolume.RemoteFilename) >= 0)
                                    throw new Exception("Unable to generate a unique fileset name");

                                var filesetvolumeid = await db.RegisterRemoteVolumeAsync(filesetvolume.RemoteFilename, RemoteVolumeType.Files, RemoteVolumeState.Temporary);
                                filesetid = await db.CreateFilesetAsync(filesetvolumeid, VolumeBase.ParseFilename(filesetvolume.RemoteFilename).Time);

                                // create USN-based scanner if enabled
                                var journalService = GetJournalService(sources, snapshot, filter, lastfilesetid);

                                // Start parallel scan, or use the database
                                if (m_options.DisableFileScanner)
                                {
                                    var d = m_database.GetLastBackupFileCountAndSize();
                                    m_result.OperationProgressUpdater.UpdatefileCount(d.Item1, d.Item2, true);
                                }
                                else
                                {
                                    parallelScanner = Backup.CountFilesHandler.Run(sources, snapshot, journalService, m_result, m_options, m_sourceFilter, m_filter, m_result.TaskReader, counterToken.Token);
                                }

                                // Run the backup operation
                                if (await m_result.TaskReader.ProgressAsync)
                                {
                                    await RunMainOperation(sources, snapshot, journalService, db, stats, m_options, m_sourceFilter, m_filter, m_result, m_result.TaskReader, filesetid, lastfilesetid, token).ConfigureAwait(false);
                                }
                            }
                            finally
                            {
                                // If the scanner is still running for some reason, make sure we kill it now
                                counterToken.Cancel();
                            }
                        }

                        // Add the fileset file to the dlist file
                        filesetvolume.CreateFilesetFile(!token.IsCancellationRequested);

                        // Ensure the database is in a sane state after adding data
                        using (new Logging.Timer(LOGTAG, "VerifyConsistency", "VerifyConsistency"))
                            await db.VerifyConsistencyAsync(m_options.Blocksize, m_options.BlockhashSize, false);

                        // Send the actual filelist
                        if (await m_result.TaskReader.ProgressAsync)
                            await Backup.UploadRealFilelist.Run(m_result, db, m_options, filesetvolume, filesetid, m_result.TaskReader);

                        // Wait for upload completion
                        m_result.OperationProgressUpdater.UpdatePhase(OperationPhase.Backup_WaitForUpload);
                        var lastVolumeSize = await FlushBackend(m_result, uploadtarget, uploaderTask).ConfigureAwait(false);

                        // Make sure we have the database up-to-date
                        await db.CommitTransactionAsync("CommitAfterUpload", false);

                        // TODO: Remove this later
                        m_transaction = m_database.BeginTransaction();

                        if (await m_result.TaskReader.ProgressAsync)
                            CompactIfRequired(backendManager, lastVolumeSize);

                        if (m_options.UploadVerificationFile && await m_result.TaskReader.ProgressAsync)
                        {
                            m_result.OperationProgressUpdater.UpdatePhase(OperationPhase.Backup_VerificationUpload);
                            FilelistProcessor.UploadVerificationFile(backendManager.BackendUrl, m_options, m_result.BackendWriter, m_database, m_transaction);
                        }

                        if (m_options.Dryrun)
                        {
                            m_transaction.Rollback();
                            m_transaction = null;
                        }
                        else
                        {
                            using(new Logging.Timer(LOGTAG, "CommitFinalizingBackup", "CommitFinalizingBackup"))
                                m_transaction.Commit();
                                
                            m_transaction = null;

                            if (m_result.TaskControlRendevouz() != TaskControlState.Abort)
                            {
                                if (m_options.NoBackendverification)
                                    UpdateStorageStatsFromDatabase();
                                else
                                    PostBackupVerification(filesetvolume.RemoteFilename);
                            }
                        }
                        
                        m_database.WriteResults();                    
                        m_database.PurgeLogData(m_options.LogRetention);
                        m_database.PurgeDeletedVolumes(DateTime.UtcNow);

                        if (m_options.AutoVacuum)
                        {
                            m_result.VacuumResults = new VacuumResults(m_result);
                            new VacuumHandler(m_options, (VacuumResults)m_result.VacuumResults).Run();
                        }
                        m_result.OperationProgressUpdater.UpdatePhase(OperationPhase.Backup_Complete);
                        return;
                    }
                }
                catch (Exception ex)
                {
                    var aex = BuildException(ex, uploaderTask, parallelScanner);
                    Logging.Log.WriteErrorMessage(LOGTAG, "FatalError", ex, "Fatal error");
                    if (aex == ex)
                        throw;
                    
                    throw aex;
                }
                finally
                {
                    if (parallelScanner != null && !parallelScanner.IsCompleted)
                        parallelScanner.Wait(500);

                    // TODO: Do we want to commit? Always?
                    if (m_transaction != null)
                        try { m_transaction.Rollback(); }
                        catch (Exception ex) { Logging.Log.WriteErrorMessage(LOGTAG, "RollbackError", ex, "Rollback error: {0}", ex.Message); }                
                }
            }
        }

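        /// <summary>
        /// Disposes the handler, recording the end time on the result if it was not already set
        /// </summary>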
        public void Dispose()
        {
            if (m_result.EndTime.Ticks == 0)
                m_result.EndTime = DateTime.UtcNow;
        }
    }
}