author | GitLab Bot <gitlab-bot@gitlab.com> | 2020-02-19 21:09:10 +0300
committer | GitLab Bot <gitlab-bot@gitlab.com> | 2020-02-19 21:09:10 +0300
commit | 33795139ea8e72756bee3675b4e16387425e6ab1 (patch)
tree | 3ca568fca61482e57810ee30ad5ce4b964a82c4e /lib/gitlab/database
parent | c7e385e282bcb8505589bce526e692b7bb819ffa (diff)
Add latest changes from gitlab-org/gitlab@master
Diffstat (limited to 'lib/gitlab/database')
-rw-r--r-- | lib/gitlab/database/migration_helpers.rb | 42
1 files changed, 35 insertions, 7 deletions
diff --git a/lib/gitlab/database/migration_helpers.rb b/lib/gitlab/database/migration_helpers.rb
index 3b6684b861c..6317e034cfb 100644
--- a/lib/gitlab/database/migration_helpers.rb
+++ b/lib/gitlab/database/migration_helpers.rb
@@ -688,7 +688,7 @@ module Gitlab
           start_id, end_id = batch.pluck('MIN(id), MAX(id)').first
           max_index = index
 
-          BackgroundMigrationWorker.perform_in(
+          migrate_in(
             index * interval,
             'CopyColumn',
             [table, column, temp_column, start_id, end_id]
@@ -697,7 +697,7 @@ module Gitlab
 
         # Schedule the renaming of the column to happen (initially) 1 hour after
         # the last batch finished.
-        BackgroundMigrationWorker.perform_in(
+        migrate_in(
           (max_index * interval) + 1.hour,
           'CleanupConcurrentTypeChange',
           [table, column, temp_column]
@@ -779,7 +779,7 @@ module Gitlab
           start_id, end_id = batch.pluck('MIN(id), MAX(id)').first
           max_index = index
 
-          BackgroundMigrationWorker.perform_in(
+          migrate_in(
             index * interval,
             'CopyColumn',
             [table, old_column, new_column, start_id, end_id]
@@ -788,7 +788,7 @@ module Gitlab
 
         # Schedule the renaming of the column to happen (initially) 1 hour after
         # the last batch finished.
-        BackgroundMigrationWorker.perform_in(
+        migrate_in(
           (max_index * interval) + 1.hour,
           'CleanupConcurrentRename',
           [table, old_column, new_column]
@@ -1024,14 +1024,14 @@ into similar problems in the future (e.g. when new tables are created).
             # We push multiple jobs at a time to reduce the time spent in
             # Sidekiq/Redis operations. We're using this buffer based approach so we
             # don't need to run additional queries for every range.
-            BackgroundMigrationWorker.bulk_perform_async(jobs)
+            bulk_migrate_async(jobs)
             jobs.clear
           end
 
           jobs << [job_class_name, [start_id, end_id]]
         end
 
-        BackgroundMigrationWorker.bulk_perform_async(jobs) unless jobs.empty?
+        bulk_migrate_async(jobs) unless jobs.empty?
       end
 
       # Queues background migration jobs for an entire table, batched by ID range.
@@ -1074,7 +1074,7 @@ into similar problems in the future (e.g. when new tables are created).
           # `BackgroundMigrationWorker.bulk_perform_in` schedules all jobs for
           # the same time, which is not helpful in most cases where we wish to
           # spread the work over time.
-          BackgroundMigrationWorker.perform_in(delay_interval * index, job_class_name, [start_id, end_id])
+          migrate_in(delay_interval * index, job_class_name, [start_id, end_id])
        end
      end
 
@@ -1133,6 +1133,30 @@ into similar problems in the future (e.g. when new tables are created).
         execute(sql)
       end
 
+      def migrate_async(*args)
+        with_migration_context do
+          BackgroundMigrationWorker.perform_async(*args)
+        end
+      end
+
+      def migrate_in(*args)
+        with_migration_context do
+          BackgroundMigrationWorker.perform_in(*args)
+        end
+      end
+
+      def bulk_migrate_in(*args)
+        with_migration_context do
+          BackgroundMigrationWorker.bulk_perform_in(*args)
+        end
+      end
+
+      def bulk_migrate_async(*args)
+        with_migration_context do
+          BackgroundMigrationWorker.bulk_perform_async(*args)
+        end
+      end
+
       private
 
       def tables_match?(target_table, foreign_key_table)
@@ -1191,6 +1215,10 @@ into similar problems in the future (e.g. when new tables are created).
           your migration class
         ERROR
       end
+
+      def with_migration_context(&block)
+        Gitlab::ApplicationContext.with_context(caller_id: self.class.to_s, &block)
+      end
     end
   end
 end
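In short, every scheduling path in these helpers now goes through the new wrappers, so each queued BackgroundMigrationWorker job runs inside Gitlab::ApplicationContext.with_context with a caller_id of the migration class that scheduled it. Below is a minimal sketch of how a migration might call the wrappers; the migration class name, job name ('BackfillExampleData'), ID ranges, and delay are illustrative assumptions and are not part of this commit.

# frozen_string_literal: true

# Hypothetical post-deployment migration using the new wrappers.
# All names and numbers here are made up for illustration.
class ScheduleExampleBackfill < ActiveRecord::Migration[6.0]
  include Gitlab::Database::MigrationHelpers

  DOWNTIME = false

  disable_ddl_transaction!

  def up
    # migrate_in wraps BackgroundMigrationWorker.perform_in in
    # Gitlab::ApplicationContext.with_context(caller_id: self.class.to_s),
    # so the scheduled job is attributed to this migration in Sidekiq logs.
    migrate_in(5.minutes, 'BackfillExampleData', [1, 10_000])

    # Bulk variant: each element is [job_class_name, arguments],
    # matching the jobs array built by the helpers in the diff above.
    bulk_migrate_async([['BackfillExampleData', [10_001, 20_000]]])
  end

  def down
    # No-op: already-queued background jobs are not unscheduled here.
  end
end

The practical effect of the change is observability rather than behaviour: the jobs run exactly as before, but Sidekiq log entries for background migrations now carry the scheduling migration's class name as their caller_id.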