gitlab.com/gitlab-org/gitlab-foss.git
author     GitLab Bot <gitlab-bot@gitlab.com>  2021-05-19 18:44:42 +0300
committer  GitLab Bot <gitlab-bot@gitlab.com>  2021-05-19 18:44:42 +0300
commit     4555e1b21c365ed8303ffb7a3325d773c9b8bf31 (patch)
tree       5423a1c7516cffe36384133ade12572cf709398d /spec/lib/gitlab/database
parent     e570267f2f6b326480d284e0164a6464ba4081bc (diff)
Add latest changes from gitlab-org/gitlab@13-12-stable-ee (tag: v13.12.0-rc42)
Diffstat (limited to 'spec/lib/gitlab/database')
-rw-r--r--  spec/lib/gitlab/database/background_migration/batch_optimizer_spec.rb                    102
-rw-r--r--  spec/lib/gitlab/database/background_migration/batched_job_spec.rb                          87
-rw-r--r--  spec/lib/gitlab/database/background_migration/batched_migration_runner_spec.rb            110
-rw-r--r--  spec/lib/gitlab/database/background_migration/batched_migration_spec.rb                   120
-rw-r--r--  spec/lib/gitlab/database/background_migration/batched_migration_wrapper_spec.rb           106
-rw-r--r--  spec/lib/gitlab/database/migration_helpers/cascading_namespace_settings_spec.rb            50
-rw-r--r--  spec/lib/gitlab/database/migration_helpers_spec.rb                                        232
-rw-r--r--  spec/lib/gitlab/database/migrations/instrumentation_spec.rb                                 2
-rw-r--r--  spec/lib/gitlab/database/migrations/observers/query_log_spec.rb                            38
-rw-r--r--  spec/lib/gitlab/database/partitioning_migration_helpers/foreign_key_helpers_spec.rb         1
-rw-r--r--  spec/lib/gitlab/database/partitioning_migration_helpers/index_helpers_spec.rb               2
-rw-r--r--  spec/lib/gitlab/database/partitioning_migration_helpers/table_management_helpers_spec.rb    1
-rw-r--r--  spec/lib/gitlab/database/reindexing/concurrent_reindex_spec.rb                             36
-rw-r--r--  spec/lib/gitlab/database/schema_cache_with_renamed_table_spec.rb                           83
-rw-r--r--  spec/lib/gitlab/database/with_lock_retries_outside_transaction_spec.rb                    244
-rw-r--r--  spec/lib/gitlab/database/with_lock_retries_spec.rb                                          18
16 files changed, 1131 insertions, 101 deletions
diff --git a/spec/lib/gitlab/database/background_migration/batch_optimizer_spec.rb b/spec/lib/gitlab/database/background_migration/batch_optimizer_spec.rb
new file mode 100644
index 00000000000..95863ce3765
--- /dev/null
+++ b/spec/lib/gitlab/database/background_migration/batch_optimizer_spec.rb
@@ -0,0 +1,102 @@
+# frozen_string_literal: true
+
+require 'spec_helper'
+
+RSpec.describe Gitlab::Database::BackgroundMigration::BatchOptimizer do
+ describe '#optimize' do
+ subject { described_class.new(migration, number_of_jobs: number_of_jobs, ema_alpha: ema_alpha).optimize! }
+
+ let(:migration) { create(:batched_background_migration, batch_size: batch_size, sub_batch_size: 100, interval: 120) }
+
+ let(:batch_size) { 10_000 }
+
+ let_it_be(:number_of_jobs) { 5 }
+ let_it_be(:ema_alpha) { 0.4 }
+
+ let_it_be(:target_efficiency) { described_class::TARGET_EFFICIENCY.max }
+
+ def mock_efficiency(eff)
+ expect(migration).to receive(:smoothed_time_efficiency).with(number_of_jobs: number_of_jobs, alpha: ema_alpha).and_return(eff)
+ end
+
+ it 'with unknown time efficiency, it keeps the batch size' do
+ mock_efficiency(nil)
+
+ expect { subject }.not_to change { migration.reload.batch_size }
+ end
+
+ it 'with a time efficiency of 95%, it keeps the batch size' do
+ mock_efficiency(0.95)
+
+ expect { subject }.not_to change { migration.reload.batch_size }
+ end
+
+ it 'with a time efficiency of 90%, it keeps the batch size' do
+ mock_efficiency(0.9)
+
+ expect { subject }.not_to change { migration.reload.batch_size }
+ end
+
+ it 'with a time efficiency of 85%, it increases the batch size' do
+ time_efficiency = 0.85
+
+ mock_efficiency(time_efficiency)
+
+ new_batch_size = ((target_efficiency / time_efficiency) * batch_size).to_i
+
+ expect { subject }.to change { migration.reload.batch_size }.from(batch_size).to(new_batch_size)
+ end
+
+ it 'with a time efficiency of 110%, it decreases the batch size' do
+ time_efficiency = 1.1
+
+ mock_efficiency(time_efficiency)
+
+ new_batch_size = ((target_efficiency / time_efficiency) * batch_size).to_i
+
+ expect { subject }.to change { migration.reload.batch_size }.from(batch_size).to(new_batch_size)
+ end
+
+ context 'reaching the upper limit for an increase' do
+ it 'caps the batch size multiplier at 20% when increasing' do
+ time_efficiency = 0.1 # this would result in a factor of 10 if not limited
+
+ mock_efficiency(time_efficiency)
+
+ new_batch_size = (1.2 * batch_size).to_i
+
+ expect { subject }.to change { migration.reload.batch_size }.from(batch_size).to(new_batch_size)
+ end
+
+ it 'does not limit the decrease multiplier' do
+ time_efficiency = 10
+
+ mock_efficiency(time_efficiency)
+
+ new_batch_size = (0.1 * batch_size).to_i
+
+ expect { subject }.to change { migration.reload.batch_size }.from(batch_size).to(new_batch_size)
+ end
+ end
+
+ context 'reaching the upper limit for the batch size' do
+ let(:batch_size) { 1_950_000 }
+
+ it 'caps the batch size at 10M' do
+ mock_efficiency(0.7)
+
+ expect { subject }.to change { migration.reload.batch_size }.to(2_000_000)
+ end
+ end
+
+ context 'reaching the lower limit for the batch size' do
+ let(:batch_size) { 1_050 }
+
+ it 'caps the batch size at 1k' do
+ mock_efficiency(1.1)
+
+ expect { subject }.to change { migration.reload.batch_size }.to(1_000)
+ end
+ end
+ end
+end
diff --git a/spec/lib/gitlab/database/background_migration/batched_job_spec.rb b/spec/lib/gitlab/database/background_migration/batched_job_spec.rb
index 1020aafcf08..78e0b7627e9 100644
--- a/spec/lib/gitlab/database/background_migration/batched_job_spec.rb
+++ b/spec/lib/gitlab/database/background_migration/batched_job_spec.rb
@@ -9,6 +9,42 @@ RSpec.describe Gitlab::Database::BackgroundMigration::BatchedJob, type: :model d
it { is_expected.to belong_to(:batched_migration).with_foreign_key(:batched_background_migration_id) }
end
+ describe 'scopes' do
+ let_it_be(:fixed_time) { Time.new(2021, 04, 27, 10, 00, 00, 00) }
+
+ let_it_be(:pending_job) { create(:batched_background_migration_job, status: :pending, updated_at: fixed_time) }
+ let_it_be(:running_job) { create(:batched_background_migration_job, status: :running, updated_at: fixed_time) }
+ let_it_be(:stuck_job) { create(:batched_background_migration_job, status: :pending, updated_at: fixed_time - described_class::STUCK_JOBS_TIMEOUT) }
+ let_it_be(:failed_job) { create(:batched_background_migration_job, status: :failed, attempts: 1) }
+
+ before_all do
+ create(:batched_background_migration_job, status: :failed, attempts: described_class::MAX_ATTEMPTS)
+ create(:batched_background_migration_job, status: :succeeded)
+ end
+
+ before do
+ travel_to fixed_time
+ end
+
+ describe '.active' do
+ it 'returns active jobs' do
+ expect(described_class.active).to contain_exactly(pending_job, running_job, stuck_job)
+ end
+ end
+
+ describe '.stuck' do
+ it 'returns stuck jobs' do
+ expect(described_class.stuck).to contain_exactly(stuck_job)
+ end
+ end
+
+ describe '.retriable' do
+ it 'returns retriable jobs' do
+ expect(described_class.retriable).to contain_exactly(failed_job, stuck_job)
+ end
+ end
+ end
+
describe 'delegated batched_migration attributes' do
let(:batched_job) { build(:batched_background_migration_job) }
let(:batched_migration) { batched_job.batched_migration }
@@ -47,4 +83,55 @@ RSpec.describe Gitlab::Database::BackgroundMigration::BatchedJob, type: :model d
end
end
end
+
+ describe '#time_efficiency' do
+ subject { job.time_efficiency }
+
+ let(:migration) { build(:batched_background_migration, interval: 120.seconds) }
+ let(:job) { build(:batched_background_migration_job, status: :succeeded, batched_migration: migration) }
+
+ context 'when job has not yet succeeded' do
+ let(:job) { build(:batched_background_migration_job, status: :running) }
+
+ it 'returns nil' do
+ expect(subject).to be_nil
+ end
+ end
+
+ context 'when finished_at is not set' do
+ it 'returns nil' do
+ job.started_at = Time.zone.now
+
+ expect(subject).to be_nil
+ end
+ end
+
+ context 'when started_at is not set' do
+ it 'returns nil' do
+ job.finished_at = Time.zone.now
+
+ expect(subject).to be_nil
+ end
+ end
+
+ context 'when job has finished' do
+ it 'returns ratio of duration to interval, here: 0.5' do
+ freeze_time do
+ job.started_at = Time.zone.now - migration.interval / 2
+ job.finished_at = Time.zone.now
+
+ expect(subject).to eq(0.5)
+ end
+ end
+
+ it 'returns ratio of duration to interval, here: 1' do
+ freeze_time do
+ job.started_at = Time.zone.now - migration.interval
+ job.finished_at = Time.zone.now
+
+ expect(subject).to eq(1)
+ end
+ end
+ end
+ end
end
diff --git a/spec/lib/gitlab/database/background_migration/batched_migration_runner_spec.rb b/spec/lib/gitlab/database/background_migration/batched_migration_runner_spec.rb
index 7d0e10b62c6..9f0493ab0d7 100644
--- a/spec/lib/gitlab/database/background_migration/batched_migration_runner_spec.rb
+++ b/spec/lib/gitlab/database/background_migration/batched_migration_runner_spec.rb
@@ -17,9 +17,9 @@ RSpec.describe Gitlab::Database::BackgroundMigration::BatchedMigrationRunner do
end
it 'marks the migration as finished' do
- relation = Gitlab::Database::BackgroundMigration::BatchedMigration.finished.where(id: migration.id)
+ runner.run_migration_job(migration)
- expect { runner.run_migration_job(migration) }.to change { relation.count }.by(1)
+ expect(migration.reload).to be_finished
end
end
@@ -50,6 +50,15 @@ RSpec.describe Gitlab::Database::BackgroundMigration::BatchedMigrationRunner do
batch_size: migration.batch_size,
sub_batch_size: migration.sub_batch_size)
end
+
+ it 'optimizes the migration after executing the job' do
+ migration.update!(min_value: event1.id, max_value: event2.id)
+
+ expect(migration_wrapper).to receive(:perform).ordered
+ expect(migration).to receive(:optimize!).ordered
+
+ runner.run_migration_job(migration)
+ end
end
context 'when the batch maximum exceeds the migration maximum' do
@@ -83,7 +92,7 @@ RSpec.describe Gitlab::Database::BackgroundMigration::BatchedMigrationRunner do
let!(:event3) { create(:event) }
let!(:migration) do
- create(:batched_background_migration, :active, batch_size: 2, min_value: event1.id, max_value: event3.id)
+ create(:batched_background_migration, :active, batch_size: 2, min_value: event1.id, max_value: event2.id)
end
let!(:previous_job) do
@@ -92,14 +101,24 @@ RSpec.describe Gitlab::Database::BackgroundMigration::BatchedMigrationRunner do
min_value: event1.id,
max_value: event2.id,
batch_size: 2,
- sub_batch_size: 1)
+ sub_batch_size: 1,
+ status: :succeeded
+ )
end
let(:job_relation) do
Gitlab::Database::BackgroundMigration::BatchedJob.where(batched_background_migration_id: migration.id)
end
+ context 'when the migration has no batches remaining' do
+ it_behaves_like 'it has completed the migration'
+ end
+
context 'when the migration has batches to process' do
+ before do
+ migration.update!(max_value: event3.id)
+ end
+
it 'runs the migration job for the next batch' do
expect(migration_wrapper).to receive(:perform) do |job_record|
expect(job_record).to eq(job_relation.last)
@@ -123,17 +142,82 @@ RSpec.describe Gitlab::Database::BackgroundMigration::BatchedMigrationRunner do
end
end
- context 'when the migration has no batches remaining' do
+ context 'when migration has failed jobs' do
before do
- create(:batched_background_migration_job,
- batched_migration: migration,
- min_value: event3.id,
- max_value: event3.id,
- batch_size: 2,
- sub_batch_size: 1)
+ previous_job.update!(status: :failed)
end
- it_behaves_like 'it has completed the migration'
+ it 'retries the failed job' do
+ expect(migration_wrapper).to receive(:perform) do |job_record|
+ expect(job_record).to eq(previous_job)
+ end
+
+ expect { runner.run_migration_job(migration) }.to change { job_relation.count }.by(0)
+ end
+
+ context 'when failed job has reached the maximum number of attempts' do
+ before do
+ previous_job.update!(attempts: Gitlab::Database::BackgroundMigration::BatchedJob::MAX_ATTEMPTS)
+ end
+
+ it 'marks the migration as failed' do
+ expect(migration_wrapper).not_to receive(:perform)
+
+ expect { runner.run_migration_job(migration) }.to change { job_relation.count }.by(0)
+
+ expect(migration).to be_failed
+ end
+ end
+ end
+
+ context 'when migration has stuck jobs' do
+ before do
+ previous_job.update!(status: :running, updated_at: 1.hour.ago - Gitlab::Database::BackgroundMigration::BatchedJob::STUCK_JOBS_TIMEOUT)
+ end
+
+ it 'retries the stuck job' do
+ expect(migration_wrapper).to receive(:perform) do |job_record|
+ expect(job_record).to eq(previous_job)
+ end
+
+ expect { runner.run_migration_job(migration.reload) }.to change { job_relation.count }.by(0)
+ end
+ end
+
+ context 'when migration has possible stuck jobs' do
+ before do
+ previous_job.update!(status: :running, updated_at: 1.hour.from_now - Gitlab::Database::BackgroundMigration::BatchedJob::STUCK_JOBS_TIMEOUT)
+ end
+
+ it 'keeps the migration active' do
+ expect(migration_wrapper).not_to receive(:perform)
+
+ expect { runner.run_migration_job(migration) }.to change { job_relation.count }.by(0)
+
+ expect(migration.reload).to be_active
+ end
+ end
+
+ context 'when the migration has batches to process and failed jobs' do
+ before do
+ migration.update!(max_value: event3.id)
+ previous_job.update!(status: :failed)
+ end
+
+ it 'runs next batch then retries the failed job' do
+ expect(migration_wrapper).to receive(:perform) do |job_record|
+ expect(job_record).to eq(job_relation.last)
+ job_record.update!(status: :succeeded)
+ end
+
+ expect { runner.run_migration_job(migration) }.to change { job_relation.count }.by(1)
+
+ expect(migration_wrapper).to receive(:perform) do |job_record|
+ expect(job_record).to eq(previous_job)
+ end
+
+ expect { runner.run_migration_job(migration.reload) }.to change { job_relation.count }.by(0)
+ end
end
end
end
@@ -180,10 +264,12 @@ RSpec.describe Gitlab::Database::BackgroundMigration::BatchedMigrationRunner do
it 'runs all jobs inline until finishing the migration' do
expect(migration_wrapper).to receive(:perform) do |job_record|
expect(job_record).to eq(job_relation.first)
+ job_record.update!(status: :succeeded)
end
expect(migration_wrapper).to receive(:perform) do |job_record|
expect(job_record).to eq(job_relation.last)
+ job_record.update!(status: :succeeded)
end
expect { runner.run_entire_migration(migration) }.to change { job_relation.count }.by(2)
diff --git a/spec/lib/gitlab/database/background_migration/batched_migration_spec.rb b/spec/lib/gitlab/database/background_migration/batched_migration_spec.rb
index 261e23d0745..43e34325419 100644
--- a/spec/lib/gitlab/database/background_migration/batched_migration_spec.rb
+++ b/spec/lib/gitlab/database/background_migration/batched_migration_spec.rb
@@ -119,7 +119,13 @@ RSpec.describe Gitlab::Database::BackgroundMigration::BatchedMigration, type: :m
end
describe '#create_batched_job!' do
- let(:batched_migration) { create(:batched_background_migration) }
+ let(:batched_migration) do
+ create(:batched_background_migration,
+ batch_size: 999,
+ sub_batch_size: 99,
+ pause_ms: 250
+ )
+ end
it 'creates a batched_job with the correct batch configuration' do
batched_job = batched_migration.create_batched_job!(1, 5)
@@ -128,7 +134,9 @@ RSpec.describe Gitlab::Database::BackgroundMigration::BatchedMigration, type: :m
min_value: 1,
max_value: 5,
batch_size: batched_migration.batch_size,
- sub_batch_size: batched_migration.sub_batch_size)
+ sub_batch_size: batched_migration.sub_batch_size,
+ pause_ms: 250
+ )
end
end
@@ -196,6 +204,22 @@ RSpec.describe Gitlab::Database::BackgroundMigration::BatchedMigration, type: :m
it_behaves_like 'an attr_writer that demodulizes assigned class names', :batch_class_name
end
+ describe '#migrated_tuple_count' do
+ subject { batched_migration.migrated_tuple_count }
+
+ let(:batched_migration) { create(:batched_background_migration) }
+
+ before do
+ create_list(:batched_background_migration_job, 5, status: :succeeded, batch_size: 1_000, batched_migration: batched_migration)
+ create_list(:batched_background_migration_job, 1, status: :running, batch_size: 1_000, batched_migration: batched_migration)
+ create_list(:batched_background_migration_job, 1, status: :failed, batch_size: 1_000, batched_migration: batched_migration)
+ end
+
+ it 'sums the batch_size of succeeded jobs' do
+ expect(subject).to eq(5_000)
+ end
+ end
+
describe '#prometheus_labels' do
let(:batched_migration) { create(:batched_background_migration, job_class_name: 'TestMigration', table_name: 'foo', column_name: 'bar') }
@@ -208,4 +232,96 @@ RSpec.describe Gitlab::Database::BackgroundMigration::BatchedMigration, type: :m
expect(batched_migration.prometheus_labels).to eq(labels)
end
end
+
+ describe '#smoothed_time_efficiency' do
+ let(:migration) { create(:batched_background_migration, interval: 120.seconds) }
+ let(:end_time) { Time.zone.now }
+
+ around do |example|
+ freeze_time do
+ example.run
+ end
+ end
+
+ let(:common_attrs) do
+ {
+ status: :succeeded,
+ batched_migration: migration,
+ finished_at: end_time
+ }
+ end
+
+ context 'when there are not enough jobs' do
+ subject { migration.smoothed_time_efficiency(number_of_jobs: 10) }
+
+ it 'returns nil' do
+ create_list(:batched_background_migration_job, 9, **common_attrs)
+
+ expect(subject).to be_nil
+ end
+ end
+
+ context 'when there are enough jobs' do
+ subject { migration.smoothed_time_efficiency(number_of_jobs: number_of_jobs) }
+
+ let!(:jobs) { create_list(:batched_background_migration_job, number_of_jobs, **common_attrs.merge(batched_migration: migration)) }
+ let(:number_of_jobs) { 10 }
+
+ before do
+ expect(migration).to receive_message_chain(:batched_jobs, :successful_in_execution_order, :reverse_order, :limit).with(no_args).with(no_args).with(number_of_jobs).and_return(jobs)
+ end
+
+ def mock_efficiencies(*effs)
+ effs.each_with_index do |eff, i|
+ expect(jobs[i]).to receive(:time_efficiency).and_return(eff)
+ end
+ end
+
+ context 'example 1: increasing trend, but only recently crossed threshold' do
+ it 'returns the smoothed time efficiency' do
+ mock_efficiencies(1.1, 1, 0.95, 0.9, 0.8, 0.95, 0.9, 0.8, 0.9, 0.95)
+
+ expect(subject).to be_within(0.05).of(0.95)
+ end
+ end
+
+ context 'example 2: increasing trend, crossed threshold a while ago' do
+ it 'returns the smoothed time efficiency' do
+ mock_efficiencies(1.2, 1.1, 1, 1, 1.1, 1, 0.95, 0.9, 0.95, 0.9)
+
+ expect(subject).to be_within(0.05).of(1.1)
+ end
+ end
+
+ context 'example 3: decreasing trend, but only recently crossed threshold' do
+ it 'returns the smoothed time efficiency' do
+ mock_efficiencies(0.9, 0.95, 1, 1.2, 1.1, 1.2, 1.1, 1.0, 1.1, 1.0)
+
+ expect(subject).to be_within(0.05).of(1.0)
+ end
+ end
+
+ context 'example 4: latest run spiked' do
+ it 'returns the smoothed time efficiency' do
+ mock_efficiencies(1.2, 0.9, 0.8, 0.9, 0.95, 0.9, 0.92, 0.9, 0.95, 0.9)
+
+ expect(subject).to be_within(0.02).of(0.96)
+ end
+ end
+ end
+ end
+
+ describe '#optimize!' do
+ subject { batched_migration.optimize! }
+
+ let(:batched_migration) { create(:batched_background_migration) }
+ let(:optimizer) { instance_double('Gitlab::Database::BackgroundMigration::BatchOptimizer') }
+
+ it 'calls the BatchOptimizer' do
+ expect(Gitlab::Database::BackgroundMigration::BatchOptimizer).to receive(:new).with(batched_migration).and_return(optimizer)
+ expect(optimizer).to receive(:optimize!)
+
+ subject
+ end
+ end
end
diff --git a/spec/lib/gitlab/database/background_migration/batched_migration_wrapper_spec.rb b/spec/lib/gitlab/database/background_migration/batched_migration_wrapper_spec.rb
index 00d13f23d36..c1183a15e37 100644
--- a/spec/lib/gitlab/database/background_migration/batched_migration_wrapper_spec.rb
+++ b/spec/lib/gitlab/database/background_migration/batched_migration_wrapper_spec.rb
@@ -7,9 +7,16 @@ RSpec.describe Gitlab::Database::BackgroundMigration::BatchedMigrationWrapper, '
let(:job_class) { Gitlab::BackgroundMigration::CopyColumnUsingBackgroundMigrationJob }
+ let_it_be(:pause_ms) { 250 }
let_it_be(:active_migration) { create(:batched_background_migration, :active, job_arguments: [:id, :other_id]) }
- let!(:job_record) { create(:batched_background_migration_job, batched_migration: active_migration) }
+ let!(:job_record) do
+ create(:batched_background_migration_job,
+ batched_migration: active_migration,
+ pause_ms: pause_ms
+ )
+ end
+
let(:job_instance) { double('job instance', batch_metrics: {}) }
before do
@@ -17,7 +24,7 @@ RSpec.describe Gitlab::Database::BackgroundMigration::BatchedMigrationWrapper, '
end
it 'runs the migration job' do
- expect(job_instance).to receive(:perform).with(1, 10, 'events', 'id', 1, 'id', 'other_id')
+ expect(job_instance).to receive(:perform).with(1, 10, 'events', 'id', 1, pause_ms, 'id', 'other_id')
subject
end
@@ -42,6 +49,42 @@ RSpec.describe Gitlab::Database::BackgroundMigration::BatchedMigrationWrapper, '
end
end
+ context 'when running a job that failed previously' do
+ let!(:job_record) do
+ create(:batched_background_migration_job,
+ batched_migration: active_migration,
+ pause_ms: pause_ms,
+ attempts: 1,
+ status: :failed,
+ finished_at: 1.hour.ago,
+ metrics: { 'my_metrics' => 'some_value' }
+ )
+ end
+
+ it 'increments attempts and updates other fields' do
+ updated_metrics = { 'updated_metrics' => 'some_value' }
+
+ expect(job_instance).to receive(:perform)
+ expect(job_instance).to receive(:batch_metrics).and_return(updated_metrics)
+
+ expect(job_record).to receive(:update!).with(
+ hash_including(attempts: 2, status: :running, finished_at: nil, metrics: {})
+ ).and_call_original
+
+ freeze_time do
+ subject
+
+ job_record.reload
+
+ expect(job_record).not_to be_failed
+ expect(job_record.attempts).to eq(2)
+ expect(job_record.started_at).to eq(Time.current)
+ expect(job_record.finished_at).to eq(Time.current)
+ expect(job_record.metrics).to eq(updated_metrics)
+ end
+ end
+ end
+
context 'reporting prometheus metrics' do
let(:labels) { job_record.batched_migration.prometheus_labels }
@@ -61,12 +104,26 @@ RSpec.describe Gitlab::Database::BackgroundMigration::BatchedMigrationWrapper, '
subject
end
+ it 'reports interval' do
+ expect(described_class.metrics[:gauge_interval]).to receive(:set).with(labels, job_record.batched_migration.interval)
+
+ subject
+ end
+
it 'reports updated tuples (currently based on batch_size)' do
expect(described_class.metrics[:counter_updated_tuples]).to receive(:increment).with(labels, job_record.batch_size)
subject
end
+ it 'reports migrated tuples' do
+ count = double
+ expect(job_record.batched_migration).to receive(:migrated_tuple_count).and_return(count)
+ expect(described_class.metrics[:gauge_migrated_tuples]).to receive(:set).with(labels, count)
+
+ subject
+ end
+
it 'reports summary of query timings' do
metrics = { 'timings' => { 'update_all' => [1, 2, 3, 4, 5] } }
@@ -82,14 +139,26 @@ RSpec.describe Gitlab::Database::BackgroundMigration::BatchedMigrationWrapper, '
subject
end
- it 'reports time efficiency' do
+ it 'reports job duration' do
freeze_time do
expect(Time).to receive(:current).and_return(Time.zone.now - 5.seconds).ordered
- expect(Time).to receive(:current).and_return(Time.zone.now).ordered
+ allow(Time).to receive(:current).and_call_original
+
+ expect(described_class.metrics[:gauge_job_duration]).to receive(:set).with(labels, 5.seconds)
- ratio = 5 / job_record.batched_migration.interval.to_f
+ subject
+ end
+ end
- expect(described_class.metrics[:histogram_time_efficiency]).to receive(:observe).with(labels, ratio)
+ it 'reports the total tuple count for the migration' do
+ expect(described_class.metrics[:gauge_total_tuple_count]).to receive(:set).with(labels, job_record.batched_migration.total_tuple_count)
+
+ subject
+ end
+
+ it 'reports last updated at timestamp' do
+ freeze_time do
+ expect(described_class.metrics[:gauge_last_update_time]).to receive(:set).with(labels, Time.current.to_i)
subject
end
@@ -98,7 +167,7 @@ RSpec.describe Gitlab::Database::BackgroundMigration::BatchedMigrationWrapper, '
context 'when the migration job does not raise an error' do
it 'marks the tracking record as succeeded' do
- expect(job_instance).to receive(:perform).with(1, 10, 'events', 'id', 1, 'id', 'other_id')
+ expect(job_instance).to receive(:perform).with(1, 10, 'events', 'id', 1, pause_ms, 'id', 'other_id')
freeze_time do
subject
@@ -112,19 +181,24 @@ RSpec.describe Gitlab::Database::BackgroundMigration::BatchedMigrationWrapper, '
end
context 'when the migration job raises an error' do
- it 'marks the tracking record as failed before raising the error' do
- expect(job_instance).to receive(:perform)
- .with(1, 10, 'events', 'id', 1, 'id', 'other_id')
- .and_raise(RuntimeError, 'Something broke!')
+ shared_examples 'an error is raised' do |error_class|
+ it 'marks the tracking record as failed' do
+ expect(job_instance).to receive(:perform)
+ .with(1, 10, 'events', 'id', 1, pause_ms, 'id', 'other_id')
+ .and_raise(error_class)
- freeze_time do
- expect { subject }.to raise_error(RuntimeError, 'Something broke!')
+ freeze_time do
+ expect { subject }.to raise_error(error_class)
- reloaded_job_record = job_record.reload
+ reloaded_job_record = job_record.reload
- expect(reloaded_job_record).to be_failed
- expect(reloaded_job_record.finished_at).to eq(Time.current)
+ expect(reloaded_job_record).to be_failed
+ expect(reloaded_job_record.finished_at).to eq(Time.current)
+ end
end
end
+
+ it_behaves_like 'an error is raised', RuntimeError.new('Something broke!')
+ it_behaves_like 'an error is raised', SignalException.new('SIGTERM')
end
end
diff --git a/spec/lib/gitlab/database/migration_helpers/cascading_namespace_settings_spec.rb b/spec/lib/gitlab/database/migration_helpers/cascading_namespace_settings_spec.rb
new file mode 100644
index 00000000000..e11ffe53c61
--- /dev/null
+++ b/spec/lib/gitlab/database/migration_helpers/cascading_namespace_settings_spec.rb
@@ -0,0 +1,50 @@
+# frozen_string_literal: true
+
+require 'spec_helper'
+
+RSpec.describe Gitlab::Database::MigrationHelpers::CascadingNamespaceSettings do
+ let(:migration) do
+ ActiveRecord::Migration.new.extend(described_class)
+ end
+
+ describe '#add_cascading_namespace_setting' do
+ it 'creates the required columns', :aggregate_failures do
+ expect(migration).to receive(:add_column).with(:namespace_settings, :some_setting, :integer, null: true, default: nil)
+ expect(migration).to receive(:add_column).with(:namespace_settings, :lock_some_setting, :boolean, null: false, default: false)
+
+ expect(migration).to receive(:add_column).with(:application_settings, :some_setting, :integer, null: false, default: 5)
+ expect(migration).to receive(:add_column).with(:application_settings, :lock_some_setting, :boolean, null: false, default: false)
+
+ migration.add_cascading_namespace_setting(:some_setting, :integer, null: false, default: 5)
+ end
+
+ context 'when columns already exist' do
+ before do
+ migration.add_column(:namespace_settings, :cascading_setting, :integer)
+ migration.add_column(:application_settings, :lock_cascading_setting, :boolean)
+ end
+
+ it 'raises an error when some columns already exist' do
+ expect do
+ migration.add_cascading_namespace_setting(:cascading_setting, :integer)
+ end.to raise_error %r/Existing columns: namespace_settings.cascading_setting, application_settings.lock_cascading_setting/
+ end
+ end
+ end
+
+ describe '#remove_cascading_namespace_setting' do
+ before do
+ allow(migration).to receive(:column_exists?).and_return(true)
+ end
+
+ it 'removes the columns', :aggregate_failures do
+ expect(migration).to receive(:remove_column).with(:namespace_settings, :some_setting)
+ expect(migration).to receive(:remove_column).with(:namespace_settings, :lock_some_setting)
+
+ expect(migration).to receive(:remove_column).with(:application_settings, :some_setting)
+ expect(migration).to receive(:remove_column).with(:application_settings, :lock_some_setting)
+
+ migration.remove_cascading_namespace_setting(:some_setting)
+ end
+ end
+end
diff --git a/spec/lib/gitlab/database/migration_helpers_spec.rb b/spec/lib/gitlab/database/migration_helpers_spec.rb
index 44293086e79..40720628a89 100644
--- a/spec/lib/gitlab/database/migration_helpers_spec.rb
+++ b/spec/lib/gitlab/database/migration_helpers_spec.rb
@@ -4,6 +4,7 @@ require 'spec_helper'
RSpec.describe Gitlab::Database::MigrationHelpers do
include Database::TableSchemaHelpers
+ include Database::TriggerHelpers
let(:model) do
ActiveRecord::Migration.new.extend(described_class)
@@ -834,7 +835,7 @@ RSpec.describe Gitlab::Database::MigrationHelpers do
it 'renames a column concurrently' do
expect(model).to receive(:check_trigger_permissions!).with(:users)
- expect(model).to receive(:install_rename_triggers_for_postgresql)
+ expect(model).to receive(:install_rename_triggers)
.with(:users, :old, :new)
expect(model).to receive(:add_column)
@@ -946,7 +947,7 @@ RSpec.describe Gitlab::Database::MigrationHelpers do
it 'reverses the operations of rename_column_concurrently' do
expect(model).to receive(:check_trigger_permissions!).with(:users)
- expect(model).to receive(:remove_rename_triggers_for_postgresql)
+ expect(model).to receive(:remove_rename_triggers)
.with(:users, /trigger_.{12}/)
expect(model).to receive(:remove_column).with(:users, :new)
@@ -959,7 +960,7 @@ RSpec.describe Gitlab::Database::MigrationHelpers do
it 'cleans up the renaming procedure' do
expect(model).to receive(:check_trigger_permissions!).with(:users)
- expect(model).to receive(:remove_rename_triggers_for_postgresql)
+ expect(model).to receive(:remove_rename_triggers)
.with(:users, /trigger_.{12}/)
expect(model).to receive(:remove_column).with(:users, :old)
@@ -999,7 +1000,7 @@ RSpec.describe Gitlab::Database::MigrationHelpers do
it 'reverses the operations of cleanup_concurrent_column_rename' do
expect(model).to receive(:check_trigger_permissions!).with(:users)
- expect(model).to receive(:install_rename_triggers_for_postgresql)
+ expect(model).to receive(:install_rename_triggers)
.with(:users, :old, :new)
expect(model).to receive(:add_column)
@@ -1094,7 +1095,7 @@ RSpec.describe Gitlab::Database::MigrationHelpers do
it 'reverses the operations of change_column_type_concurrently' do
expect(model).to receive(:check_trigger_permissions!).with(:users)
- expect(model).to receive(:remove_rename_triggers_for_postgresql)
+ expect(model).to receive(:remove_rename_triggers)
.with(:users, /trigger_.{12}/)
expect(model).to receive(:remove_column).with(:users, "old_for_type_change")
@@ -1159,7 +1160,7 @@ RSpec.describe Gitlab::Database::MigrationHelpers do
expect(model).to receive(:rename_column)
.with(:users, temp_undo_cleanup_column, :old)
- expect(model).to receive(:install_rename_triggers_for_postgresql)
+ expect(model).to receive(:install_rename_triggers)
.with(:users, :old, 'old_for_type_change')
model.undo_cleanup_concurrent_column_type_change(:users, :old, :string)
@@ -1185,7 +1186,7 @@ RSpec.describe Gitlab::Database::MigrationHelpers do
expect(model).to receive(:rename_column)
.with(:users, temp_undo_cleanup_column, :old)
- expect(model).to receive(:install_rename_triggers_for_postgresql)
+ expect(model).to receive(:install_rename_triggers)
.with(:users, :old, 'old_for_type_change')
model.undo_cleanup_concurrent_column_type_change(
@@ -1206,8 +1207,8 @@ RSpec.describe Gitlab::Database::MigrationHelpers do
end
end
- describe '#install_rename_triggers_for_postgresql' do
- it 'installs the triggers for PostgreSQL' do
+ describe '#install_rename_triggers' do
+ it 'installs the triggers' do
copy_trigger = double('copy trigger')
expect(Gitlab::Database::UnidirectionalCopyTrigger).to receive(:on_table)
@@ -1215,11 +1216,11 @@ RSpec.describe Gitlab::Database::MigrationHelpers do
expect(copy_trigger).to receive(:create).with(:old, :new, trigger_name: 'foo')
- model.install_rename_triggers_for_postgresql(:users, :old, :new, trigger_name: 'foo')
+ model.install_rename_triggers(:users, :old, :new, trigger_name: 'foo')
end
end
- describe '#remove_rename_triggers_for_postgresql' do
+ describe '#remove_rename_triggers' do
it 'removes the function and trigger' do
copy_trigger = double('copy trigger')
@@ -1228,7 +1229,7 @@ RSpec.describe Gitlab::Database::MigrationHelpers do
expect(copy_trigger).to receive(:drop).with('foo')
- model.remove_rename_triggers_for_postgresql('bar', 'foo')
+ model.remove_rename_triggers('bar', 'foo')
end
end
@@ -1702,10 +1703,16 @@ RSpec.describe Gitlab::Database::MigrationHelpers do
end
end
+ describe '#convert_to_bigint_column' do
+ it 'returns the name of the temporary column used to convert to bigint' do
+ expect(model.convert_to_bigint_column(:id)).to eq('id_convert_to_bigint')
+ end
+ end
+
describe '#initialize_conversion_of_integer_to_bigint' do
let(:table) { :test_table }
let(:column) { :id }
- let(:tmp_column) { "#{column}_convert_to_bigint" }
+ let(:tmp_column) { model.convert_to_bigint_column(column) }
before do
model.create_table table, id: false do |t|
@@ -1730,12 +1737,10 @@ RSpec.describe Gitlab::Database::MigrationHelpers do
end
end
- context 'when the column to convert does not exist' do
- let(:column) { :foobar }
-
+ context 'when the column to migrate does not exist' do
it 'raises an error' do
- expect { model.initialize_conversion_of_integer_to_bigint(table, column) }
- .to raise_error("Column #{column} does not exist on #{table}")
+ expect { model.initialize_conversion_of_integer_to_bigint(table, :this_column_is_not_real) }
+ .to raise_error(ArgumentError, "Column this_column_is_not_real does not exist on #{table}")
end
end
@@ -1743,7 +1748,7 @@ RSpec.describe Gitlab::Database::MigrationHelpers do
it 'creates a not-null bigint column and installs triggers' do
expect(model).to receive(:add_column).with(table, tmp_column, :bigint, default: 0, null: false)
- expect(model).to receive(:install_rename_triggers).with(table, column, tmp_column)
+ expect(model).to receive(:install_rename_triggers).with(table, [column], [tmp_column])
model.initialize_conversion_of_integer_to_bigint(table, column)
end
@@ -1755,7 +1760,7 @@ RSpec.describe Gitlab::Database::MigrationHelpers do
it 'creates a not-null bigint column and installs triggers' do
expect(model).to receive(:add_column).with(table, tmp_column, :bigint, default: 0, null: false)
- expect(model).to receive(:install_rename_triggers).with(table, column, tmp_column)
+ expect(model).to receive(:install_rename_triggers).with(table, [column], [tmp_column])
model.initialize_conversion_of_integer_to_bigint(table, column)
end
@@ -1767,22 +1772,83 @@ RSpec.describe Gitlab::Database::MigrationHelpers do
it 'creates a nullable bigint column and installs triggers' do
expect(model).to receive(:add_column).with(table, tmp_column, :bigint, default: nil)
- expect(model).to receive(:install_rename_triggers).with(table, column, tmp_column)
+ expect(model).to receive(:install_rename_triggers).with(table, [column], [tmp_column])
model.initialize_conversion_of_integer_to_bigint(table, column)
end
end
+
+ context 'when multiple columns are given' do
+ it 'creates the correct columns and installs the trigger' do
+ columns_to_convert = %i[id non_nullable_column nullable_column]
+ temporary_columns = columns_to_convert.map { |column| model.convert_to_bigint_column(column) }
+
+ expect(model).to receive(:add_column).with(table, temporary_columns[0], :bigint, default: 0, null: false)
+ expect(model).to receive(:add_column).with(table, temporary_columns[1], :bigint, default: 0, null: false)
+ expect(model).to receive(:add_column).with(table, temporary_columns[2], :bigint, default: nil)
+
+ expect(model).to receive(:install_rename_triggers).with(table, columns_to_convert, temporary_columns)
+
+ model.initialize_conversion_of_integer_to_bigint(table, columns_to_convert)
+ end
+ end
+ end
+
+ describe '#revert_initialize_conversion_of_integer_to_bigint' do
+ let(:table) { :test_table }
+
+ before do
+ model.create_table table, id: false do |t|
+ t.integer :id, primary_key: true
+ t.integer :other_id
+ end
+
+ model.initialize_conversion_of_integer_to_bigint(table, columns)
+ end
+
+ context 'when single column is given' do
+ let(:columns) { :id }
+
+ it 'removes column, trigger, and function' do
+ temporary_column = model.convert_to_bigint_column(:id)
+ trigger_name = model.rename_trigger_name(table, :id, temporary_column)
+
+ model.revert_initialize_conversion_of_integer_to_bigint(table, columns)
+
+ expect(model.column_exists?(table, temporary_column)).to eq(false)
+ expect_trigger_not_to_exist(table, trigger_name)
+ expect_function_not_to_exist(trigger_name)
+ end
+ end
+
+ context 'when multiple columns are given' do
+ let(:columns) { [:id, :other_id] }
+
+ it 'removes column, trigger, and function' do
+ temporary_columns = columns.map { |column| model.convert_to_bigint_column(column) }
+ trigger_name = model.rename_trigger_name(table, columns, temporary_columns)
+
+ model.revert_initialize_conversion_of_integer_to_bigint(table, columns)
+
+ temporary_columns.each do |column|
+ expect(model.column_exists?(table, column)).to eq(false)
+ end
+ expect_trigger_not_to_exist(table, trigger_name)
+ expect_function_not_to_exist(trigger_name)
+ end
+ end
end
describe '#backfill_conversion_of_integer_to_bigint' do
let(:table) { :_test_backfill_table }
let(:column) { :id }
- let(:tmp_column) { "#{column}_convert_to_bigint" }
+ let(:tmp_column) { model.convert_to_bigint_column(column) }
before do
model.create_table table, id: false do |t|
t.integer :id, primary_key: true
t.text :message, null: false
+ t.integer :other_id
t.timestamps
end
@@ -1808,14 +1874,14 @@ RSpec.describe Gitlab::Database::MigrationHelpers do
it 'raises an error' do
expect { model.backfill_conversion_of_integer_to_bigint(table, column) }
- .to raise_error("Column #{column} does not exist on #{table}")
+ .to raise_error(ArgumentError, "Column #{column} does not exist on #{table}")
end
end
context 'when the temporary column does not exist' do
it 'raises an error' do
expect { model.backfill_conversion_of_integer_to_bigint(table, column) }
- .to raise_error('The temporary column does not exist, initialize it with `initialize_conversion_of_integer_to_bigint`')
+ .to raise_error(ArgumentError, "Column #{tmp_column} does not exist on #{table}")
end
end
@@ -1829,48 +1895,112 @@ RSpec.describe Gitlab::Database::MigrationHelpers do
let(:migration_relation) { Gitlab::Database::BackgroundMigration::BatchedMigration.active }
before do
- model.initialize_conversion_of_integer_to_bigint(table, column)
+ model.initialize_conversion_of_integer_to_bigint(table, columns)
model_class.create!(message: 'hello')
model_class.create!(message: 'so long')
end
- it 'creates the batched migration tracking record' do
- last_record = model_class.create!(message: 'goodbye')
+ context 'when a single column is being converted' do
+ let(:columns) { column }
- expect do
- model.backfill_conversion_of_integer_to_bigint(table, column, batch_size: 2, sub_batch_size: 1)
- end.to change { migration_relation.count }.by(1)
-
- expect(migration_relation.last).to have_attributes(
- job_class_name: 'CopyColumnUsingBackgroundMigrationJob',
- table_name: table.to_s,
- column_name: column.to_s,
- min_value: 1,
- max_value: last_record.id,
- interval: 120,
- batch_size: 2,
- sub_batch_size: 1,
- job_arguments: [column.to_s, "#{column}_convert_to_bigint"]
- )
+ it 'creates the batched migration tracking record' do
+ last_record = model_class.create!(message: 'goodbye')
+
+ expect do
+ model.backfill_conversion_of_integer_to_bigint(table, column, batch_size: 2, sub_batch_size: 1)
+ end.to change { migration_relation.count }.by(1)
+
+ expect(migration_relation.last).to have_attributes(
+ job_class_name: 'CopyColumnUsingBackgroundMigrationJob',
+ table_name: table.to_s,
+ column_name: column.to_s,
+ min_value: 1,
+ max_value: last_record.id,
+ interval: 120,
+ batch_size: 2,
+ sub_batch_size: 1,
+ job_arguments: [[column.to_s], [model.convert_to_bigint_column(column)]]
+ )
+ end
end
- context 'when the migration should be performed inline' do
- it 'calls the runner to run the entire migration' do
- expect(model).to receive(:perform_background_migration_inline?).and_return(true)
+ context 'when multiple columns are being converted' do
+ let(:other_column) { :other_id }
+ let(:other_tmp_column) { model.convert_to_bigint_column(other_column) }
+ let(:columns) { [column, other_column] }
- expect_next_instance_of(Gitlab::Database::BackgroundMigration::BatchedMigrationRunner) do |scheduler|
- expect(scheduler).to receive(:run_entire_migration) do |batched_migration|
- expect(batched_migration).to eq(migration_relation.last)
- end
- end
+ it 'creates the batched migration tracking record' do
+ last_record = model_class.create!(message: 'goodbye', other_id: 50)
- model.backfill_conversion_of_integer_to_bigint(table, column, batch_size: 2, sub_batch_size: 1)
+ expect do
+ model.backfill_conversion_of_integer_to_bigint(table, columns, batch_size: 2, sub_batch_size: 1)
+ end.to change { migration_relation.count }.by(1)
+
+ expect(migration_relation.last).to have_attributes(
+ job_class_name: 'CopyColumnUsingBackgroundMigrationJob',
+ table_name: table.to_s,
+ column_name: column.to_s,
+ min_value: 1,
+ max_value: last_record.id,
+ interval: 120,
+ batch_size: 2,
+ sub_batch_size: 1,
+ job_arguments: [[column.to_s, other_column.to_s], [tmp_column, other_tmp_column]]
+ )
end
end
end
end
+ describe '#revert_backfill_conversion_of_integer_to_bigint' do
+ let(:table) { :_test_backfill_table }
+ let(:primary_key) { :id }
+
+ before do
+ model.create_table table, id: false do |t|
+ t.integer primary_key, primary_key: true
+ t.text :message, null: false
+ t.integer :other_id
+ t.timestamps
+ end
+
+ model.initialize_conversion_of_integer_to_bigint(table, columns, primary_key: primary_key)
+ model.backfill_conversion_of_integer_to_bigint(table, columns, primary_key: primary_key)
+ end
+
+ context 'when a single column is being converted' do
+ let(:columns) { :id }
+
+ it 'deletes the batched migration tracking record' do
+ expect do
+ model.revert_backfill_conversion_of_integer_to_bigint(table, columns)
+ end.to change { Gitlab::Database::BackgroundMigration::BatchedMigration.count }.by(-1)
+ end
+ end
+
+ context 'when a multiple columns are being converted' do
+ let(:columns) { [:id, :other_id] }
+
+ it 'deletes the batched migration tracking record' do
+ expect do
+ model.revert_backfill_conversion_of_integer_to_bigint(table, columns)
+ end.to change { Gitlab::Database::BackgroundMigration::BatchedMigration.count }.by(-1)
+ end
+ end
+
+ context 'when primary key column has custom name' do
+ let(:primary_key) { :other_pk }
+ let(:columns) { :other_id }
+
+ it 'deletes the batched migration tracking record' do
+ expect do
+ model.revert_backfill_conversion_of_integer_to_bigint(table, columns, primary_key: primary_key)
+ end.to change { Gitlab::Database::BackgroundMigration::BatchedMigration.count }.by(-1)
+ end
+ end
+ end
+
describe '#index_exists_by_name?' do
it 'returns true if an index exists' do
ActiveRecord::Base.connection.execute(
diff --git a/spec/lib/gitlab/database/migrations/instrumentation_spec.rb b/spec/lib/gitlab/database/migrations/instrumentation_spec.rb
index 3804dc52a77..6d047eed3bb 100644
--- a/spec/lib/gitlab/database/migrations/instrumentation_spec.rb
+++ b/spec/lib/gitlab/database/migrations/instrumentation_spec.rb
@@ -74,7 +74,7 @@ RSpec.describe Gitlab::Database::Migrations::Instrumentation do
before do
instance.observe(migration) { raise 'something went wrong' }
- rescue
+ rescue StandardError
# ignore
end
diff --git a/spec/lib/gitlab/database/migrations/observers/query_log_spec.rb b/spec/lib/gitlab/database/migrations/observers/query_log_spec.rb
new file mode 100644
index 00000000000..195e7114582
--- /dev/null
+++ b/spec/lib/gitlab/database/migrations/observers/query_log_spec.rb
@@ -0,0 +1,38 @@
+# frozen_string_literal: true
+require 'spec_helper'
+
+RSpec.describe Gitlab::Database::Migrations::Observers::QueryLog do
+ subject { described_class.new }
+
+ let(:observation) { Gitlab::Database::Migrations::Observation.new(migration) }
+ let(:connection) { ActiveRecord::Base.connection }
+ let(:query) { 'select 1' }
+ let(:directory_path) { Dir.mktmpdir }
+ let(:log_file) { "#{directory_path}/current.log" }
+ let(:migration) { 20210422152437 }
+
+ before do
+ stub_const('Gitlab::Database::Migrations::Instrumentation::RESULT_DIR', directory_path)
+ end
+
+ after do
+ FileUtils.remove_entry(directory_path)
+ end
+
+ it 'writes a file with the query log' do
+ observe
+
+ expect(File.read("#{directory_path}/#{migration}.log")).to include(query)
+ end
+
+ it 'does not change the default logger' do
+ expect { observe }.not_to change { ActiveRecord::Base.logger }
+ end
+
+ def observe
+ subject.before
+ connection.execute(query)
+ subject.after
+ subject.record(observation)
+ end
+end
diff --git a/spec/lib/gitlab/database/partitioning_migration_helpers/foreign_key_helpers_spec.rb b/spec/lib/gitlab/database/partitioning_migration_helpers/foreign_key_helpers_spec.rb
index 93dbd9d7c30..83f2436043c 100644
--- a/spec/lib/gitlab/database/partitioning_migration_helpers/foreign_key_helpers_spec.rb
+++ b/spec/lib/gitlab/database/partitioning_migration_helpers/foreign_key_helpers_spec.rb
@@ -10,6 +10,7 @@ RSpec.describe Gitlab::Database::PartitioningMigrationHelpers::ForeignKeyHelpers
end
let_it_be(:connection) { ActiveRecord::Base.connection }
+
let(:referenced_table) { :issues }
let(:function_name) { '_test_partitioned_foreign_keys_function' }
let(:trigger_name) { '_test_partitioned_foreign_keys_trigger' }
diff --git a/spec/lib/gitlab/database/partitioning_migration_helpers/index_helpers_spec.rb b/spec/lib/gitlab/database/partitioning_migration_helpers/index_helpers_spec.rb
index 603f3dc41af..c3edc3a0c87 100644
--- a/spec/lib/gitlab/database/partitioning_migration_helpers/index_helpers_spec.rb
+++ b/spec/lib/gitlab/database/partitioning_migration_helpers/index_helpers_spec.rb
@@ -64,7 +64,7 @@ RSpec.describe Gitlab::Database::PartitioningMigrationHelpers::IndexHelpers do
def expect_add_concurrent_index_and_call_original(table, column, index)
expect(migration).to receive(:add_concurrent_index).ordered.with(table, column, name: index)
- .and_wrap_original { |_, table, column, options| connection.add_index(table, column, options) }
+ .and_wrap_original { |_, table, column, options| connection.add_index(table, column, **options) }
end
end
diff --git a/spec/lib/gitlab/database/partitioning_migration_helpers/table_management_helpers_spec.rb b/spec/lib/gitlab/database/partitioning_migration_helpers/table_management_helpers_spec.rb
index 5b2a29d1d2d..79ddb450d7a 100644
--- a/spec/lib/gitlab/database/partitioning_migration_helpers/table_management_helpers_spec.rb
+++ b/spec/lib/gitlab/database/partitioning_migration_helpers/table_management_helpers_spec.rb
@@ -12,6 +12,7 @@ RSpec.describe Gitlab::Database::PartitioningMigrationHelpers::TableManagementHe
end
let_it_be(:connection) { ActiveRecord::Base.connection }
+
let(:source_table) { :_test_original_table }
let(:partitioned_table) { '_test_migration_partitioned_table' }
let(:function_name) { '_test_migration_function_name' }
diff --git a/spec/lib/gitlab/database/reindexing/concurrent_reindex_spec.rb b/spec/lib/gitlab/database/reindexing/concurrent_reindex_spec.rb
index 51fc7c6620b..d9077969003 100644
--- a/spec/lib/gitlab/database/reindexing/concurrent_reindex_spec.rb
+++ b/spec/lib/gitlab/database/reindexing/concurrent_reindex_spec.rb
@@ -111,7 +111,7 @@ RSpec.describe Gitlab::Database::Reindexing::ConcurrentReindex, '#perform' do
end
it 'replaces the existing index with an identical index' do
- expect(connection).to receive(:execute).with('SET statement_timeout TO \'21600s\'').twice
+ expect(connection).to receive(:execute).with('SET statement_timeout TO \'32400s\'')
expect_to_execute_concurrently_in_order(create_index)
@@ -123,6 +123,10 @@ RSpec.describe Gitlab::Database::Reindexing::ConcurrentReindex, '#perform' do
expect_index_rename(replacement_name, index.name)
expect_index_rename(replaced_name, replacement_name)
+ expect_next_instance_of(::Gitlab::Database::WithLockRetries) do |instance|
+ expect(instance).to receive(:run).with(raise_on_exhaustion: false).and_yield
+ end
+
expect_to_execute_concurrently_in_order(drop_index)
subject.perform
@@ -136,7 +140,7 @@ RSpec.describe Gitlab::Database::Reindexing::ConcurrentReindex, '#perform' do
end
it 'rebuilds table statistics before dropping the original index' do
- expect(connection).to receive(:execute).with('SET statement_timeout TO \'21600s\'').twice
+ expect(connection).to receive(:execute).with('SET statement_timeout TO \'32400s\'')
expect_to_execute_concurrently_in_order(create_index)
@@ -152,7 +156,7 @@ RSpec.describe Gitlab::Database::Reindexing::ConcurrentReindex, '#perform' do
expect_index_rename(replacement_name, index.name)
expect_index_rename(replaced_name, replacement_name)
- expect_to_execute_concurrently_in_order(drop_index)
+ expect_index_drop(drop_index)
subject.perform
@@ -166,9 +170,7 @@ RSpec.describe Gitlab::Database::Reindexing::ConcurrentReindex, '#perform' do
end
it 'replaces the existing index with an identical index' do
- expect(connection).to receive(:execute).with('SET statement_timeout TO \'21600s\'').exactly(3).times
-
- expect_to_execute_concurrently_in_order(drop_index)
+ expect_index_drop(drop_index)
expect_to_execute_concurrently_in_order(create_index)
expect_next_instance_of(::Gitlab::Database::WithLockRetries) do |instance|
@@ -179,7 +181,7 @@ RSpec.describe Gitlab::Database::Reindexing::ConcurrentReindex, '#perform' do
expect_index_rename(replacement_name, index.name)
expect_index_rename(replaced_name, replacement_name)
- expect_to_execute_concurrently_in_order(drop_index)
+ expect_index_drop(drop_index)
subject.perform
@@ -192,6 +194,10 @@ RSpec.describe Gitlab::Database::Reindexing::ConcurrentReindex, '#perform' do
expect(connection).to receive(:execute).with(create_index).ordered
.and_raise(ActiveRecord::ConnectionTimeoutError, 'connect timeout')
+ expect_next_instance_of(::Gitlab::Database::WithLockRetries) do |instance|
+ expect(instance).to receive(:run).with(raise_on_exhaustion: false).and_yield
+ end
+
expect_to_execute_concurrently_in_order(drop_index)
expect { subject.perform }.to raise_error(ActiveRecord::ConnectionTimeoutError, /connect timeout/)
@@ -207,6 +213,10 @@ RSpec.describe Gitlab::Database::Reindexing::ConcurrentReindex, '#perform' do
expect_to_execute_concurrently_in_order(create_index)
+ expect_next_instance_of(::Gitlab::Database::WithLockRetries) do |instance|
+ expect(instance).to receive(:run).with(raise_on_exhaustion: false).and_yield
+ end
+
expect_to_execute_concurrently_in_order(drop_index)
expect { subject.perform }.to raise_error(described_class::ReindexError, /replacement index was created as INVALID/)
@@ -228,7 +238,7 @@ RSpec.describe Gitlab::Database::Reindexing::ConcurrentReindex, '#perform' do
expect_index_rename(index.name, replaced_name).and_raise(ActiveRecord::ConnectionTimeoutError, 'connect timeout')
- expect_to_execute_concurrently_in_order(drop_index)
+ expect_index_drop(drop_index)
expect { subject.perform }.to raise_error(ActiveRecord::ConnectionTimeoutError, /connect timeout/)
@@ -245,7 +255,7 @@ RSpec.describe Gitlab::Database::Reindexing::ConcurrentReindex, '#perform' do
.and_raise(::Gitlab::Database::WithLockRetries::AttemptsExhaustedError, 'exhausted')
end
- expect_to_execute_concurrently_in_order(drop_index)
+ expect_index_drop(drop_index)
expect { subject.perform }.to raise_error(::Gitlab::Database::WithLockRetries::AttemptsExhaustedError, /exhausted/)
@@ -270,6 +280,14 @@ RSpec.describe Gitlab::Database::Reindexing::ConcurrentReindex, '#perform' do
SQL
end
+ def expect_index_drop(drop_index)
+ expect_next_instance_of(::Gitlab::Database::WithLockRetries) do |instance|
+ expect(instance).to receive(:run).with(raise_on_exhaustion: false).and_yield
+ end
+
+ expect_to_execute_concurrently_in_order(drop_index)
+ end
+
def find_index_create_statement
ActiveRecord::Base.connection.select_value(<<~SQL)
SELECT indexdef
diff --git a/spec/lib/gitlab/database/schema_cache_with_renamed_table_spec.rb b/spec/lib/gitlab/database/schema_cache_with_renamed_table_spec.rb
new file mode 100644
index 00000000000..8c0c4155ccc
--- /dev/null
+++ b/spec/lib/gitlab/database/schema_cache_with_renamed_table_spec.rb
@@ -0,0 +1,83 @@
+# frozen_string_literal: true
+
+require 'spec_helper'
+
+RSpec.describe Gitlab::Database::SchemaCacheWithRenamedTable do
+ let(:old_model) do
+ Class.new(ActiveRecord::Base) do
+ self.table_name = 'projects'
+ end
+ end
+
+ let(:new_model) do
+ Class.new(ActiveRecord::Base) do
+ self.table_name = 'projects_new'
+ end
+ end
+
+ before do
+ stub_const('Gitlab::Database::TABLES_TO_BE_RENAMED', { 'projects' => 'projects_new' })
+ end
+
+ context 'when table is not renamed yet' do
+ before do
+ old_model.reset_column_information
+ ActiveRecord::Base.connection.schema_cache.clear!
+ end
+
+ it 'uses the original table to look up metadata' do
+ expect(old_model.primary_key).to eq('id')
+ end
+ end
+
+ context 'when table is renamed' do
+ before do
+ ActiveRecord::Base.connection.execute("ALTER TABLE projects RENAME TO projects_new")
+ ActiveRecord::Base.connection.execute("CREATE VIEW projects AS SELECT * FROM projects_new")
+
+ old_model.reset_column_information
+ ActiveRecord::Base.connection.schema_cache.clear!
+ end
+
+ it 'uses the renamed table to look up metadata' do
+ expect(old_model.primary_key).to eq('id')
+ end
+
+ it 'has primary key' do
+ expect(old_model.primary_key).to eq('id')
+ expect(old_model.primary_key).to eq(new_model.primary_key)
+ end
+
+ it 'has the same column definitions' do
+ expect(old_model.columns).to eq(new_model.columns)
+ end
+
+ it 'has the same indexes' do
+ indexes_for_old_table = ActiveRecord::Base.connection.schema_cache.indexes('projects')
+ indexes_for_new_table = ActiveRecord::Base.connection.schema_cache.indexes('projects_new')
+
+ expect(indexes_for_old_table).to eq(indexes_for_new_table)
+ end
+
+ it 'has the same column_hash' do
+ columns_hash_for_old_table = ActiveRecord::Base.connection.schema_cache.columns_hash('projects')
+ columns_hash_for_new_table = ActiveRecord::Base.connection.schema_cache.columns_hash('projects_new')
+
+ expect(columns_hash_for_old_table).to eq(columns_hash_for_new_table)
+ end
+
+ describe 'when the table behind a model is actually a view' do
+ let(:group) { create(:group) }
+ let(:project_attributes) { attributes_for(:project, namespace_id: group.id).except(:creator) }
+ let(:record) { old_model.create!(project_attributes) }
+
+ it 'can persist records' do
+ expect(record.reload.attributes).to eq(new_model.find(record.id).attributes)
+ end
+
+ it 'can find records' do
+ expect(old_model.find_by_id(record.id)).not_to be_nil
+ end
+ end
+ end
+end
diff --git a/spec/lib/gitlab/database/with_lock_retries_outside_transaction_spec.rb b/spec/lib/gitlab/database/with_lock_retries_outside_transaction_spec.rb
new file mode 100644
index 00000000000..e93d8ab590d
--- /dev/null
+++ b/spec/lib/gitlab/database/with_lock_retries_outside_transaction_spec.rb
@@ -0,0 +1,244 @@
+# frozen_string_literal: true
+
+require 'spec_helper'
+
+RSpec.describe Gitlab::Database::WithLockRetriesOutsideTransaction do
+ let(:env) { {} }
+ let(:logger) { Gitlab::Database::WithLockRetries::NULL_LOGGER }
+ let(:subject) { described_class.new(env: env, logger: logger, timing_configuration: timing_configuration) }
+
+ let(:timing_configuration) do
+ [
+ [1.second, 1.second],
+ [1.second, 1.second],
+ [1.second, 1.second],
+ [1.second, 1.second],
+ [1.second, 1.second]
+ ]
+ end
+
+ describe '#run' do
+ it 'requires block' do
+ expect { subject.run }.to raise_error(StandardError, 'no block given')
+ end
+
+ context 'when DISABLE_LOCK_RETRIES is set' do
+ let(:env) { { 'DISABLE_LOCK_RETRIES' => 'true' } }
+
+ it 'executes the passed block without retrying' do
+ object = double
+
+ expect(object).to receive(:method).once
+
+ subject.run { object.method }
+ end
+ end
+
+ context 'when lock retry is enabled' do
+ let(:lock_fiber) do
+ Fiber.new do
+ # Initiating a second DB connection for the lock
+ conn = ActiveRecordSecond.establish_connection(Rails.configuration.database_configuration[Rails.env]).connection
+ conn.transaction do
+ conn.execute("LOCK TABLE #{Project.table_name} in exclusive mode")
+
+ Fiber.yield
+ end
+ ActiveRecordSecond.remove_connection # force disconnect
+ end
+ end
+
+ before do
+ stub_const('ActiveRecordSecond', Class.new(ActiveRecord::Base))
+
+ lock_fiber.resume # start the transaction and lock the table
+ end
+
+ after do
+ lock_fiber.resume if lock_fiber.alive?
+ end
+
+ context 'lock_fiber' do
+ it 'acquires lock successfully' do
+ check_exclusive_lock_query = """
+ SELECT 1
+ FROM pg_locks l
+ JOIN pg_class t ON l.relation = t.oid
+ WHERE t.relkind = 'r' AND l.mode = 'ExclusiveLock' AND t.relname = '#{Project.table_name}'
+ """
+
+ expect(ActiveRecord::Base.connection.execute(check_exclusive_lock_query).to_a).to be_present
+ end
+ end
+
+ shared_examples 'retriable exclusive lock on `projects`' do
+ it 'succeeds executing the given block' do
+ lock_attempts = 0
+ lock_acquired = false
+
+ # the actual number of attempts to run_block_with_lock_timeout can never exceed the number of
+ # timing_configuration entries, so here we cap retry_count at that value
+ #
+ # also, there is no call to sleep after the final attempt, which is why the expected number of sleeps is one less
+ expected_runs_with_timeout = [retry_count, timing_configuration.size].min
+ expect(subject).to receive(:sleep).exactly(expected_runs_with_timeout - 1).times
+
+ expect(subject).to receive(:run_block_with_lock_timeout).exactly(expected_runs_with_timeout).times.and_wrap_original do |method|
+ lock_fiber.resume if lock_attempts == retry_count
+
+ method.call
+ end
+
+ subject.run do
+ lock_attempts += 1
+
+ if lock_attempts == retry_count # on the last retry iteration, resuming the fiber releases the lock, so the final try (without lock_timeout) succeeds
+ lock_fiber.resume
+ end
+
+ ActiveRecord::Base.transaction do
+ ActiveRecord::Base.connection.execute("LOCK TABLE #{Project.table_name} in exclusive mode")
+ lock_acquired = true
+ end
+ end
+
+ expect(lock_attempts).to eq(retry_count)
+ expect(lock_acquired).to eq(true)
+ end
+ end
+
+ context 'after 3 iterations' do
+ it_behaves_like 'retriable exclusive lock on `projects`' do
+ let(:retry_count) { 4 }
+ end
+
+ context 'setting the idle transaction timeout' do
+ context 'when there is no outer transaction: disable_ddl_transaction! is set in the migration' do
+ it 'does not disable the idle transaction timeout' do
+ allow(ActiveRecord::Base.connection).to receive(:transaction_open?).and_return(false)
+ allow(subject).to receive(:run_block_with_lock_timeout).once.and_raise(ActiveRecord::LockWaitTimeout)
+ allow(subject).to receive(:run_block_with_lock_timeout).once
+
+ expect(subject).not_to receive(:disable_idle_in_transaction_timeout)
+
+ subject.run {}
+ end
+ end
+
+ context 'when there is outer transaction: disable_ddl_transaction! is not set in the migration' do
+ it 'disables the idle transaction timeout so the code can sleep and retry' do
+ allow(ActiveRecord::Base.connection).to receive(:transaction_open?).and_return(true)
+
+ n = 0
+ allow(subject).to receive(:run_block_with_lock_timeout).twice do
+ n += 1
+ raise(ActiveRecord::LockWaitTimeout) if n == 1
+ end
+
+ expect(subject).to receive(:disable_idle_in_transaction_timeout).once
+
+ subject.run {}
+ end
+ end
+ end
+ end
+
+ context 'after the retries are exhausted' do
+ let(:timing_configuration) do
+ [
+ [1.second, 1.second]
+ ]
+ end
+
+ it 'disables the lock_timeout' do
+ allow(subject).to receive(:run_block_with_lock_timeout).once.and_raise(ActiveRecord::LockWaitTimeout)
+
+ expect(subject).to receive(:disable_lock_timeout)
+
+ subject.run {}
+ end
+ end
+
+ context 'after the retries, without setting lock_timeout' do
+ let(:retry_count) { timing_configuration.size + 1 }
+
+ it_behaves_like 'retriable exclusive lock on `projects`' do
+ before do
+ expect(subject).to receive(:run_block_without_lock_timeout).and_call_original
+ end
+ end
+ end
+
+ context 'after the retries, when requested to raise an error' do
+ let(:expected_attempts_with_timeout) { timing_configuration.size }
+ let(:retry_count) { timing_configuration.size + 1 }
+
+ it 'raises an error instead of waiting indefinitely for the lock' do
+ lock_attempts = 0
+ lock_acquired = false
+
+ expect(subject).to receive(:sleep).exactly(expected_attempts_with_timeout - 1).times
+ expect(subject).to receive(:run_block_with_lock_timeout).exactly(expected_attempts_with_timeout).times.and_call_original
+
+ expect do
+ subject.run(raise_on_exhaustion: true) do
+ lock_attempts += 1
+
+ ActiveRecord::Base.transaction do
+ ActiveRecord::Base.connection.execute("LOCK TABLE #{Project.table_name} in exclusive mode")
+ lock_acquired = true
+ end
+ end
+ end.to raise_error(described_class::AttemptsExhaustedError)
+
+ expect(lock_attempts).to eq(retry_count - 1)
+ expect(lock_acquired).to eq(false)
+ end
+ end
+
+ context 'when statement timeout is reached' do
+ it 'raises StatementInvalid error' do
+ lock_acquired = false
+ ActiveRecord::Base.connection.execute("SET statement_timeout='100ms'")
+
+ expect do
+ subject.run do
+ ActiveRecord::Base.connection.execute("SELECT 1 FROM pg_sleep(0.11)") # 110ms
+ lock_acquired = true
+ end
+ end.to raise_error(ActiveRecord::StatementInvalid)
+
+ expect(lock_acquired).to eq(false)
+ end
+ end
+ end
+ end
+
+ context 'restoring local database variables' do
+ it 'does not change lock_timeout' do
+ expect { subject.run {} }.not_to change { ActiveRecord::Base.connection.execute("SHOW lock_timeout").to_a }
+ end
+
+ it 'does not change idle_in_transaction_session_timeout' do
+ expect { subject.run {} }.not_to change { ActiveRecord::Base.connection.execute("SHOW idle_in_transaction_session_timeout").to_a }
+ end
+ end
+
+ context 'casting durations correctly' do
+ let(:timing_configuration) { [[0.015.seconds, 0.025.seconds], [0.015.seconds, 0.025.seconds]] } # 15ms, 25ms
+
+ it 'executes `SET lock_timeout` using the configured timeout value in milliseconds' do
+ expect(ActiveRecord::Base.connection).to receive(:execute).with('RESET idle_in_transaction_session_timeout; RESET lock_timeout').and_call_original
+ expect(ActiveRecord::Base.connection).to receive(:execute).with("SET lock_timeout TO '15ms'").and_call_original
+
+ subject.run { }
+ end
+
+ it 'calls `sleep` after the first iteration fails, using the configured sleep time' do
+ expect(subject).to receive(:run_block_with_lock_timeout).and_raise(ActiveRecord::LockWaitTimeout).twice
+ expect(subject).to receive(:sleep).with(0.025)
+
+ subject.run { }
+ end
+ end
+end
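
Each entry in timing_configuration above is a [lock_timeout, sleep_time] pair: the helper sets lock_timeout for an attempt and, when the lock cannot be acquired, sleeps for the paired duration before trying again; once the pairs are exhausted the block runs one last time without a lock_timeout, or raises AttemptsExhaustedError when raise_on_exhaustion: true is passed. A rough usage sketch from a migration's #up (the column being added is invented for illustration; the constructor keywords mirror the ones used in the spec):

    def up
      with_retries = Gitlab::Database::WithLockRetriesOutsideTransaction.new(
        timing_configuration: [[0.1.seconds, 0.2.seconds], [0.5.seconds, 1.second]],
        logger: Gitlab::AppLogger
      )

      with_retries.run(raise_on_exhaustion: true) do
        # Placeholder DDL; any statement that needs an exclusive lock would go here.
        execute('ALTER TABLE projects ADD COLUMN example_column integer')
      end
    end
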
diff --git a/spec/lib/gitlab/database/with_lock_retries_spec.rb b/spec/lib/gitlab/database/with_lock_retries_spec.rb
index 563399ff0d9..b08f39fc92a 100644
--- a/spec/lib/gitlab/database/with_lock_retries_spec.rb
+++ b/spec/lib/gitlab/database/with_lock_retries_spec.rb
@@ -76,14 +76,14 @@ RSpec.describe Gitlab::Database::WithLockRetries do
lock_attempts = 0
lock_acquired = false
- # the actual number of attempts to run_block_with_transaction can never exceed the number of
+ # the actual number of attempts to run_block_with_lock_timeout can never exceed the number of
# timings_configurations, so here we limit the retry_count if it exceeds that value
#
# also, there is no call to sleep after the final attempt, which is why it will always be one less
expected_runs_with_timeout = [retry_count, timing_configuration.size].min
expect(subject).to receive(:sleep).exactly(expected_runs_with_timeout - 1).times
- expect(subject).to receive(:run_block_with_transaction).exactly(expected_runs_with_timeout).times.and_wrap_original do |method|
+ expect(subject).to receive(:run_block_with_lock_timeout).exactly(expected_runs_with_timeout).times.and_wrap_original do |method|
lock_fiber.resume if lock_attempts == retry_count
method.call
@@ -116,8 +116,8 @@ RSpec.describe Gitlab::Database::WithLockRetries do
context 'when there is no outer transaction: disable_ddl_transaction! is set in the migration' do
it 'does not disable the idle transaction timeout' do
allow(ActiveRecord::Base.connection).to receive(:transaction_open?).and_return(false)
- allow(subject).to receive(:run_block_with_transaction).once.and_raise(ActiveRecord::LockWaitTimeout)
- allow(subject).to receive(:run_block_with_transaction).once
+ allow(subject).to receive(:run_block_with_lock_timeout).once.and_raise(ActiveRecord::LockWaitTimeout)
+ allow(subject).to receive(:run_block_with_lock_timeout).once
expect(subject).not_to receive(:disable_idle_in_transaction_timeout)
@@ -130,7 +130,7 @@ RSpec.describe Gitlab::Database::WithLockRetries do
allow(ActiveRecord::Base.connection).to receive(:transaction_open?).and_return(true)
n = 0
- allow(subject).to receive(:run_block_with_transaction).twice do
+ allow(subject).to receive(:run_block_with_lock_timeout).twice do
n += 1
raise(ActiveRecord::LockWaitTimeout) if n == 1
end
@@ -153,7 +153,7 @@ RSpec.describe Gitlab::Database::WithLockRetries do
context 'when there is no outer transaction: disable_ddl_transaction! is set in the migration' do
it 'does not disable the lock_timeout' do
allow(ActiveRecord::Base.connection).to receive(:transaction_open?).and_return(false)
- allow(subject).to receive(:run_block_with_transaction).once.and_raise(ActiveRecord::LockWaitTimeout)
+ allow(subject).to receive(:run_block_with_lock_timeout).once.and_raise(ActiveRecord::LockWaitTimeout)
expect(subject).not_to receive(:disable_lock_timeout)
@@ -164,7 +164,7 @@ RSpec.describe Gitlab::Database::WithLockRetries do
context 'when there is outer transaction: disable_ddl_transaction! is not set in the migration' do
it 'disables the lock_timeout' do
allow(ActiveRecord::Base.connection).to receive(:transaction_open?).and_return(true)
- allow(subject).to receive(:run_block_with_transaction).once.and_raise(ActiveRecord::LockWaitTimeout)
+ allow(subject).to receive(:run_block_with_lock_timeout).once.and_raise(ActiveRecord::LockWaitTimeout)
expect(subject).to receive(:disable_lock_timeout)
@@ -192,7 +192,7 @@ RSpec.describe Gitlab::Database::WithLockRetries do
lock_acquired = false
expect(subject).to receive(:sleep).exactly(expected_attempts_with_timeout - 1).times
- expect(subject).to receive(:run_block_with_transaction).exactly(expected_attempts_with_timeout).times.and_call_original
+ expect(subject).to receive(:run_block_with_lock_timeout).exactly(expected_attempts_with_timeout).times.and_call_original
expect do
subject.run(raise_on_exhaustion: true) do
@@ -251,7 +251,7 @@ RSpec.describe Gitlab::Database::WithLockRetries do
end
it 'calls `sleep` after the first iteration fails, using the configured sleep time' do
- expect(subject).to receive(:run_block_with_transaction).and_raise(ActiveRecord::LockWaitTimeout).twice
+ expect(subject).to receive(:run_block_with_lock_timeout).and_raise(ActiveRecord::LockWaitTimeout).twice
expect(subject).to receive(:sleep).with(0.025)
subject.run { }
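
The hunks above only track a method rename inside the class under test (run_block_with_transaction became run_block_with_lock_timeout), so the stubs and message expectations now target the new name. As a mental model, the loop these specs exercise behaves roughly like the simplified sketch below; set_lock_timeout is a placeholder for the SET lock_timeout call, while disable_lock_timeout and AttemptsExhaustedError come from the spec itself:

    def run(raise_on_exhaustion: false)
      timing_configuration.each_with_index do |(lock_timeout, sleep_time), index|
        begin
          set_lock_timeout(lock_timeout)   # e.g. SET lock_timeout TO '15ms'
          return yield                     # the run_block_with_lock_timeout path
        rescue ActiveRecord::LockWaitTimeout
          last = index == timing_configuration.size - 1
          raise AttemptsExhaustedError, 'lock retries exhausted' if last && raise_on_exhaustion

          sleep(sleep_time) unless last    # no sleep after the final timed attempt
        end
      end

      disable_lock_timeout                 # last resort: wait for the lock with no timeout
      yield
    end
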