gitlab.com/gitlab-org/gitlab-foss.git
Diffstat (limited to 'spec/support/shared_examples/workers/background_migration_worker_shared_examples.rb')
-rw-r--r--  spec/support/shared_examples/workers/background_migration_worker_shared_examples.rb  238
1 file changed, 137 insertions(+), 101 deletions(-)
diff --git a/spec/support/shared_examples/workers/background_migration_worker_shared_examples.rb b/spec/support/shared_examples/workers/background_migration_worker_shared_examples.rb
index 7fdf049a823..8ecb04bfdd6 100644
--- a/spec/support/shared_examples/workers/background_migration_worker_shared_examples.rb
+++ b/spec/support/shared_examples/workers/background_migration_worker_shared_examples.rb
@@ -42,159 +42,195 @@ RSpec.shared_examples 'it runs background migration jobs' do |tracking_database|
describe '#perform' do
let(:worker) { described_class.new }
- before do
- allow(worker).to receive(:jid).and_return(1)
- allow(worker).to receive(:always_perform?).and_return(false)
+ context 'when execute_background_migrations feature flag is disabled' do
+ before do
+ stub_feature_flags(execute_background_migrations: false)
+ end
- allow(Postgresql::ReplicationSlot).to receive(:lag_too_great?).and_return(false)
- end
+ it 'does not perform the job, reschedules it in the future, and logs a message' do
+ expect(worker).not_to receive(:perform_with_connection)
- it 'performs jobs using the coordinator for the worker' do
- expect_next_instance_of(Gitlab::BackgroundMigration::JobCoordinator) do |coordinator|
- allow(coordinator).to receive(:with_shared_connection).and_yield
+ expect(Sidekiq.logger).to receive(:info) do |payload|
+ expect(payload[:class]).to eq(described_class.name)
+ expect(payload[:database]).to eq(tracking_database)
+ expect(payload[:message]).to match(/skipping execution, migration rescheduled/)
+ end
- expect(coordinator.worker_class).to eq(described_class)
- expect(coordinator).to receive(:perform).with('Foo', [10, 20])
- end
+ lease_attempts = 3
+ delay = described_class::BACKGROUND_MIGRATIONS_DELAY
+ job_args = [10, 20]
- worker.perform('Foo', [10, 20])
- end
+ freeze_time do
+ worker.perform('Foo', job_args, lease_attempts)
- context 'when lease can be obtained' do
- let(:coordinator) { double('job coordinator') }
+ job = described_class.jobs.find { |job| job['args'] == ['Foo', job_args, lease_attempts] }
+ expect(job).to be, "Expected the job to be rescheduled with (#{job_args}, #{lease_attempts}), but it was not."
+ expected_time = delay.to_i + Time.now.to_i
+ expect(job['at']).to eq(expected_time),
+ "Expected the job to be rescheduled in #{expected_time} seconds, " \
+ "but it was rescheduled in #{job['at']} seconds."
+ end
+ end
+ end
+
+ context 'when execute_background_migrations feature flag is enabled' do
before do
- allow(Gitlab::BackgroundMigration).to receive(:coordinator_for_database)
- .with(tracking_database)
- .and_return(coordinator)
+ stub_feature_flags(execute_background_migrations: true)
- allow(coordinator).to receive(:with_shared_connection).and_yield
+ allow(worker).to receive(:jid).and_return(1)
+ allow(worker).to receive(:always_perform?).and_return(false)
+
+ allow(Postgresql::ReplicationSlot).to receive(:lag_too_great?).and_return(false)
end
- it 'sets up the shared connection before checking replication' do
- expect(coordinator).to receive(:with_shared_connection).and_yield.ordered
- expect(Postgresql::ReplicationSlot).to receive(:lag_too_great?).and_return(false).ordered
+ it 'performs jobs using the coordinator for the worker' do
+ expect_next_instance_of(Gitlab::BackgroundMigration::JobCoordinator) do |coordinator|
+ allow(coordinator).to receive(:with_shared_connection).and_yield
- expect(coordinator).to receive(:perform).with('Foo', [10, 20])
+ expect(coordinator.worker_class).to eq(described_class)
+ expect(coordinator).to receive(:perform).with('Foo', [10, 20])
+ end
worker.perform('Foo', [10, 20])
end
- it 'performs a background migration' do
- expect(coordinator).to receive(:perform).with('Foo', [10, 20])
+ context 'when lease can be obtained' do
+ let(:coordinator) { double('job coordinator') }
- worker.perform('Foo', [10, 20])
- end
+ before do
+ allow(Gitlab::BackgroundMigration).to receive(:coordinator_for_database)
+ .with(tracking_database)
+ .and_return(coordinator)
+
+ allow(coordinator).to receive(:with_shared_connection).and_yield
+ end
+
+ it 'sets up the shared connection before checking replication' do
+ expect(coordinator).to receive(:with_shared_connection).and_yield.ordered
+ expect(Postgresql::ReplicationSlot).to receive(:lag_too_great?).and_return(false).ordered
- context 'when lease_attempts is 1' do
- it 'performs a background migration' do
expect(coordinator).to receive(:perform).with('Foo', [10, 20])
- worker.perform('Foo', [10, 20], 1)
+ worker.perform('Foo', [10, 20])
end
- end
- it 'can run scheduled job and retried job concurrently' do
- expect(coordinator)
- .to receive(:perform)
- .with('Foo', [10, 20])
- .exactly(2).time
-
- worker.perform('Foo', [10, 20])
- worker.perform('Foo', [10, 20], described_class::MAX_LEASE_ATTEMPTS - 1)
- end
+ it 'performs a background migration' do
+ expect(coordinator).to receive(:perform).with('Foo', [10, 20])
- it 'sets the class that will be executed as the caller_id' do
- expect(coordinator).to receive(:perform) do
- expect(Gitlab::ApplicationContext.current).to include('meta.caller_id' => 'Foo')
+ worker.perform('Foo', [10, 20])
end
- worker.perform('Foo', [10, 20])
- end
- end
+ context 'when lease_attempts is 1' do
+ it 'performs a background migration' do
+ expect(coordinator).to receive(:perform).with('Foo', [10, 20])
- context 'when lease not obtained (migration of same class was performed recently)' do
- let(:timeout) { described_class.minimum_interval }
- let(:lease_key) { "#{described_class.name}:Foo" }
- let(:coordinator) { double('job coordinator') }
+ worker.perform('Foo', [10, 20], 1)
+ end
+ end
- before do
- allow(Gitlab::BackgroundMigration).to receive(:coordinator_for_database)
- .with(tracking_database)
- .and_return(coordinator)
+ it 'can run scheduled job and retried job concurrently' do
+ expect(coordinator)
+ .to receive(:perform)
+ .with('Foo', [10, 20])
+ .exactly(2).time
- allow(coordinator).to receive(:with_shared_connection).and_yield
+ worker.perform('Foo', [10, 20])
+ worker.perform('Foo', [10, 20], described_class::MAX_LEASE_ATTEMPTS - 1)
+ end
- expect(coordinator).not_to receive(:perform)
+ it 'sets the class that will be executed as the caller_id' do
+ expect(coordinator).to receive(:perform) do
+ expect(Gitlab::ApplicationContext.current).to include('meta.caller_id' => 'Foo')
+ end
- Gitlab::ExclusiveLease.new(lease_key, timeout: timeout).try_obtain
+ worker.perform('Foo', [10, 20])
+ end
end
- it 'reschedules the migration and decrements the lease_attempts' do
- expect(described_class)
- .to receive(:perform_in)
- .with(a_kind_of(Numeric), 'Foo', [10, 20], 4)
+ context 'when lease not obtained (migration of same class was performed recently)' do
+ let(:timeout) { described_class.minimum_interval }
+ let(:lease_key) { "#{described_class.name}:Foo" }
+ let(:coordinator) { double('job coordinator') }
- worker.perform('Foo', [10, 20], 5)
- end
+ before do
+ allow(Gitlab::BackgroundMigration).to receive(:coordinator_for_database)
+ .with(tracking_database)
+ .and_return(coordinator)
- context 'when lease_attempts is 1' do
- let(:lease_key) { "#{described_class.name}:Foo:retried" }
+ allow(coordinator).to receive(:with_shared_connection).and_yield
+
+ expect(coordinator).not_to receive(:perform)
+
+ Gitlab::ExclusiveLease.new(lease_key, timeout: timeout).try_obtain
+ end
it 'reschedules the migration and decrements the lease_attempts' do
expect(described_class)
.to receive(:perform_in)
- .with(a_kind_of(Numeric), 'Foo', [10, 20], 0)
+ .with(a_kind_of(Numeric), 'Foo', [10, 20], 4)
- worker.perform('Foo', [10, 20], 1)
+ worker.perform('Foo', [10, 20], 5)
end
- end
- context 'when lease_attempts is 0' do
- let(:lease_key) { "#{described_class.name}:Foo:retried" }
+ context 'when lease_attempts is 1' do
+ let(:lease_key) { "#{described_class.name}:Foo:retried" }
- it 'gives up performing the migration' do
- expect(described_class).not_to receive(:perform_in)
- expect(Sidekiq.logger).to receive(:warn).with(
- class: 'Foo',
- message: 'Job could not get an exclusive lease after several tries. Giving up.',
- job_id: 1)
+ it 'reschedules the migration and decrements the lease_attempts' do
+ expect(described_class)
+ .to receive(:perform_in)
+ .with(a_kind_of(Numeric), 'Foo', [10, 20], 0)
- worker.perform('Foo', [10, 20], 0)
+ worker.perform('Foo', [10, 20], 1)
+ end
end
- end
- end
- context 'when database is not healthy' do
- before do
- expect(Postgresql::ReplicationSlot).to receive(:lag_too_great?).and_return(true)
- end
+ context 'when lease_attempts is 0' do
+ let(:lease_key) { "#{described_class.name}:Foo:retried" }
- it 'reschedules a migration if the database is not healthy' do
- expect(described_class)
- .to receive(:perform_in)
- .with(a_kind_of(Numeric), 'Foo', [10, 20], 4)
+ it 'gives up performing the migration' do
+ expect(described_class).not_to receive(:perform_in)
+ expect(Sidekiq.logger).to receive(:warn).with(
+ class: 'Foo',
+ message: 'Job could not get an exclusive lease after several tries. Giving up.',
+ job_id: 1)
- worker.perform('Foo', [10, 20])
+ worker.perform('Foo', [10, 20], 0)
+ end
+ end
end
- it 'increments the unhealthy counter' do
- counter = Gitlab::Metrics.counter(:background_migration_database_health_reschedules, 'msg')
+ context 'when database is not healthy' do
+ before do
+ expect(Postgresql::ReplicationSlot).to receive(:lag_too_great?).and_return(true)
+ end
- expect(described_class).to receive(:perform_in)
+ it 'reschedules a migration if the database is not healthy' do
+ expect(described_class)
+ .to receive(:perform_in)
+ .with(a_kind_of(Numeric), 'Foo', [10, 20], 4)
- expect { worker.perform('Foo', [10, 20]) }.to change { counter.get(db_config_name: tracking_database) }.by(1)
- end
+ worker.perform('Foo', [10, 20])
+ end
+
+ it 'increments the unhealthy counter' do
+ counter = Gitlab::Metrics.counter(:background_migration_database_health_reschedules, 'msg')
+
+ expect(described_class).to receive(:perform_in)
+
+ expect { worker.perform('Foo', [10, 20]) }.to change { counter.get(db_config_name: tracking_database) }.by(1)
+ end
- context 'when lease_attempts is 0' do
- it 'gives up performing the migration' do
- expect(described_class).not_to receive(:perform_in)
- expect(Sidekiq.logger).to receive(:warn).with(
- class: 'Foo',
- message: 'Database was unhealthy after several tries. Giving up.',
- job_id: 1)
+ context 'when lease_attempts is 0' do
+ it 'gives up performing the migration' do
+ expect(described_class).not_to receive(:perform_in)
+ expect(Sidekiq.logger).to receive(:warn).with(
+ class: 'Foo',
+ message: 'Database was unhealthy after several tries. Giving up.',
+ job_id: 1)
- worker.perform('Foo', [10, 20], 0)
+ worker.perform('Foo', [10, 20], 0)
+ end
end
end
end