gitlab.com/gitlab-org/gitlab-foss.git
Diffstat (limited to 'spec/support/shared_examples/workers')
-rw-r--r--  spec/support/shared_examples/workers/background_migration_worker_shared_examples.rb         | 238
-rw-r--r--  spec/support/shared_examples/workers/batched_background_migration_worker_shared_examples.rb |  42
-rw-r--r--  spec/support/shared_examples/workers/idempotency_shared_examples.rb                         |  14
3 files changed, 183 insertions(+), 111 deletions(-)
diff --git a/spec/support/shared_examples/workers/background_migration_worker_shared_examples.rb b/spec/support/shared_examples/workers/background_migration_worker_shared_examples.rb
index 7fdf049a823..8ecb04bfdd6 100644
--- a/spec/support/shared_examples/workers/background_migration_worker_shared_examples.rb
+++ b/spec/support/shared_examples/workers/background_migration_worker_shared_examples.rb
@@ -42,159 +42,195 @@ RSpec.shared_examples 'it runs background migration jobs' do |tracking_database|
describe '#perform' do
let(:worker) { described_class.new }
- before do
- allow(worker).to receive(:jid).and_return(1)
- allow(worker).to receive(:always_perform?).and_return(false)
+ context 'when execute_background_migrations feature flag is disabled' do
+ before do
+ stub_feature_flags(execute_background_migrations: false)
+ end
- allow(Postgresql::ReplicationSlot).to receive(:lag_too_great?).and_return(false)
- end
+ it 'does not perform the job, reschedules it in the future, and logs a message' do
+ expect(worker).not_to receive(:perform_with_connection)
- it 'performs jobs using the coordinator for the worker' do
- expect_next_instance_of(Gitlab::BackgroundMigration::JobCoordinator) do |coordinator|
- allow(coordinator).to receive(:with_shared_connection).and_yield
+ expect(Sidekiq.logger).to receive(:info) do |payload|
+ expect(payload[:class]).to eq(described_class.name)
+ expect(payload[:database]).to eq(tracking_database)
+ expect(payload[:message]).to match(/skipping execution, migration rescheduled/)
+ end
- expect(coordinator.worker_class).to eq(described_class)
- expect(coordinator).to receive(:perform).with('Foo', [10, 20])
- end
+ lease_attempts = 3
+ delay = described_class::BACKGROUND_MIGRATIONS_DELAY
+ job_args = [10, 20]
- worker.perform('Foo', [10, 20])
- end
+ freeze_time do
+ worker.perform('Foo', job_args, lease_attempts)
- context 'when lease can be obtained' do
- let(:coordinator) { double('job coordinator') }
+ job = described_class.jobs.find { |job| job['args'] == ['Foo', job_args, lease_attempts] }
+ expect(job).to be, "Expected the job to be rescheduled with (#{job_args}, #{lease_attempts}), but it was not."
+ expected_time = delay.to_i + Time.now.to_i
+ expect(job['at']).to eq(expected_time),
+ "Expected the job to be rescheduled in #{expected_time} seconds, " \
+ "but it was rescheduled in #{job['at']} seconds."
+ end
+ end
+ end
+
+ context 'when execute_background_migrations feature flag is enabled' do
before do
- allow(Gitlab::BackgroundMigration).to receive(:coordinator_for_database)
- .with(tracking_database)
- .and_return(coordinator)
+ stub_feature_flags(execute_background_migrations: true)
- allow(coordinator).to receive(:with_shared_connection).and_yield
+ allow(worker).to receive(:jid).and_return(1)
+ allow(worker).to receive(:always_perform?).and_return(false)
+
+ allow(Postgresql::ReplicationSlot).to receive(:lag_too_great?).and_return(false)
end
- it 'sets up the shared connection before checking replication' do
- expect(coordinator).to receive(:with_shared_connection).and_yield.ordered
- expect(Postgresql::ReplicationSlot).to receive(:lag_too_great?).and_return(false).ordered
+ it 'performs jobs using the coordinator for the worker' do
+ expect_next_instance_of(Gitlab::BackgroundMigration::JobCoordinator) do |coordinator|
+ allow(coordinator).to receive(:with_shared_connection).and_yield
- expect(coordinator).to receive(:perform).with('Foo', [10, 20])
+ expect(coordinator.worker_class).to eq(described_class)
+ expect(coordinator).to receive(:perform).with('Foo', [10, 20])
+ end
worker.perform('Foo', [10, 20])
end
- it 'performs a background migration' do
- expect(coordinator).to receive(:perform).with('Foo', [10, 20])
+ context 'when lease can be obtained' do
+ let(:coordinator) { double('job coordinator') }
- worker.perform('Foo', [10, 20])
- end
+ before do
+ allow(Gitlab::BackgroundMigration).to receive(:coordinator_for_database)
+ .with(tracking_database)
+ .and_return(coordinator)
+
+ allow(coordinator).to receive(:with_shared_connection).and_yield
+ end
+
+ it 'sets up the shared connection before checking replication' do
+ expect(coordinator).to receive(:with_shared_connection).and_yield.ordered
+ expect(Postgresql::ReplicationSlot).to receive(:lag_too_great?).and_return(false).ordered
- context 'when lease_attempts is 1' do
- it 'performs a background migration' do
expect(coordinator).to receive(:perform).with('Foo', [10, 20])
- worker.perform('Foo', [10, 20], 1)
+ worker.perform('Foo', [10, 20])
end
- end
- it 'can run scheduled job and retried job concurrently' do
- expect(coordinator)
- .to receive(:perform)
- .with('Foo', [10, 20])
- .exactly(2).time
-
- worker.perform('Foo', [10, 20])
- worker.perform('Foo', [10, 20], described_class::MAX_LEASE_ATTEMPTS - 1)
- end
+ it 'performs a background migration' do
+ expect(coordinator).to receive(:perform).with('Foo', [10, 20])
- it 'sets the class that will be executed as the caller_id' do
- expect(coordinator).to receive(:perform) do
- expect(Gitlab::ApplicationContext.current).to include('meta.caller_id' => 'Foo')
+ worker.perform('Foo', [10, 20])
end
- worker.perform('Foo', [10, 20])
- end
- end
+ context 'when lease_attempts is 1' do
+ it 'performs a background migration' do
+ expect(coordinator).to receive(:perform).with('Foo', [10, 20])
- context 'when lease not obtained (migration of same class was performed recently)' do
- let(:timeout) { described_class.minimum_interval }
- let(:lease_key) { "#{described_class.name}:Foo" }
- let(:coordinator) { double('job coordinator') }
+ worker.perform('Foo', [10, 20], 1)
+ end
+ end
- before do
- allow(Gitlab::BackgroundMigration).to receive(:coordinator_for_database)
- .with(tracking_database)
- .and_return(coordinator)
+ it 'can run scheduled job and retried job concurrently' do
+ expect(coordinator)
+ .to receive(:perform)
+ .with('Foo', [10, 20])
+ .exactly(2).time
- allow(coordinator).to receive(:with_shared_connection).and_yield
+ worker.perform('Foo', [10, 20])
+ worker.perform('Foo', [10, 20], described_class::MAX_LEASE_ATTEMPTS - 1)
+ end
- expect(coordinator).not_to receive(:perform)
+ it 'sets the class that will be executed as the caller_id' do
+ expect(coordinator).to receive(:perform) do
+ expect(Gitlab::ApplicationContext.current).to include('meta.caller_id' => 'Foo')
+ end
- Gitlab::ExclusiveLease.new(lease_key, timeout: timeout).try_obtain
+ worker.perform('Foo', [10, 20])
+ end
end
- it 'reschedules the migration and decrements the lease_attempts' do
- expect(described_class)
- .to receive(:perform_in)
- .with(a_kind_of(Numeric), 'Foo', [10, 20], 4)
+ context 'when lease not obtained (migration of same class was performed recently)' do
+ let(:timeout) { described_class.minimum_interval }
+ let(:lease_key) { "#{described_class.name}:Foo" }
+ let(:coordinator) { double('job coordinator') }
- worker.perform('Foo', [10, 20], 5)
- end
+ before do
+ allow(Gitlab::BackgroundMigration).to receive(:coordinator_for_database)
+ .with(tracking_database)
+ .and_return(coordinator)
- context 'when lease_attempts is 1' do
- let(:lease_key) { "#{described_class.name}:Foo:retried" }
+ allow(coordinator).to receive(:with_shared_connection).and_yield
+
+ expect(coordinator).not_to receive(:perform)
+
+ Gitlab::ExclusiveLease.new(lease_key, timeout: timeout).try_obtain
+ end
it 'reschedules the migration and decrements the lease_attempts' do
expect(described_class)
.to receive(:perform_in)
- .with(a_kind_of(Numeric), 'Foo', [10, 20], 0)
+ .with(a_kind_of(Numeric), 'Foo', [10, 20], 4)
- worker.perform('Foo', [10, 20], 1)
+ worker.perform('Foo', [10, 20], 5)
end
- end
- context 'when lease_attempts is 0' do
- let(:lease_key) { "#{described_class.name}:Foo:retried" }
+ context 'when lease_attempts is 1' do
+ let(:lease_key) { "#{described_class.name}:Foo:retried" }
- it 'gives up performing the migration' do
- expect(described_class).not_to receive(:perform_in)
- expect(Sidekiq.logger).to receive(:warn).with(
- class: 'Foo',
- message: 'Job could not get an exclusive lease after several tries. Giving up.',
- job_id: 1)
+ it 'reschedules the migration and decrements the lease_attempts' do
+ expect(described_class)
+ .to receive(:perform_in)
+ .with(a_kind_of(Numeric), 'Foo', [10, 20], 0)
- worker.perform('Foo', [10, 20], 0)
+ worker.perform('Foo', [10, 20], 1)
+ end
end
- end
- end
- context 'when database is not healthy' do
- before do
- expect(Postgresql::ReplicationSlot).to receive(:lag_too_great?).and_return(true)
- end
+ context 'when lease_attempts is 0' do
+ let(:lease_key) { "#{described_class.name}:Foo:retried" }
- it 'reschedules a migration if the database is not healthy' do
- expect(described_class)
- .to receive(:perform_in)
- .with(a_kind_of(Numeric), 'Foo', [10, 20], 4)
+ it 'gives up performing the migration' do
+ expect(described_class).not_to receive(:perform_in)
+ expect(Sidekiq.logger).to receive(:warn).with(
+ class: 'Foo',
+ message: 'Job could not get an exclusive lease after several tries. Giving up.',
+ job_id: 1)
- worker.perform('Foo', [10, 20])
+ worker.perform('Foo', [10, 20], 0)
+ end
+ end
end
- it 'increments the unhealthy counter' do
- counter = Gitlab::Metrics.counter(:background_migration_database_health_reschedules, 'msg')
+ context 'when database is not healthy' do
+ before do
+ expect(Postgresql::ReplicationSlot).to receive(:lag_too_great?).and_return(true)
+ end
- expect(described_class).to receive(:perform_in)
+ it 'reschedules a migration if the database is not healthy' do
+ expect(described_class)
+ .to receive(:perform_in)
+ .with(a_kind_of(Numeric), 'Foo', [10, 20], 4)
- expect { worker.perform('Foo', [10, 20]) }.to change { counter.get(db_config_name: tracking_database) }.by(1)
- end
+ worker.perform('Foo', [10, 20])
+ end
+
+ it 'increments the unhealthy counter' do
+ counter = Gitlab::Metrics.counter(:background_migration_database_health_reschedules, 'msg')
+
+ expect(described_class).to receive(:perform_in)
+
+ expect { worker.perform('Foo', [10, 20]) }.to change { counter.get(db_config_name: tracking_database) }.by(1)
+ end
- context 'when lease_attempts is 0' do
- it 'gives up performing the migration' do
- expect(described_class).not_to receive(:perform_in)
- expect(Sidekiq.logger).to receive(:warn).with(
- class: 'Foo',
- message: 'Database was unhealthy after several tries. Giving up.',
- job_id: 1)
+ context 'when lease_attempts is 0' do
+ it 'gives up performing the migration' do
+ expect(described_class).not_to receive(:perform_in)
+ expect(Sidekiq.logger).to receive(:warn).with(
+ class: 'Foo',
+ message: 'Database was unhealthy after several tries. Giving up.',
+ job_id: 1)
- worker.perform('Foo', [10, 20], 0)
+ worker.perform('Foo', [10, 20], 0)
+ end
end
end
end
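For orientation, the new "feature flag is disabled" examples above pin down a log-and-reschedule path in the worker. The following is a rough sketch of that behaviour inferred only from the spec assertions, not copied from the worker itself; the default argument values, the tracking_database accessor, and the exact Feature.enabled? call shape are assumptions.

# Sketch of the behaviour the disabled-flag examples describe (inferred from the spec).
def perform(class_name, arguments = [], lease_attempts = 5) # defaults are assumptions
  unless Feature.enabled?(:execute_background_migrations)
    # Payload keys match the spec: :class, :database, :message.
    Sidekiq.logger.info(
      class: self.class.name,
      database: self.class.tracking_database, # accessor name assumed
      message: 'skipping execution, migration rescheduled'
    )

    # Reschedule with the same arguments and lease_attempts, BACKGROUND_MIGRATIONS_DELAY
    # seconds from now — this is what the job['at'] assertion checks under freeze_time.
    return self.class.perform_in(
      self.class::BACKGROUND_MIGRATIONS_DELAY, class_name, arguments, lease_attempts
    )
  end

  # Method name taken from the spec's negative expectation (perform_with_connection).
  perform_with_connection(class_name, arguments, lease_attempts)
end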
diff --git a/spec/support/shared_examples/workers/batched_background_migration_worker_shared_examples.rb b/spec/support/shared_examples/workers/batched_background_migration_worker_shared_examples.rb
index 3d4e840fe2d..54962eac100 100644
--- a/spec/support/shared_examples/workers/batched_background_migration_worker_shared_examples.rb
+++ b/spec/support/shared_examples/workers/batched_background_migration_worker_shared_examples.rb
@@ -1,6 +1,6 @@
# frozen_string_literal: true
-RSpec.shared_examples 'it runs batched background migration jobs' do |tracking_database, feature_flag:|
+RSpec.shared_examples 'it runs batched background migration jobs' do |tracking_database|
include ExclusiveLeaseHelpers
describe 'defining the job attributes' do
@@ -40,13 +40,17 @@ RSpec.shared_examples 'it runs batched background migration jobs' do |tracking_d
end
describe '.enabled?' do
- it 'does not raise an error' do
- expect { described_class.enabled? }.not_to raise_error
- end
+ it 'returns true when execute_batched_migrations_on_schedule feature flag is enabled' do
+ stub_feature_flags(execute_batched_migrations_on_schedule: true)
- it 'returns true' do
expect(described_class.enabled?).to be_truthy
end
+
+ it 'returns false when execute_batched_migrations_on_schedule feature flag is disabled' do
+ stub_feature_flags(execute_batched_migrations_on_schedule: false)
+
+ expect(described_class.enabled?).to be_falsey
+ end
end
describe '#perform' do
@@ -86,7 +90,7 @@ RSpec.shared_examples 'it runs batched background migration jobs' do |tracking_d
context 'when the feature flag is disabled' do
before do
- stub_feature_flags(feature_flag => false)
+ stub_feature_flags(execute_batched_migrations_on_schedule: false)
end
it 'does nothing' do
@@ -98,10 +102,26 @@ RSpec.shared_examples 'it runs batched background migration jobs' do |tracking_d
end
context 'when the feature flag is enabled' do
+ let(:base_model) { Gitlab::Database.database_base_models[tracking_database] }
+
before do
- stub_feature_flags(feature_flag => true)
+ stub_feature_flags(execute_batched_migrations_on_schedule: true)
- allow(Gitlab::Database::BackgroundMigration::BatchedMigration).to receive(:active_migration).and_return(nil)
+ allow(Gitlab::Database::BackgroundMigration::BatchedMigration).to receive(:active_migration)
+ .with(connection: base_model.connection)
+ .and_return(nil)
+ end
+
+ context 'when database config is shared' do
+ it 'does nothing' do
+ expect(Gitlab::Database).to receive(:db_config_share_with)
+ .with(base_model.connection_db_config).and_return('main')
+
+ expect(worker).not_to receive(:active_migration)
+ expect(worker).not_to receive(:run_active_migration)
+
+ worker.perform
+ end
end
context 'when no active migrations exist' do
@@ -121,6 +141,7 @@ RSpec.shared_examples 'it runs batched background migration jobs' do |tracking_d
before do
allow(Gitlab::Database::BackgroundMigration::BatchedMigration).to receive(:active_migration)
+ .with(connection: base_model.connection)
.and_return(migration)
allow(migration).to receive(:interval_elapsed?).with(variance: interval_variance).and_return(true)
@@ -222,6 +243,7 @@ RSpec.shared_examples 'it runs batched background migration jobs' do |tracking_d
end
end
+ let(:gitlab_schema) { "gitlab_#{tracking_database}" }
let!(:migration) do
create(
:batched_background_migration,
@@ -232,10 +254,12 @@ RSpec.shared_examples 'it runs batched background migration jobs' do |tracking_d
batch_size: batch_size,
sub_batch_size: sub_batch_size,
job_class_name: 'ExampleDataMigration',
- job_arguments: [1]
+ job_arguments: [1],
+ gitlab_schema: gitlab_schema
)
end
+ let(:base_model) { Gitlab::Database.database_base_models[tracking_database] }
let(:table_name) { 'example_data' }
let(:batch_size) { 5 }
let(:sub_batch_size) { 2 }
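The signature change at the top of this file drops the feature_flag: keyword, so callers of the shared example only pass the tracking database. A minimal before/after sketch (the :main value and the keyword the old callers presumably passed are illustrative):

# Before this change (illustrative):
it_behaves_like 'it runs batched background migration jobs', :main,
  feature_flag: :execute_batched_migrations_on_schedule

# After: the execute_batched_migrations_on_schedule flag name is fixed inside
# the shared examples, so callers only pass the tracking database.
it_behaves_like 'it runs batched background migration jobs', :main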
diff --git a/spec/support/shared_examples/workers/idempotency_shared_examples.rb b/spec/support/shared_examples/workers/idempotency_shared_examples.rb
index 9d9b371d61a..be43ea7d5f0 100644
--- a/spec/support/shared_examples/workers/idempotency_shared_examples.rb
+++ b/spec/support/shared_examples/workers/idempotency_shared_examples.rb
@@ -20,7 +20,11 @@ RSpec.shared_examples 'an idempotent worker' do
# Avoid stubbing calls for a more accurate run.
subject do
- defined?(job_args) ? perform_multiple(job_args) : perform_multiple
+ if described_class.include?(::Gitlab::EventStore::Subscriber)
+ event_worker
+ else
+ standard_worker
+ end
end
it 'is labeled as idempotent' do
@@ -30,4 +34,12 @@ RSpec.shared_examples 'an idempotent worker' do
it 'performs multiple times sequentially without raising an exception' do
expect { subject }.not_to raise_error
end
+
+ def event_worker
+ consume_event(subscriber: described_class, event: event)
+ end
+
+ def standard_worker
+ defined?(job_args) ? perform_multiple(job_args) : perform_multiple
+ end
end
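With this change the idempotency shared example dispatches on whether the worker includes Gitlab::EventStore::Subscriber. A minimal usage sketch; the worker and event classes below are made up, only the shape of the two include styles comes from the diff:

# Event-driven worker: defines `event`, so the shared example calls
# consume_event(subscriber: described_class, event: event).
RSpec.describe SomeEventSubscriberWorker do
  let(:event) { SomeDomainEvent.new(data: { id: 1 }) }

  it_behaves_like 'an idempotent worker'
end

# Plain worker: no Subscriber module, so the shared example falls back to
# perform_multiple(job_args) (or perform_multiple when job_args is undefined).
RSpec.describe SomePlainWorker do
  let(:job_args) { [1] }

  it_behaves_like 'an idempotent worker'
end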