diff options
author | GitLab Bot <gitlab-bot@gitlab.com> | 2021-07-20 12:55:51 +0300 |
---|---|---|
committer | GitLab Bot <gitlab-bot@gitlab.com> | 2021-07-20 12:55:51 +0300 |
commit | e8d2c2579383897a1dd7f9debd359abe8ae8373d (patch) | |
tree | c42be41678c2586d49a75cabce89322082698334 /spec/lib/gitlab/database/load_balancing/sidekiq_server_middleware_spec.rb | |
parent | fc845b37ec3a90aaa719975f607740c22ba6a113 (diff) |
Add latest changes from gitlab-org/gitlab@14-1-stable-ee (tag: v14.1.0-rc42)
Diffstat (limited to 'spec/lib/gitlab/database/load_balancing/sidekiq_server_middleware_spec.rb')
-rw-r--r-- | spec/lib/gitlab/database/load_balancing/sidekiq_server_middleware_spec.rb | 113 |
1 files changed, 58 insertions, 55 deletions
diff --git a/spec/lib/gitlab/database/load_balancing/sidekiq_server_middleware_spec.rb b/spec/lib/gitlab/database/load_balancing/sidekiq_server_middleware_spec.rb index b7cd0caa922..14f240cd159 100644 --- a/spec/lib/gitlab/database/load_balancing/sidekiq_server_middleware_spec.rb +++ b/spec/lib/gitlab/database/load_balancing/sidekiq_server_middleware_spec.rb @@ -5,6 +5,19 @@ require 'spec_helper' RSpec.describe Gitlab::Database::LoadBalancing::SidekiqServerMiddleware do let(:middleware) { described_class.new } + let(:load_balancer) { double.as_null_object } + + let(:worker) { worker_class.new } + let(:job) { { "retry" => 3, "job_id" => "a180b47c-3fd6-41b8-81e9-34da61c3400e", 'database_replica_location' => '0/D525E3A8' } } + + before do + skip_feature_flags_yaml_validation + skip_default_enabled_yaml_check + allow(::Gitlab::Database::LoadBalancing).to receive_message_chain(:proxy, :load_balancer).and_return(load_balancer) + + replication_lag!(false) + end + after do Gitlab::Database::LoadBalancing::Session.clear_session end @@ -31,30 +44,34 @@ RSpec.describe Gitlab::Database::LoadBalancing::SidekiqServerMiddleware do end end - shared_examples_for 'stick to the primary' do + shared_examples_for 'load balancing strategy' do |strategy| + it "sets load balancing strategy to #{strategy}" do + run_middleware do + expect(job['load_balancing_strategy']).to eq(strategy) + end + end + end + + shared_examples_for 'stick to the primary' do |expected_strategy| it 'sticks to the primary' do - middleware.call(worker, job, double(:queue)) do + run_middleware do expect(Gitlab::Database::LoadBalancing::Session.current.use_primary?).to be_truthy end end + + include_examples 'load balancing strategy', expected_strategy end - shared_examples_for 'replica is up to date' do |location, data_consistency| + shared_examples_for 'replica is up to date' do |location, expected_strategy| it 'does not stick to the primary', :aggregate_failures do expect(middleware).to 
receive(:replica_caught_up?).with(location).and_return(true) - middleware.call(worker, job, double(:queue)) do + run_middleware do expect(Gitlab::Database::LoadBalancing::Session.current.use_primary?).not_to be_truthy end - - expect(job[:database_chosen]).to eq('replica') end - it "updates job hash with data_consistency :#{data_consistency}" do - middleware.call(worker, job, double(:queue)) do - expect(job).to include(data_consistency: data_consistency.to_s) - end - end + include_examples 'load balancing strategy', expected_strategy end shared_examples_for 'sticks based on data consistency' do |data_consistency| @@ -65,7 +82,7 @@ RSpec.describe Gitlab::Database::LoadBalancing::SidekiqServerMiddleware do stub_feature_flags(load_balancing_for_test_data_consistency_worker: false) end - include_examples 'stick to the primary' + include_examples 'stick to the primary', 'primary' end context 'when database replica location is set' do @@ -75,7 +92,7 @@ RSpec.describe Gitlab::Database::LoadBalancing::SidekiqServerMiddleware do allow(middleware).to receive(:replica_caught_up?).and_return(true) end - it_behaves_like 'replica is up to date', '0/D525E3A8', data_consistency + it_behaves_like 'replica is up to date', '0/D525E3A8', 'replica' end context 'when database primary location is set' do @@ -85,39 +102,26 @@ RSpec.describe Gitlab::Database::LoadBalancing::SidekiqServerMiddleware do allow(middleware).to receive(:replica_caught_up?).and_return(true) end - it_behaves_like 'replica is up to date', '0/D525E3A8', data_consistency + it_behaves_like 'replica is up to date', '0/D525E3A8', 'replica' end context 'when database location is not set' do let(:job) { { 'job_id' => 'a180b47c-3fd6-41b8-81e9-34da61c3400e' } } - it_behaves_like 'stick to the primary', nil + it_behaves_like 'stick to the primary', 'primary_no_wal' end end - let(:queue) { 'default' } - let(:redis_pool) { Sidekiq.redis_pool } - let(:worker) { worker_class.new } - let(:job) { { "retry" => 3, "job_id" => 
"a180b47c-3fd6-41b8-81e9-34da61c3400e", 'database_replica_location' => '0/D525E3A8' } } - let(:block) { 10 } - - before do - skip_feature_flags_yaml_validation - skip_default_enabled_yaml_check - allow(middleware).to receive(:clear) - allow(Gitlab::Database::LoadBalancing::Session.current).to receive(:performed_write?).and_return(true) - end - context 'when worker class does not include ApplicationWorker' do let(:worker) { ActiveJob::QueueAdapters::SidekiqAdapter::JobWrapper.new } - include_examples 'stick to the primary' + include_examples 'stick to the primary', 'primary' end context 'when worker data consistency is :always' do include_context 'data consistency worker class', :always, :load_balancing_for_test_data_consistency_worker - include_examples 'stick to the primary' + include_examples 'stick to the primary', 'primary' end context 'when worker data consistency is :delayed' do @@ -125,8 +129,7 @@ RSpec.describe Gitlab::Database::LoadBalancing::SidekiqServerMiddleware do context 'when replica is not up to date' do before do - allow(::Gitlab::Database::LoadBalancing).to receive_message_chain(:proxy, :load_balancer, :release_host) - allow(::Gitlab::Database::LoadBalancing).to receive_message_chain(:proxy, :load_balancer, :select_up_to_date_host).and_return(false) + replication_lag!(true) end around do |example| @@ -137,38 +140,34 @@ RSpec.describe Gitlab::Database::LoadBalancing::SidekiqServerMiddleware do end context 'when job is executed first' do - it 'raise an error and retries', :aggregate_failures do + it 'raises an error and retries', :aggregate_failures do expect do process_job(job) end.to raise_error(Sidekiq::JobRetry::Skip) expect(job['error_class']).to eq('Gitlab::Database::LoadBalancing::SidekiqServerMiddleware::JobReplicaNotUpToDate') - expect(job[:database_chosen]).to eq('retry') end + + include_examples 'load balancing strategy', 'retry' end context 'when job is retried' do - it 'stick to the primary', :aggregate_failures do + before do expect 
do process_job(job) end.to raise_error(Sidekiq::JobRetry::Skip) - - process_job(job) - expect(job[:database_chosen]).to eq('primary') end - end - context 'replica selection mechanism feature flag rollout' do - before do - stub_feature_flags(sidekiq_load_balancing_rotate_up_to_date_replica: false) + context 'and replica still lagging behind' do + include_examples 'stick to the primary', 'primary' end - it 'uses different implmentation' do - expect(::Gitlab::Database::LoadBalancing).to receive_message_chain(:proxy, :load_balancer, :host, :caught_up?).and_return(false) + context 'and replica is now up-to-date' do + before do + replication_lag!(false) + end - expect do - process_job(job) - end.to raise_error(Sidekiq::JobRetry::Skip) + it_behaves_like 'replica is up to date', '0/D525E3A8', 'replica_retried' end end end @@ -182,20 +181,24 @@ RSpec.describe Gitlab::Database::LoadBalancing::SidekiqServerMiddleware do allow(middleware).to receive(:replica_caught_up?).and_return(false) end - include_examples 'stick to the primary' - - it 'updates job hash with primary database chosen', :aggregate_failures do - expect { |b| middleware.call(worker, job, double(:queue), &b) }.to yield_control - - expect(job[:database_chosen]).to eq('primary') - end + include_examples 'stick to the primary', 'primary' end end end def process_job(job) - Sidekiq::JobRetry.new.local(worker_class, job, queue) do + Sidekiq::JobRetry.new.local(worker_class, job, 'default') do worker_class.process_job(job) end end + + def run_middleware + middleware.call(worker, job, double(:queue)) { yield } + rescue described_class::JobReplicaNotUpToDate + # we silence errors here that cause the job to retry + end + + def replication_lag!(exists) + allow(load_balancer).to receive(:select_up_to_date_host).and_return(!exists) + end end |