gitlab.com/gitlab-org/gitlab-foss.git
author    GitLab Bot <gitlab-bot@gitlab.com>  2020-09-19 04:45:44 +0300
committer GitLab Bot <gitlab-bot@gitlab.com>  2020-09-19 04:45:44 +0300
commit    85dc423f7090da0a52c73eb66faf22ddb20efff9 (patch)
tree      9160f299afd8c80c038f08e1545be119f5e3f1e1 /spec/lib/gitlab/database
parent    15c2c8c66dbe422588e5411eee7e68f1fa440bb8 (diff)
Add latest changes from gitlab-org/gitlab@13-4-stable-ee
Diffstat (limited to 'spec/lib/gitlab/database')
-rw-r--r--  spec/lib/gitlab/database/background_migration_job_spec.rb                                      9
-rw-r--r--  spec/lib/gitlab/database/batch_count_spec.rb                                                   10
-rw-r--r--  spec/lib/gitlab/database/concurrent_reindex_spec.rb                                            207
-rw-r--r--  spec/lib/gitlab/database/custom_structure_spec.rb                                              1
-rw-r--r--  spec/lib/gitlab/database/migration_helpers_spec.rb                                             11
-rw-r--r--  spec/lib/gitlab/database/migrations/background_migration_helpers_spec.rb                      2
-rw-r--r--  spec/lib/gitlab/database/partitioning/partition_monitoring_spec.rb                            34
-rw-r--r--  spec/lib/gitlab/database/partitioning_migration_helpers/backfill_partitioned_table_spec.rb    9
-rw-r--r--  spec/lib/gitlab/database/partitioning_migration_helpers/table_management_helpers_spec.rb      147
-rw-r--r--  spec/lib/gitlab/database/schema_cleaner_spec.rb                                                4
10 files changed, 425 insertions, 9 deletions
diff --git a/spec/lib/gitlab/database/background_migration_job_spec.rb b/spec/lib/gitlab/database/background_migration_job_spec.rb
index 40f47325be3..dd5bf8b512f 100644
--- a/spec/lib/gitlab/database/background_migration_job_spec.rb
+++ b/spec/lib/gitlab/database/background_migration_job_spec.rb
@@ -71,6 +71,15 @@ RSpec.describe Gitlab::Database::BackgroundMigrationJob do
expect(job4.reload).to be_pending
end
+ it 'returns the number of jobs updated' do
+ expect(described_class.succeeded.count).to eq(0)
+
+ jobs_updated = described_class.mark_all_as_succeeded('::TestJob', [1, 100])
+
+ expect(jobs_updated).to eq(2)
+ expect(described_class.succeeded.count).to eq(2)
+ end
+
context 'when previous matching jobs have already succeeded' do
let(:initial_time) { Time.now.round }
let!(:job1) { create(:background_migration_job, :succeeded, created_at: initial_time, updated_at: initial_time) }
diff --git a/spec/lib/gitlab/database/batch_count_spec.rb b/spec/lib/gitlab/database/batch_count_spec.rb
index 1f84a915cdc..71d3666602f 100644
--- a/spec/lib/gitlab/database/batch_count_spec.rb
+++ b/spec/lib/gitlab/database/batch_count_spec.rb
@@ -9,12 +9,16 @@ RSpec.describe Gitlab::Database::BatchCount do
let(:column) { :author_id }
let(:in_transaction) { false }
- let(:user) { create(:user) }
- let(:another_user) { create(:user) }
- before do
+ let_it_be(:user) { create(:user) }
+ let_it_be(:another_user) { create(:user) }
+
+ before_all do
create_list(:issue, 3, author: user)
create_list(:issue, 2, author: another_user)
+ end
+
+ before do
allow(ActiveRecord::Base.connection).to receive(:transaction_open?).and_return(in_transaction)
end
diff --git a/spec/lib/gitlab/database/concurrent_reindex_spec.rb b/spec/lib/gitlab/database/concurrent_reindex_spec.rb
new file mode 100644
index 00000000000..4e2c3f547d4
--- /dev/null
+++ b/spec/lib/gitlab/database/concurrent_reindex_spec.rb
@@ -0,0 +1,207 @@
+# frozen_string_literal: true
+
+require 'spec_helper'
+
+RSpec.describe Gitlab::Database::ConcurrentReindex, '#execute' do
+ subject { described_class.new(index_name, logger: logger) }
+
+ let(:table_name) { '_test_reindex_table' }
+ let(:column_name) { '_test_column' }
+ let(:index_name) { '_test_reindex_index' }
+ let(:logger) { double('logger', debug: nil, info: nil, error: nil ) }
+ let(:connection) { ActiveRecord::Base.connection }
+
+ before do
+ connection.execute(<<~SQL)
+ CREATE TABLE #{table_name} (
+ id serial NOT NULL PRIMARY KEY,
+ #{column_name} integer NOT NULL);
+
+ CREATE INDEX #{index_name} ON #{table_name} (#{column_name});
+ SQL
+ end
+
+ context 'when the index does not exist' do
+ before do
+ connection.execute(<<~SQL)
+ DROP INDEX #{index_name}
+ SQL
+ end
+
+ it 'raises an error' do
+ expect { subject.execute }.to raise_error(described_class::ReindexError, /does not exist/)
+ end
+ end
+
+ context 'when the index is unique' do
+ before do
+ connection.execute(<<~SQL)
+ DROP INDEX #{index_name};
+ CREATE UNIQUE INDEX #{index_name} ON #{table_name} (#{column_name})
+ SQL
+ end
+
+ it 'raises an error' do
+ expect do
+ subject.execute
+ end.to raise_error(described_class::ReindexError, /UNIQUE indexes are currently not supported/)
+ end
+ end
+
+ context 'replacing the original index with a rebuilt copy' do
+ let(:replacement_name) { 'tmp_reindex__test_reindex_index' }
+ let(:replaced_name) { 'old_reindex__test_reindex_index' }
+
+ let(:create_index) { "CREATE INDEX CONCURRENTLY #{replacement_name} ON public.#{table_name} USING btree (#{column_name})" }
+ let(:drop_index) { "DROP INDEX CONCURRENTLY IF EXISTS #{replacement_name}" }
+
+ let!(:original_index) { find_index_create_statement }
+
+ before do
+ allow(subject).to receive(:connection).and_return(connection)
+ allow(subject).to receive(:disable_statement_timeout).and_yield
+ end
+
+ it 'replaces the existing index with an identical index' do
+ expect(subject).to receive(:disable_statement_timeout).exactly(3).times.and_yield
+
+ expect_to_execute_concurrently_in_order(drop_index)
+ expect_to_execute_concurrently_in_order(create_index)
+
+ expect_next_instance_of(::Gitlab::Database::WithLockRetries) do |instance|
+ expect(instance).to receive(:run).with(raise_on_exhaustion: true).and_yield
+ end
+
+ expect_to_execute_in_order("ALTER INDEX #{index_name} RENAME TO #{replaced_name}")
+ expect_to_execute_in_order("ALTER INDEX #{replacement_name} RENAME TO #{index_name}")
+ expect_to_execute_in_order("ALTER INDEX #{replaced_name} RENAME TO #{replacement_name}")
+
+ expect_to_execute_concurrently_in_order(drop_index)
+
+ subject.execute
+
+ check_index_exists
+ end
+
+ context 'when a dangling index is left from a previous run' do
+ before do
+ connection.execute("CREATE INDEX #{replacement_name} ON #{table_name} (#{column_name})")
+ end
+
+ it 'replaces the existing index with an identical index' do
+ expect(subject).to receive(:disable_statement_timeout).exactly(3).times.and_yield
+
+ expect_to_execute_concurrently_in_order(drop_index)
+ expect_to_execute_concurrently_in_order(create_index)
+
+ expect_next_instance_of(::Gitlab::Database::WithLockRetries) do |instance|
+ expect(instance).to receive(:run).with(raise_on_exhaustion: true).and_yield
+ end
+
+ expect_to_execute_in_order("ALTER INDEX #{index_name} RENAME TO #{replaced_name}")
+ expect_to_execute_in_order("ALTER INDEX #{replacement_name} RENAME TO #{index_name}")
+ expect_to_execute_in_order("ALTER INDEX #{replaced_name} RENAME TO #{replacement_name}")
+
+ expect_to_execute_concurrently_in_order(drop_index)
+
+ subject.execute
+
+ check_index_exists
+ end
+ end
+
+ context 'when it fails to create the replacement index' do
+ it 'safely cleans up and signals the error' do
+ expect_to_execute_concurrently_in_order(drop_index)
+
+ expect(connection).to receive(:execute).with(create_index).ordered
+ .and_raise(ActiveRecord::ConnectionTimeoutError, 'connect timeout')
+
+ expect_to_execute_concurrently_in_order(drop_index)
+
+ expect { subject.execute }.to raise_error(described_class::ReindexError, /connect timeout/)
+
+ check_index_exists
+ end
+ end
+
+ context 'when the replacement index is not valid' do
+ it 'safely cleans up and signals the error' do
+ expect_to_execute_concurrently_in_order(drop_index)
+ expect_to_execute_concurrently_in_order(create_index)
+
+ expect(subject).to receive(:replacement_index_valid?).and_return(false)
+
+ expect_to_execute_concurrently_in_order(drop_index)
+
+ expect { subject.execute }.to raise_error(described_class::ReindexError, /replacement index was created as INVALID/)
+
+ check_index_exists
+ end
+ end
+
+ context 'when a database error occurs while swapping the indexes' do
+ it 'safely cleans up and signals the error' do
+ expect_to_execute_concurrently_in_order(drop_index)
+ expect_to_execute_concurrently_in_order(create_index)
+
+ expect_next_instance_of(::Gitlab::Database::WithLockRetries) do |instance|
+ expect(instance).to receive(:run).with(raise_on_exhaustion: true).and_yield
+ end
+
+ expect(connection).to receive(:execute).ordered
+ .with("ALTER INDEX #{index_name} RENAME TO #{replaced_name}")
+ .and_raise(ActiveRecord::ConnectionTimeoutError, 'connect timeout')
+
+ expect_to_execute_concurrently_in_order(drop_index)
+
+ expect { subject.execute }.to raise_error(described_class::ReindexError, /connect timeout/)
+
+ check_index_exists
+ end
+ end
+
+ context 'when with_lock_retries fails to acquire the lock' do
+ it 'safely cleans up and signals the error' do
+ expect_to_execute_concurrently_in_order(drop_index)
+ expect_to_execute_concurrently_in_order(create_index)
+
+ expect_next_instance_of(::Gitlab::Database::WithLockRetries) do |instance|
+ expect(instance).to receive(:run).with(raise_on_exhaustion: true)
+ .and_raise(::Gitlab::Database::WithLockRetries::AttemptsExhaustedError, 'exhausted')
+ end
+
+ expect_to_execute_concurrently_in_order(drop_index)
+
+ expect { subject.execute }.to raise_error(described_class::ReindexError, /exhausted/)
+
+ check_index_exists
+ end
+ end
+ end
+
+ def expect_to_execute_concurrently_in_order(sql)
+ # Indexes cannot be created CONCURRENTLY in a transaction. Since the tests are wrapped in transactions,
+ # verify the original call but pass through the non-concurrent form.
+ expect(connection).to receive(:execute).with(sql).ordered.and_wrap_original do |method, sql|
+ method.call(sql.sub(/CONCURRENTLY/, ''))
+ end
+ end
+
+ def expect_to_execute_in_order(sql)
+ expect(connection).to receive(:execute).with(sql).ordered.and_call_original
+ end
+
+ def find_index_create_statement
+ ActiveRecord::Base.connection.select_value(<<~SQL)
+ SELECT indexdef
+ FROM pg_indexes
+ WHERE schemaname = 'public'
+ AND indexname = #{ActiveRecord::Base.connection.quote(index_name)}
+ SQL
+ end
+
+ def check_index_exists
+ expect(find_index_create_statement).to eq(original_index)
+ end
+end
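
Note on the spec above: it pins down a rebuild-and-swap flow for non-unique indexes — remove any temporary index left over from a previous run, build a replacement with CREATE INDEX CONCURRENTLY, rename the indexes inside a lock-retry block so the replacement takes over the original name, then drop the old index concurrently. The following is a minimal sketch of that sequence only; the method and the with_lock_retries helper are illustrative assumptions, not the actual Gitlab::Database::ConcurrentReindex implementation.

    # Illustrative sketch: mirrors the SQL sequence asserted in the spec above.
    # `connection` is an ActiveRecord connection; `with_lock_retries` stands in for
    # Gitlab::Database::WithLockRetries#run and is an assumption of this sketch.
    def rebuild_index_concurrently(connection, table_name, index_name, column_name)
      replacement_name = "tmp_reindex_#{index_name}"
      replaced_name    = "old_reindex_#{index_name}"

      # Clean up a dangling replacement index from a previous, failed run.
      connection.execute("DROP INDEX CONCURRENTLY IF EXISTS #{replacement_name}")

      # Build the replacement without blocking concurrent writes.
      connection.execute("CREATE INDEX CONCURRENTLY #{replacement_name} ON #{table_name} (#{column_name})")

      # Swap names under a short exclusive lock, retrying on lock contention.
      with_lock_retries do
        connection.execute("ALTER INDEX #{index_name} RENAME TO #{replaced_name}")
        connection.execute("ALTER INDEX #{replacement_name} RENAME TO #{index_name}")
        connection.execute("ALTER INDEX #{replaced_name} RENAME TO #{replacement_name}")
      end

      # The original index now holds the temporary name; drop it concurrently.
      connection.execute("DROP INDEX CONCURRENTLY IF EXISTS #{replacement_name}")
    end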
diff --git a/spec/lib/gitlab/database/custom_structure_spec.rb b/spec/lib/gitlab/database/custom_structure_spec.rb
index b3bdca0acdd..04ce1e4ad9a 100644
--- a/spec/lib/gitlab/database/custom_structure_spec.rb
+++ b/spec/lib/gitlab/database/custom_structure_spec.rb
@@ -9,7 +9,6 @@ RSpec.describe Gitlab::Database::CustomStructure do
<<~DATA
-- this file tracks custom GitLab data, such as foreign keys referencing partitioned tables
-- more details can be found in the issue: https://gitlab.com/gitlab-org/gitlab/-/issues/201872
- SET search_path=public;
DATA
end
diff --git a/spec/lib/gitlab/database/migration_helpers_spec.rb b/spec/lib/gitlab/database/migration_helpers_spec.rb
index 7d26fbb1132..0bdcca630aa 100644
--- a/spec/lib/gitlab/database/migration_helpers_spec.rb
+++ b/spec/lib/gitlab/database/migration_helpers_spec.rb
@@ -903,15 +903,22 @@ RSpec.describe Gitlab::Database::MigrationHelpers do
describe '#change_column_type_concurrently' do
it 'changes the column type' do
expect(model).to receive(:rename_column_concurrently)
- .with('users', 'username', 'username_for_type_change', type: :text, type_cast_function: nil)
+ .with('users', 'username', 'username_for_type_change', type: :text, type_cast_function: nil, batch_column_name: :id)
model.change_column_type_concurrently('users', 'username', :text)
end
+ it 'passes the batch column name' do
+ expect(model).to receive(:rename_column_concurrently)
+ .with('users', 'username', 'username_for_type_change', type: :text, type_cast_function: nil, batch_column_name: :user_id)
+
+ model.change_column_type_concurrently('users', 'username', :text, batch_column_name: :user_id)
+ end
+
context 'with type cast' do
it 'changes the column type with casting the value to the new type' do
expect(model).to receive(:rename_column_concurrently)
- .with('users', 'username', 'username_for_type_change', type: :text, type_cast_function: 'JSON')
+ .with('users', 'username', 'username_for_type_change', type: :text, type_cast_function: 'JSON', batch_column_name: :id)
model.change_column_type_concurrently('users', 'username', :text, type_cast_function: 'JSON')
end
diff --git a/spec/lib/gitlab/database/migrations/background_migration_helpers_spec.rb b/spec/lib/gitlab/database/migrations/background_migration_helpers_spec.rb
index 042ac498373..48132d68031 100644
--- a/spec/lib/gitlab/database/migrations/background_migration_helpers_spec.rb
+++ b/spec/lib/gitlab/database/migrations/background_migration_helpers_spec.rb
@@ -86,7 +86,7 @@ RSpec.describe Gitlab::Database::Migrations::BackgroundMigrationHelpers do
let!(:id3) { create(:user).id }
around do |example|
- Timecop.freeze { example.run }
+ freeze_time { example.run }
end
before do
diff --git a/spec/lib/gitlab/database/partitioning/partition_monitoring_spec.rb b/spec/lib/gitlab/database/partitioning/partition_monitoring_spec.rb
new file mode 100644
index 00000000000..67596211f71
--- /dev/null
+++ b/spec/lib/gitlab/database/partitioning/partition_monitoring_spec.rb
@@ -0,0 +1,34 @@
+# frozen_string_literal: true
+
+require 'spec_helper'
+
+RSpec.describe Gitlab::Database::Partitioning::PartitionMonitoring do
+ describe '#report_metrics' do
+ subject { described_class.new(models).report_metrics }
+
+ let(:models) { [model] }
+ let(:model) { double(partitioning_strategy: partitioning_strategy, table_name: table) }
+ let(:partitioning_strategy) { double(missing_partitions: missing_partitions, current_partitions: current_partitions) }
+ let(:table) { "some_table" }
+
+ let(:missing_partitions) do
+ [double]
+ end
+
+ let(:current_partitions) do
+ [double, double]
+ end
+
+ it 'reports number of present partitions' do
+ subject
+
+ expect(Gitlab::Metrics.registry.get(:db_partitions_present).get({ table: table })).to eq(current_partitions.size)
+ end
+
+ it 'reports number of missing partitions' do
+ subject
+
+ expect(Gitlab::Metrics.registry.get(:db_partitions_missing).get({ table: table })).to eq(missing_partitions.size)
+ end
+ end
+end
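
Note on the spec above: it only asserts on two gauges, db_partitions_present and db_partitions_missing, labelled by table name. The following is a plausible shape for the reporting loop, offered as a sketch; the Gitlab::Metrics.gauge helper and the class body are assumptions for illustration, not the actual PartitionMonitoring implementation.

    # Sketch only: iterates the models and sets one gauge per table, as the spec expects.
    # Assumes a Gitlab::Metrics.gauge(name, docstring) helper with gauge.set(labels, value);
    # the production class may obtain and update its metrics differently.
    class PartitionMonitoringSketch
      def initialize(models)
        @models = models
      end

      def report_metrics
        @models.each do |model|
          strategy = model.partitioning_strategy
          labels = { table: model.table_name }

          partitions_present_gauge.set(labels, strategy.current_partitions.size)
          partitions_missing_gauge.set(labels, strategy.missing_partitions.size)
        end
      end

      private

      def partitions_present_gauge
        @partitions_present_gauge ||= Gitlab::Metrics.gauge(:db_partitions_present, 'Number of database partitions present')
      end

      def partitions_missing_gauge
        @partitions_missing_gauge ||= Gitlab::Metrics.gauge(:db_partitions_missing, 'Number of database partitions expected but not present')
      end
    end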
diff --git a/spec/lib/gitlab/database/partitioning_migration_helpers/backfill_partitioned_table_spec.rb b/spec/lib/gitlab/database/partitioning_migration_helpers/backfill_partitioned_table_spec.rb
index 49f3f87fe61..ec3d0a6dbcb 100644
--- a/spec/lib/gitlab/database/partitioning_migration_helpers/backfill_partitioned_table_spec.rb
+++ b/spec/lib/gitlab/database/partitioning_migration_helpers/backfill_partitioned_table_spec.rb
@@ -107,6 +107,15 @@ RSpec.describe Gitlab::Database::PartitioningMigrationHelpers::BackfillPartition
end.to change { ::Gitlab::Database::BackgroundMigrationJob.succeeded.count }.from(0).to(1)
end
+ it 'returns the number of job records marked as succeeded' do
+ create(:background_migration_job, class_name: "::#{described_class.name}",
+ arguments: [source1.id, source3.id, source_table, destination_table, unique_key])
+
+ jobs_updated = subject.perform(source1.id, source3.id, source_table, destination_table, unique_key)
+
+ expect(jobs_updated).to eq(1)
+ end
+
context 'when the feature flag is disabled' do
let(:mock_connection) { double('connection') }
diff --git a/spec/lib/gitlab/database/partitioning_migration_helpers/table_management_helpers_spec.rb b/spec/lib/gitlab/database/partitioning_migration_helpers/table_management_helpers_spec.rb
index 86f79b213ae..44ef0b307fe 100644
--- a/spec/lib/gitlab/database/partitioning_migration_helpers/table_management_helpers_spec.rb
+++ b/spec/lib/gitlab/database/partitioning_migration_helpers/table_management_helpers_spec.rb
@@ -480,6 +480,153 @@ RSpec.describe Gitlab::Database::PartitioningMigrationHelpers::TableManagementHe
end
end
+ describe '#finalize_backfilling_partitioned_table' do
+ let(:source_table) { 'todos' }
+ let(:source_column) { 'id' }
+
+ context 'when the table is not allowed' do
+ let(:source_table) { :this_table_is_not_allowed }
+
+ it 'raises an error' do
+ expect(migration).to receive(:assert_table_is_allowed).with(source_table).and_call_original
+
+ expect do
+ migration.finalize_backfilling_partitioned_table source_table
+ end.to raise_error(/#{source_table} is not allowed for use/)
+ end
+ end
+
+ context 'when the partitioned table does not exist' do
+ it 'raises an error' do
+ expect(migration).to receive(:table_exists?).with(partitioned_table).and_return(false)
+
+ expect do
+ migration.finalize_backfilling_partitioned_table source_table
+ end.to raise_error(/could not find partitioned table for #{source_table}/)
+ end
+ end
+
+ context 'finishing pending background migration jobs' do
+ let(:source_table_double) { double('table name') }
+ let(:raw_arguments) { [1, 50_000, source_table_double, partitioned_table, source_column] }
+
+ before do
+ allow(migration).to receive(:table_exists?).with(partitioned_table).and_return(true)
+ allow(migration).to receive(:copy_missed_records)
+ allow(migration).to receive(:execute).with(/VACUUM/)
+ end
+
+ it 'finishes remaining jobs for the correct table' do
+ expect_next_instance_of(described_class::JobArguments) do |job_arguments|
+ expect(job_arguments).to receive(:source_table_name).and_call_original
+ end
+
+ expect(Gitlab::BackgroundMigration).to receive(:steal)
+ .with(described_class::MIGRATION_CLASS_NAME)
+ .and_yield(raw_arguments)
+
+ expect(source_table_double).to receive(:==).with(source_table.to_s)
+
+ migration.finalize_backfilling_partitioned_table source_table
+ end
+ end
+
+ context 'when there is missed data' do
+ let(:partitioned_model) { Class.new(ActiveRecord::Base) }
+ let(:timestamp) { Time.utc(2019, 12, 1, 12).round }
+ let!(:todo1) { create(:todo, created_at: timestamp, updated_at: timestamp) }
+ let!(:todo2) { create(:todo, created_at: timestamp, updated_at: timestamp) }
+ let!(:todo3) { create(:todo, created_at: timestamp, updated_at: timestamp) }
+ let!(:todo4) { create(:todo, created_at: timestamp, updated_at: timestamp) }
+
+ let!(:pending_job1) do
+ create(:background_migration_job,
+ class_name: described_class::MIGRATION_CLASS_NAME,
+ arguments: [todo1.id, todo2.id, source_table, partitioned_table, source_column])
+ end
+
+ let!(:pending_job2) do
+ create(:background_migration_job,
+ class_name: described_class::MIGRATION_CLASS_NAME,
+ arguments: [todo3.id, todo3.id, source_table, partitioned_table, source_column])
+ end
+
+ let!(:succeeded_job) do
+ create(:background_migration_job, :succeeded,
+ class_name: described_class::MIGRATION_CLASS_NAME,
+ arguments: [todo4.id, todo4.id, source_table, partitioned_table, source_column])
+ end
+
+ before do
+ partitioned_model.primary_key = :id
+ partitioned_model.table_name = partitioned_table
+
+ allow(migration).to receive(:queue_background_migration_jobs_by_range_at_intervals)
+
+ migration.partition_table_by_date source_table, partition_column, min_date: min_date, max_date: max_date
+
+ allow(Gitlab::BackgroundMigration).to receive(:steal)
+ allow(migration).to receive(:execute).with(/VACUUM/)
+ end
+
+ it 'idempotently cleans up after failed background migrations' do
+ expect(partitioned_model.count).to eq(0)
+
+ partitioned_model.insert!(todo2.attributes)
+
+ expect_next_instance_of(Gitlab::Database::PartitioningMigrationHelpers::BackfillPartitionedTable) do |backfill|
+ allow(backfill).to receive(:transaction_open?).and_return(false)
+
+ expect(backfill).to receive(:perform)
+ .with(todo1.id, todo2.id, source_table, partitioned_table, source_column)
+ .and_call_original
+
+ expect(backfill).to receive(:perform)
+ .with(todo3.id, todo3.id, source_table, partitioned_table, source_column)
+ .and_call_original
+ end
+
+ migration.finalize_backfilling_partitioned_table source_table
+
+ expect(partitioned_model.count).to eq(3)
+
+ [todo1, todo2, todo3].each do |original|
+ copy = partitioned_model.find(original.id)
+ expect(copy.attributes).to eq(original.attributes)
+ end
+
+ expect(partitioned_model.find_by_id(todo4.id)).to be_nil
+
+ [pending_job1, pending_job2].each do |job|
+ expect(job.reload).to be_succeeded
+ end
+ end
+
+ it 'raises an error if no job tracking records are marked as succeeded' do
+ expect_next_instance_of(Gitlab::Database::PartitioningMigrationHelpers::BackfillPartitionedTable) do |backfill|
+ allow(backfill).to receive(:transaction_open?).and_return(false)
+
+ expect(backfill).to receive(:perform).and_return(0)
+ end
+
+ expect do
+ migration.finalize_backfilling_partitioned_table source_table
+ end.to raise_error(/failed to update tracking record/)
+ end
+
+ it 'vacuums the table after loading is complete' do
+ expect_next_instance_of(Gitlab::Database::PartitioningMigrationHelpers::BackfillPartitionedTable) do |backfill|
+ allow(backfill).to receive(:perform).and_return(1)
+ end
+
+ expect(migration).to receive(:disable_statement_timeout).and_call_original
+ expect(migration).to receive(:execute).with("VACUUM FREEZE ANALYZE #{partitioned_table}")
+
+ migration.finalize_backfilling_partitioned_table source_table
+ end
+ end
+ end
+
def filter_columns_by_name(columns, names)
columns.reject { |c| names.include?(c.name) }
end
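
Note on the #finalize_backfilling_partitioned_table examples above: they pin down three behaviours — remaining background-migration jobs for the backfill class are stolen and finished for the matching source table, ranges whose tracking records are still pending are re-copied (raising if no tracking record ends up marked as succeeded), and the partitioned table is vacuumed once loading completes. The rough control-flow sketch below uses only the names asserted in the spec (MIGRATION_CLASS_NAME, JobArguments, copy_missed_records, disable_statement_timeout); every other helper, and the JobArguments construction, is a hypothetical stand-in rather than the real migration helper.

    # Rough sketch of the flow exercised above, not the production helper.
    # `partitioned_table_for` and `primary_key_column_for` are hypothetical lookups,
    # and the JobArguments constructor shown here is a guess.
    def finalize_backfilling_partitioned_table(source_table)
      assert_table_is_allowed(source_table)

      partitioned_table = partitioned_table_for(source_table)
      unless table_exists?(partitioned_table)
        raise "could not find partitioned table for #{source_table}"
      end

      # Finish queued background jobs, but only those targeting this source table.
      Gitlab::BackgroundMigration.steal(MIGRATION_CLASS_NAME) do |raw_arguments|
        JobArguments.new(*raw_arguments).source_table_name == source_table.to_s
      end

      # Re-run the backfill for ranges whose tracking records are still pending;
      # the spec expects an error if a tracking record cannot be marked as succeeded.
      copy_missed_records(source_table, partitioned_table, primary_key_column_for(source_table))

      # Reclaim space and refresh statistics after the bulk copy.
      disable_statement_timeout do
        execute("VACUUM FREEZE ANALYZE #{partitioned_table}")
      end
    end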
diff --git a/spec/lib/gitlab/database/schema_cleaner_spec.rb b/spec/lib/gitlab/database/schema_cleaner_spec.rb
index 1303ad7a311..950759c7f96 100644
--- a/spec/lib/gitlab/database/schema_cleaner_spec.rb
+++ b/spec/lib/gitlab/database/schema_cleaner_spec.rb
@@ -15,8 +15,8 @@ RSpec.describe Gitlab::Database::SchemaCleaner do
expect(subject).not_to include('COMMENT ON EXTENSION')
end
- it 'sets the search_path' do
- expect(subject.split("\n").first).to eq('SET search_path=public;')
+ it 'makes no assumption about public being the default schema' do
+ expect(subject).not_to match(/public\.\w+/)
end
it 'cleans up the full schema as expected (blackbox test with example)' do