gitlab.com/gitlab-org/gitlab-foss.git
author    GitLab Bot <gitlab-bot@gitlab.com>  2022-06-20 14:10:13 +0300
committer GitLab Bot <gitlab-bot@gitlab.com>  2022-06-20 14:10:13 +0300
commit    0ea3fcec397b69815975647f5e2aa5fe944a8486 (patch)
tree      7979381b89d26011bcf9bdc989a40fcc2f1ed4ff /spec/workers
parent    72123183a20411a36d607d70b12d57c484394c8e (diff)

Add latest changes from gitlab-org/gitlab@15-1-stable-ee (v15.1.0-rc42)
Diffstat (limited to 'spec/workers')

-rw-r--r--  spec/workers/build_success_worker_spec.rb |  17
-rw-r--r--  spec/workers/bulk_imports/pipeline_worker_spec.rb | 142
-rw-r--r--  spec/workers/ci/archive_trace_worker_spec.rb |  28
-rw-r--r--  spec/workers/ci/resource_groups/assign_resource_from_resource_group_worker_spec.rb |   2
-rw-r--r--  spec/workers/clusters/applications/activate_integration_worker_spec.rb (renamed from spec/workers/clusters/applications/activate_service_worker_spec.rb) |  21
-rw-r--r--  spec/workers/clusters/applications/deactivate_integration_worker_spec.rb (renamed from spec/workers/clusters/applications/deactivate_service_worker_spec.rb) |  31
-rw-r--r--  spec/workers/clusters/applications/wait_for_uninstall_app_worker_spec.rb |   4
-rw-r--r--  spec/workers/concerns/cronjob_queue_spec.rb |  32
-rw-r--r--  spec/workers/concerns/limited_capacity/job_tracker_spec.rb |   2
-rw-r--r--  spec/workers/concerns/worker_attributes_spec.rb | 106
-rw-r--r--  spec/workers/container_registry/migration/enqueuer_worker_spec.rb | 788
-rw-r--r--  spec/workers/container_registry/migration/guard_worker_spec.rb | 106
-rw-r--r--  spec/workers/database/batched_background_migration/ci_database_worker_spec.rb |   2
-rw-r--r--  spec/workers/database/batched_background_migration_worker_spec.rb |   2
-rw-r--r--  spec/workers/database/ci_namespace_mirrors_consistency_check_worker_spec.rb |  29
-rw-r--r--  spec/workers/database/ci_project_mirrors_consistency_check_worker_spec.rb |  29
-rw-r--r--  spec/workers/delete_container_repository_worker_spec.rb | 120
-rw-r--r--  spec/workers/deployments/hooks_worker_spec.rb |   8
-rw-r--r--  spec/workers/environments/auto_stop_worker_spec.rb |   2
-rw-r--r--  spec/workers/every_sidekiq_worker_spec.rb |  15
-rw-r--r--  spec/workers/expire_job_cache_worker_spec.rb |  31
-rw-r--r--  spec/workers/expire_pipeline_cache_worker_spec.rb |  38
-rw-r--r--  spec/workers/gitlab/jira_import/stage/import_issues_worker_spec.rb |   2
-rw-r--r--  spec/workers/gitlab/jira_import/stage/import_labels_worker_spec.rb |   2
-rw-r--r--  spec/workers/gitlab_service_ping_worker_spec.rb |  56
-rw-r--r--  spec/workers/integrations/execute_worker_spec.rb (renamed from spec/workers/project_service_worker_spec.rb) |  24
-rw-r--r--  spec/workers/integrations/irker_worker_spec.rb (renamed from spec/workers/irker_worker_spec.rb) |  22
-rw-r--r--  spec/workers/issue_placement_worker_spec.rb | 151
-rw-r--r--  spec/workers/issue_rebalancing_worker_spec.rb | 104
-rw-r--r--  spec/workers/loose_foreign_keys/cleanup_worker_spec.rb |   8
-rw-r--r--  spec/workers/merge_requests/create_pipeline_worker_spec.rb |  37
-rw-r--r--  spec/workers/merge_requests/update_head_pipeline_worker_spec.rb |  28
-rw-r--r--  spec/workers/namespaceless_project_destroy_worker_spec.rb |  77
-rw-r--r--  spec/workers/pages_transfer_worker_spec.rb |  38
-rw-r--r--  spec/workers/pipeline_hooks_worker_spec.rb |  10
-rw-r--r--  spec/workers/pipeline_notification_worker_spec.rb |  14
-rw-r--r--  spec/workers/project_daily_statistics_worker_spec.rb |  35
-rw-r--r--  spec/workers/projects/inactive_projects_deletion_cron_worker_spec.rb |  48
-rw-r--r--  spec/workers/prometheus/create_default_alerts_worker_spec.rb |  13
-rw-r--r--  spec/workers/repository_remove_remote_worker_spec.rb |  48
-rw-r--r--  spec/workers/schedule_merge_request_cleanup_refs_worker_spec.rb |   6
-rw-r--r--  spec/workers/terraform/states/destroy_worker_spec.rb |  30
-rw-r--r--  spec/workers/update_merge_requests_worker_spec.rb |  49
-rw-r--r--  spec/workers/users/deactivate_dormant_users_worker_spec.rb |   9
-rw-r--r--  spec/workers/web_hooks/destroy_worker_spec.rb |  23
-rw-r--r--  spec/workers/web_hooks/log_destroy_worker_spec.rb |  86

46 files changed, 1115 insertions(+), 1360 deletions(-)
diff --git a/spec/workers/build_success_worker_spec.rb b/spec/workers/build_success_worker_spec.rb
index 0583d79ed46..3241c931dc5 100644
--- a/spec/workers/build_success_worker_spec.rb
+++ b/spec/workers/build_success_worker_spec.rb
@@ -8,7 +8,7 @@ RSpec.describe BuildSuccessWorker do
context 'when build exists' do
context 'when the build will stop an environment' do
- let!(:build) { create(:ci_build, :stop_review_app, environment: environment.name, project: environment.project) }
+ let!(:build) { create(:ci_build, :stop_review_app, environment: environment.name, project: environment.project, status: :success) }
let(:environment) { create(:environment, state: :available) }
it 'stops the environment' do
@@ -18,6 +18,21 @@ RSpec.describe BuildSuccessWorker do
expect(environment.reload).to be_stopped
end
+
+ context 'when the build fails' do
+ before do
+ build.update!(status: :failed)
+ environment.update!(state: :available)
+ end
+
+ it 'does not stop the environment' do
+ expect(environment).to be_available
+
+ subject
+
+ expect(environment.reload).not_to be_stopped
+ end
+ end
end
end
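
The new examples pin down that BuildSuccessWorker only stops an environment for a successful build; a failed build must leave the environment available. A minimal sketch of the guard this implies, with stop_environment as a hypothetical stand-in for the real stop logic:

    # Sketch only, not the worker's real internals: stop_environment is
    # a stand-in for the logic that stops the review-app environment.
    class BuildSuccessWorker
      include ApplicationWorker

      def perform(build_id)
        Ci::Build.find_by_id(build_id).try do |build|
          # Only a successful build may stop the environment it targets.
          stop_environment(build) if build.success?
        end
      end
    end
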
diff --git a/spec/workers/bulk_imports/pipeline_worker_spec.rb b/spec/workers/bulk_imports/pipeline_worker_spec.rb
index 209ae8862b6..b5f20e9ff76 100644
--- a/spec/workers/bulk_imports/pipeline_worker_spec.rb
+++ b/spec/workers/bulk_imports/pipeline_worker_spec.rb
@@ -22,9 +22,10 @@ RSpec.describe BulkImports::PipelineWorker do
before do
stub_const('FakePipeline', pipeline_class)
+ allow(entity).to receive(:pipeline_exists?).with('FakePipeline').and_return(true)
allow_next_instance_of(BulkImports::Groups::Stage) do |instance|
allow(instance).to receive(:pipelines)
- .and_return([[0, pipeline_class]])
+ .and_return([{ stage: 0, pipeline: pipeline_class }])
end
end
@@ -101,18 +102,26 @@ RSpec.describe BulkImports::PipelineWorker do
pipeline_tracker = create(
:bulk_import_tracker,
entity: entity,
- pipeline_name: 'InexistentPipeline',
+ pipeline_name: 'FakePipeline',
status_event: 'enqueue'
)
+ allow(subject).to receive(:jid).and_return('jid')
+
+ expect_next_instance_of(pipeline_class) do |pipeline|
+ expect(pipeline)
+ .to receive(:run)
+ .and_raise(StandardError, 'Error!')
+ end
+
expect_next_instance_of(Gitlab::Import::Logger) do |logger|
expect(logger)
.to receive(:error)
.with(
hash_including(
- 'pipeline_name' => 'InexistentPipeline',
+ 'pipeline_name' => 'FakePipeline',
'entity_id' => entity.id,
- 'message' => "'InexistentPipeline' is not a valid BulkImport Pipeline"
+ 'message' => 'Error!'
)
)
end
@@ -120,7 +129,7 @@ RSpec.describe BulkImports::PipelineWorker do
expect(Gitlab::ErrorTracking)
.to receive(:track_exception)
.with(
- instance_of(BulkImports::Error),
+ instance_of(StandardError),
entity_id: entity.id,
pipeline_name: pipeline_tracker.pipeline_name
)
@@ -129,7 +138,18 @@ RSpec.describe BulkImports::PipelineWorker do
.to receive(:perform_async)
.with(entity.id, pipeline_tracker.stage)
- allow(subject).to receive(:jid).and_return('jid')
+ expect(BulkImports::Failure)
+ .to receive(:create)
+ .with(
+ a_hash_including(
+ bulk_import_entity_id: entity.id,
+ pipeline_class: 'FakePipeline',
+ pipeline_step: 'pipeline_worker_run',
+ exception_class: 'StandardError',
+ exception_message: 'Error!',
+ correlation_id_value: anything
+ )
+ )
subject.perform(pipeline_tracker.id, pipeline_tracker.stage, entity.id)
@@ -144,18 +164,19 @@ RSpec.describe BulkImports::PipelineWorker do
pipeline_tracker = create(
:bulk_import_tracker,
entity: entity,
- pipeline_name: 'Pipeline',
+ pipeline_name: 'FakePipeline',
status_event: 'enqueue'
)
entity.update!(status: -1)
+ expect(BulkImports::Failure).to receive(:create)
expect_next_instance_of(Gitlab::Import::Logger) do |logger|
expect(logger)
.to receive(:error)
.with(
hash_including(
- 'pipeline_name' => 'Pipeline',
+ 'pipeline_name' => 'FakePipeline',
'entity_id' => entity.id,
'message' => 'Failed entity status'
)
@@ -168,56 +189,78 @@ RSpec.describe BulkImports::PipelineWorker do
end
end
- context 'when it is a network error' do
- it 'reenqueue on retriable network errors' do
- pipeline_tracker = create(
+ context 'when network error is raised' do
+ let(:pipeline_tracker) do
+ create(
:bulk_import_tracker,
entity: entity,
pipeline_name: 'FakePipeline',
status_event: 'enqueue'
)
+ end
- exception = BulkImports::NetworkError.new(
- response: double(code: 429, headers: {})
- )
+ let(:exception) do
+ BulkImports::NetworkError.new(response: instance_double(HTTParty::Response, code: 429, headers: {}))
+ end
+
+ before do
+ allow(subject).to receive(:jid).and_return('jid')
expect_next_instance_of(pipeline_class) do |pipeline|
expect(pipeline)
.to receive(:run)
.and_raise(exception)
end
+ end
- allow(subject).to receive(:jid).and_return('jid')
-
- expect_any_instance_of(BulkImports::Tracker) do |tracker|
- expect(tracker).to receive(:retry).and_call_original
- end
+ context 'when error is retriable' do
+ it 'reenqueues the worker' do
+ expect_any_instance_of(BulkImports::Tracker) do |tracker|
+ expect(tracker).to receive(:retry).and_call_original
+ end
+
+ expect_next_instance_of(Gitlab::Import::Logger) do |logger|
+ expect(logger)
+ .to receive(:info)
+ .with(
+ hash_including(
+ 'pipeline_name' => 'FakePipeline',
+ 'entity_id' => entity.id
+ )
+ )
+ end
- expect_next_instance_of(Gitlab::Import::Logger) do |logger|
- expect(logger)
- .to receive(:info)
+ expect(described_class)
+ .to receive(:perform_in)
.with(
- hash_including(
- 'pipeline_name' => 'FakePipeline',
- 'entity_id' => entity.id
- )
+ 60.seconds,
+ pipeline_tracker.id,
+ pipeline_tracker.stage,
+ pipeline_tracker.entity.id
)
+
+ subject.perform(pipeline_tracker.id, pipeline_tracker.stage, entity.id)
+
+ pipeline_tracker.reload
+
+ expect(pipeline_tracker.enqueued?).to be_truthy
end
- expect(described_class)
- .to receive(:perform_in)
- .with(
- 60.seconds,
- pipeline_tracker.id,
- pipeline_tracker.stage,
- pipeline_tracker.entity.id
- )
+ context 'when error is not retriable' do
+ let(:exception) do
+ BulkImports::NetworkError.new(response: instance_double(HTTParty::Response, code: 503, headers: {}))
+ end
- subject.perform(pipeline_tracker.id, pipeline_tracker.stage, entity.id)
+ it 'marks tracker as failed and logs the error' do
+ expect(described_class).not_to receive(:perform_in)
+
+ subject.perform(pipeline_tracker.id, pipeline_tracker.stage, entity.id)
- pipeline_tracker.reload
+ pipeline_tracker.reload
- expect(pipeline_tracker.enqueued?).to be_truthy
+ expect(pipeline_tracker.failed?).to eq(true)
+ end
+ end
end
end
end
@@ -253,13 +296,14 @@ RSpec.describe BulkImports::PipelineWorker do
allow_next_instance_of(BulkImports::Groups::Stage) do |instance|
allow(instance).to receive(:pipelines)
- .and_return([[0, file_extraction_pipeline]])
+ .and_return([{ stage: 0, pipeline: file_extraction_pipeline }])
end
end
it 'runs the pipeline successfully' do
allow_next_instance_of(BulkImports::ExportStatus) do |status|
allow(status).to receive(:started?).and_return(false)
+ allow(status).to receive(:empty?).and_return(false)
allow(status).to receive(:failed?).and_return(false)
end
@@ -272,6 +316,28 @@ RSpec.describe BulkImports::PipelineWorker do
it 'reenqueues pipeline worker' do
allow_next_instance_of(BulkImports::ExportStatus) do |status|
allow(status).to receive(:started?).and_return(true)
+ allow(status).to receive(:empty?).and_return(false)
+ allow(status).to receive(:failed?).and_return(false)
+ end
+
+ expect(described_class)
+ .to receive(:perform_in)
+ .with(
+ described_class::FILE_EXTRACTION_PIPELINE_PERFORM_DELAY,
+ pipeline_tracker.id,
+ pipeline_tracker.stage,
+ entity.id
+ )
+
+ subject.perform(pipeline_tracker.id, pipeline_tracker.stage, entity.id)
+ end
+ end
+
+ context 'when export status is empty' do
+ it 'reenqueues pipeline worker' do
+ allow_next_instance_of(BulkImports::ExportStatus) do |status|
+ allow(status).to receive(:started?).and_return(false)
+ allow(status).to receive(:empty?).and_return(true)
allow(status).to receive(:failed?).and_return(false)
end
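
The reworked network-error contexts split BulkImports::NetworkError handling into retriable and non-retriable branches: a 429 response re-enqueues the worker after a delay and leaves the tracker enqueued, while a non-retriable response (503 in the spec) ends with the tracker failed and a BulkImports::Failure recorded. A sketch of that branching; the constant and fail_tracker are assumptions mirroring the behaviour asserted above, not the class's real implementation:

    # Sketch: RETRIABLE_CODES and fail_tracker are assumptions.
    RETRIABLE_CODES = [429].freeze

    def handle_network_error(error, tracker)
      if RETRIABLE_CODES.include?(error.response&.code)
        tracker.retry  # tracker stays enqueued
        self.class.perform_in(60.seconds, tracker.id, tracker.stage, tracker.entity.id)
      else
        # Hypothetical helper: records a BulkImports::Failure and moves
        # the tracker to the failed state.
        fail_tracker(error, tracker)
      end
    end
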
diff --git a/spec/workers/ci/archive_trace_worker_spec.rb b/spec/workers/ci/archive_trace_worker_spec.rb
index 889e0c92042..52723ff5823 100644
--- a/spec/workers/ci/archive_trace_worker_spec.rb
+++ b/spec/workers/ci/archive_trace_worker_spec.rb
@@ -16,6 +16,34 @@ RSpec.describe Ci::ArchiveTraceWorker do
subject
end
+
+ it 'has preloaded the arguments for archiving' do
+ allow_next_instance_of(Ci::ArchiveTraceService) do |instance|
+ allow(instance).to receive(:execute) do |job|
+ expect(job.association(:project)).to be_loaded
+ expect(job.association(:pending_state)).to be_loaded
+ end
+ end
+
+ subject
+ end
+
+ context 'when sticky_ci_archive_trace_worker is disabled' do
+ before do
+ stub_feature_flags(sticky_ci_archive_trace_worker: false)
+ end
+
+ it 'does not preload associations' do
+ allow_next_instance_of(Ci::ArchiveTraceService) do |instance|
+ allow(instance).to receive(:execute) do |job|
+ expect(job.association(:project)).not_to be_loaded
+ expect(job.association(:pending_state)).not_to be_loaded
+ end
+ end
+
+ subject
+ end
+ end
end
context 'when job is not found' do
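
The added examples assert that Ci::ArchiveTraceWorker hands Ci::ArchiveTraceService a job whose :project and :pending_state associations are already loaded, and that this preloading is gated by the sticky_ci_archive_trace_worker feature flag. A sketch of flag-gated preloading (the exact scope construction is an assumption):

    # Sketch: the includes() below is illustrative; the point is that
    # association preloading only happens when the flag is enabled.
    def perform(job_id)
      scope = Ci::Build.all
      scope = scope.includes(:project, :pending_state) if Feature.enabled?(:sticky_ci_archive_trace_worker)

      scope.find_by_id(job_id).try do |job|
        Ci::ArchiveTraceService.new.execute(job, worker_name: self.class.name)
      end
    end
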
diff --git a/spec/workers/ci/resource_groups/assign_resource_from_resource_group_worker_spec.rb b/spec/workers/ci/resource_groups/assign_resource_from_resource_group_worker_spec.rb
index be7f7ef5c8c..785cba24f9d 100644
--- a/spec/workers/ci/resource_groups/assign_resource_from_resource_group_worker_spec.rb
+++ b/spec/workers/ci/resource_groups/assign_resource_from_resource_group_worker_spec.rb
@@ -25,7 +25,7 @@ RSpec.describe Ci::ResourceGroups::AssignResourceFromResourceGroupWorker do
context 'when resource group exists' do
it 'executes AssignResourceFromResourceGroupService' do
- expect_next_instances_of(Ci::ResourceGroups::AssignResourceFromResourceGroupService, 2, resource_group.project, nil) do |service|
+ expect_next_instances_of(Ci::ResourceGroups::AssignResourceFromResourceGroupService, 2, false, resource_group.project, nil) do |service|
expect(service).to receive(:execute).with(resource_group)
end
diff --git a/spec/workers/clusters/applications/activate_service_worker_spec.rb b/spec/workers/clusters/applications/activate_integration_worker_spec.rb
index d13ff76613c..ecb49be5a4b 100644
--- a/spec/workers/clusters/applications/activate_service_worker_spec.rb
+++ b/spec/workers/clusters/applications/activate_integration_worker_spec.rb
@@ -2,8 +2,8 @@
require 'spec_helper'
-RSpec.describe Clusters::Applications::ActivateServiceWorker, '#perform' do
- context 'cluster exists' do
+RSpec.describe Clusters::Applications::ActivateIntegrationWorker, '#perform' do
+ context 'when cluster exists' do
describe 'prometheus integration' do
let(:integration_name) { 'prometheus' }
@@ -11,7 +11,7 @@ RSpec.describe Clusters::Applications::ActivateServiceWorker, '#perform' do
create(:clusters_integrations_prometheus, cluster: cluster)
end
- context 'cluster type: group' do
+ context 'with cluster type: group' do
let(:group) { create(:group) }
let(:project) { create(:project, group: group) }
let(:cluster) { create(:cluster_for_group, groups: [group]) }
@@ -22,7 +22,7 @@ RSpec.describe Clusters::Applications::ActivateServiceWorker, '#perform' do
end
end
- context 'cluster type: project' do
+ context 'with cluster type: project' do
let(:project) { create(:project) }
let(:cluster) { create(:cluster, projects: [project]) }
@@ -32,7 +32,7 @@ RSpec.describe Clusters::Applications::ActivateServiceWorker, '#perform' do
end
end
- context 'cluster type: instance' do
+ context 'with cluster type: instance' do
let(:project) { create(:project) }
let(:cluster) { create(:cluster, :instance) }
@@ -40,11 +40,20 @@ RSpec.describe Clusters::Applications::ActivateServiceWorker, '#perform' do
expect { described_class.new.perform(cluster.id, integration_name) }
.to change { project.reload.prometheus_integration&.active }.from(nil).to(true)
end
+
+ context 'when using the old worker class' do
+ let(:described_class) { Clusters::Applications::ActivateServiceWorker }
+
+ it 'ensures Prometheus integration is activated' do
+ expect { described_class.new.perform(cluster.id, integration_name) }
+ .to change { project.reload.prometheus_integration&.active }.from(nil).to(true)
+ end
+ end
end
end
end
- context 'cluster does not exist' do
+ context 'when cluster does not exist' do
it 'does not raise Record Not Found error' do
expect { described_class.new.perform(0, 'ignored in this context') }.not_to raise_error
end
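
The spec was renamed along with the worker, but it keeps a 'when using the old worker class' example: jobs already sitting in Sidekiq's queues are serialized under the old constant, so the rename must leave that name behind until the queues drain. A minimal sketch of the pattern (bodies elided):

    module Clusters
      module Applications
        class ActivateIntegrationWorker
          include ApplicationWorker

          def perform(cluster_id, integration_name)
            # ... look up the cluster and activate the integration ...
          end
        end

        # Deprecated name kept so already-enqueued jobs still deserialize
        # and run; remove once no jobs reference it.
        class ActivateServiceWorker < ActivateIntegrationWorker; end
      end
    end
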
diff --git a/spec/workers/clusters/applications/deactivate_service_worker_spec.rb b/spec/workers/clusters/applications/deactivate_integration_worker_spec.rb
index 77788cfa893..3f0188eee23 100644
--- a/spec/workers/clusters/applications/deactivate_service_worker_spec.rb
+++ b/spec/workers/clusters/applications/deactivate_integration_worker_spec.rb
@@ -2,20 +2,22 @@
require 'spec_helper'
-RSpec.describe Clusters::Applications::DeactivateServiceWorker, '#perform' do
- context 'cluster exists' do
+RSpec.describe Clusters::Applications::DeactivateIntegrationWorker, '#perform' do
+ context 'when cluster exists' do
describe 'prometheus integration' do
let(:integration_name) { 'prometheus' }
let!(:integration) { create(:clusters_integrations_prometheus, cluster: cluster) }
- context 'prometheus integration exists' do
- let!(:prometheus_integration) { create(:prometheus_integration, project: project, manual_configuration: false, active: true) }
+ context 'when prometheus integration exists' do
+ let!(:prometheus_integration) do
+ create(:prometheus_integration, project: project, manual_configuration: false, active: true)
+ end
before do
integration.delete # the prometheus integration's before_save synchronises the active state with the integration's existence.
end
- context 'cluster type: group' do
+ context 'with cluster type: group' do
let(:group) { create(:group) }
let(:project) { create(:project, group: group) }
let(:cluster) { create(:cluster_for_group, groups: [group]) }
@@ -26,7 +28,7 @@ RSpec.describe Clusters::Applications::DeactivateServiceWorker, '#perform' do
end
end
- context 'cluster type: project' do
+ context 'with cluster type: project' do
let(:project) { create(:project) }
let(:cluster) { create(:cluster, projects: [project]) }
@@ -36,7 +38,7 @@ RSpec.describe Clusters::Applications::DeactivateServiceWorker, '#perform' do
end
end
- context 'cluster type: instance' do
+ context 'with cluster type: instance' do
let(:project) { create(:project) }
let(:cluster) { create(:cluster, :instance) }
@@ -44,11 +46,20 @@ RSpec.describe Clusters::Applications::DeactivateServiceWorker, '#perform' do
expect { described_class.new.perform(cluster.id, integration_name) }
.to change { prometheus_integration.reload.active }.from(true).to(false)
end
+
+ context 'when using the old worker class' do
+ let(:described_class) { Clusters::Applications::DeactivateServiceWorker }
+
+ it 'ensures Prometheus integration is deactivated' do
+ expect { described_class.new.perform(cluster.id, integration_name) }
+ .to change { prometheus_integration.reload.active }.from(true).to(false)
+ end
+ end
end
end
- context 'prometheus integration does not exist' do
- context 'cluster type: project' do
+ context 'when prometheus integration does not exist' do
+ context 'with cluster type: project' do
let(:project) { create(:project) }
let(:cluster) { create(:cluster, projects: [project]) }
@@ -60,7 +71,7 @@ RSpec.describe Clusters::Applications::DeactivateServiceWorker, '#perform' do
end
end
- context 'cluster does not exist' do
+ context 'when cluster does not exist' do
it 'raises Record Not Found error' do
expect { described_class.new.perform(0, 'ignored in this context') }.to raise_error(ActiveRecord::RecordNotFound)
end
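
Note the asymmetry these two integration-worker specs pin down: the activate worker tolerates a missing cluster ('does not raise Record Not Found error'), while the deactivate worker raises ActiveRecord::RecordNotFound, which lets Sidekiq retry the job. The difference comes down to the lookup style; a sketch of the two shapes (bodies illustrative):

    # Tolerant lookup (activate): a missing cluster is a no-op.
    def perform(cluster_id, integration_name)
      cluster = Clusters::Cluster.find_by_id(cluster_id)
      return unless cluster
      # ... activate the integration ...
    end

    # Strict lookup (deactivate): a missing cluster raises, so Sidekiq
    # retries the job.
    def perform(cluster_id, integration_name)
      cluster = Clusters::Cluster.find(cluster_id)
      # ... deactivate the integration ...
    end
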
diff --git a/spec/workers/clusters/applications/wait_for_uninstall_app_worker_spec.rb b/spec/workers/clusters/applications/wait_for_uninstall_app_worker_spec.rb
index 0191a2898b2..d1dd1cd738b 100644
--- a/spec/workers/clusters/applications/wait_for_uninstall_app_worker_spec.rb
+++ b/spec/workers/clusters/applications/wait_for_uninstall_app_worker_spec.rb
@@ -9,7 +9,7 @@ RSpec.describe Clusters::Applications::WaitForUninstallAppWorker, '#perform' do
subject { described_class.new.perform(app_name, app_id) }
- context 'app exists' do
+ context 'when app exists' do
let(:service) { instance_double(Clusters::Applications::CheckUninstallProgressService) }
it 'calls the check service' do
@@ -20,7 +20,7 @@ RSpec.describe Clusters::Applications::WaitForUninstallAppWorker, '#perform' do
end
end
- context 'app does not exist' do
+ context 'when app does not exist' do
let(:app_id) { 0 }
it 'does not call the check service' do
diff --git a/spec/workers/concerns/cronjob_queue_spec.rb b/spec/workers/concerns/cronjob_queue_spec.rb
index d1ad5c65ea3..0244535051f 100644
--- a/spec/workers/concerns/cronjob_queue_spec.rb
+++ b/spec/workers/concerns/cronjob_queue_spec.rb
@@ -11,11 +11,33 @@ RSpec.describe CronjobQueue do
include ApplicationWorker
include CronjobQueue # rubocop:disable Scalability/CronWorkerContext
+
+ def perform
+ AnotherWorker.perform_async('identifier')
+ end
+ end
+ end
+
+ let(:another_worker) do
+ Class.new do
+ def self.name
+ 'AnotherWorker'
+ end
+
+ include ApplicationWorker
+
+ # To keep track of the context that was active for certain arguments
+ cattr_accessor(:contexts) { {} }
+
+ def perform(identifier, *args)
+ self.class.contexts.merge!(identifier => Gitlab::ApplicationContext.current)
+ end
end
end
before do
stub_const("DummyWorker", worker)
+ stub_const("AnotherWorker", another_worker)
end
it 'sets the queue name of a worker' do
@@ -27,7 +49,7 @@ RSpec.describe CronjobQueue do
end
it 'automatically clears project, user and namespace from the context', :aggregate_failures do
- worker_context = worker.get_worker_context.to_lazy_hash.transform_values(&:call)
+ worker_context = worker.get_worker_context.to_lazy_hash.transform_values { |v| v.try(:call) }
expect(worker_context[:user]).to be_nil
expect(worker_context[:root_namespace]).to be_nil
@@ -42,6 +64,14 @@ RSpec.describe CronjobQueue do
expect(job).to include('meta.caller_id' => 'Cronjob')
end
+ it 'gets root_caller_id from the cronjob' do
+ Sidekiq::Testing.inline! do
+ worker.perform_async
+ end
+
+ expect(AnotherWorker.contexts['identifier']).to include('meta.root_caller_id' => 'Cronjob')
+ end
+
it 'does not set the caller_id if there was already one in the context' do
Gitlab::ApplicationContext.with_context(caller_id: 'already set') do
worker.perform_async
diff --git a/spec/workers/concerns/limited_capacity/job_tracker_spec.rb b/spec/workers/concerns/limited_capacity/job_tracker_spec.rb
index f141a1ad7ad..eeccdbd0e2d 100644
--- a/spec/workers/concerns/limited_capacity/job_tracker_spec.rb
+++ b/spec/workers/concerns/limited_capacity/job_tracker_spec.rb
@@ -2,7 +2,7 @@
require 'spec_helper'
-RSpec.describe LimitedCapacity::JobTracker, :clean_gitlab_redis_queues do
+RSpec.describe LimitedCapacity::JobTracker, :clean_gitlab_redis_shared_state do
let(:job_tracker) do
described_class.new('namespace')
end
diff --git a/spec/workers/concerns/worker_attributes_spec.rb b/spec/workers/concerns/worker_attributes_spec.rb
index ad9d5eeccbe..5e8f68923fd 100644
--- a/spec/workers/concerns/worker_attributes_spec.rb
+++ b/spec/workers/concerns/worker_attributes_spec.rb
@@ -3,6 +3,8 @@
require 'spec_helper'
RSpec.describe WorkerAttributes do
+ using RSpec::Parameterized::TableSyntax
+
let(:worker) do
Class.new do
def self.name
@@ -13,21 +15,64 @@ RSpec.describe WorkerAttributes do
end
end
- describe '.data_consistency' do
- context 'with valid data_consistency' do
- it 'returns correct data_consistency' do
- worker.data_consistency(:sticky)
-
- expect(worker.get_data_consistency).to eq(:sticky)
+ let(:child_worker) do
+ Class.new(worker) do
+ def self.name
+ "TestChildworker"
end
end
+ end
+
+ describe 'class attributes' do
+ # rubocop: disable Layout/LineLength
+ where(:getter, :setter, :default, :values, :expected) do
+ :get_feature_category | :feature_category | nil | [:foo] | :foo
+ :get_urgency | :urgency | :low | [:high] | :high
+ :get_data_consistency | :data_consistency | :always | [:sticky] | :sticky
+ :get_worker_resource_boundary | :worker_resource_boundary | :unknown | [:cpu] | :cpu
+ :get_weight | :weight | 1 | [3] | 3
+ :get_tags | :tags | [] | [:foo, :bar] | [:foo, :bar]
+ :get_deduplicate_strategy | :deduplicate | :until_executing | [:none] | :none
+ :get_deduplication_options | :deduplicate | {} | [:none, including_scheduled: true] | { including_scheduled: true }
+ :worker_has_external_dependencies? | :worker_has_external_dependencies! | false | [] | true
+ :idempotent? | :idempotent! | false | [] | true
+ :big_payload? | :big_payload! | false | [] | true
+ end
+ # rubocop: enable Layout/LineLength
+
+ with_them do
+ context 'when the attribute is set' do
+ before do
+ worker.public_send(setter, *values)
+ end
+
+ it 'returns the expected value' do
+ expect(worker.public_send(getter)).to eq(expected)
+ expect(child_worker.public_send(getter)).to eq(expected)
+ end
+ end
+
+ context 'when the attribute is not set' do
+ it 'returns the default value' do
+ expect(worker.public_send(getter)).to eq(default)
+ expect(child_worker.public_send(getter)).to eq(default)
+ end
+ end
+
+ context 'when the attribute is set in the child worker' do
+ before do
+ child_worker.public_send(setter, *values)
+ end
- context 'when data_consistency is not provided' do
- it 'defaults to :always' do
- expect(worker.get_data_consistency).to eq(:always)
+ it 'returns the default value for the parent, and the expected value for the child' do
+ expect(worker.public_send(getter)).to eq(default)
+ expect(child_worker.public_send(getter)).to eq(expected)
+ end
end
end
+ end
+ describe '.data_consistency' do
context 'with invalid data_consistency' do
it 'raise exception' do
expect { worker.data_consistency(:invalid) }
@@ -45,36 +90,12 @@ RSpec.describe WorkerAttributes do
it 'returns correct feature flag value' do
worker.data_consistency(:sticky, feature_flag: :test_feature_flag)
- expect(worker.get_data_consistency_feature_flag_enabled?).not_to be_truthy
+ expect(worker.get_data_consistency_feature_flag_enabled?).not_to be(true)
+ expect(child_worker.get_data_consistency_feature_flag_enabled?).not_to be(true)
end
end
end
- describe '.idempotent?' do
- subject(:idempotent?) { worker.idempotent? }
-
- context 'when the worker is idempotent' do
- before do
- worker.idempotent!
- end
-
- it { is_expected.to be_truthy }
- end
-
- context 'when the worker is not idempotent' do
- it { is_expected.to be_falsey }
- end
- end
-
- describe '.deduplicate' do
- it 'sets deduplication_strategy and deduplication_options' do
- worker.deduplicate(:until_executing, including_scheduled: true)
-
- expect(worker.send(:class_attributes)[:deduplication_strategy]).to eq(:until_executing)
- expect(worker.send(:class_attributes)[:deduplication_options]).to eq(including_scheduled: true)
- end
- end
-
describe '#deduplication_enabled?' do
subject(:deduplication_enabled?) { worker.deduplication_enabled? }
@@ -83,7 +104,10 @@ RSpec.describe WorkerAttributes do
worker.deduplicate(:until_executing)
end
- it { is_expected.to eq(true) }
+ it 'returns true' do
+ expect(worker.deduplication_enabled?).to be(true)
+ expect(child_worker.deduplication_enabled?).to be(true)
+ end
end
context 'when feature flag is set' do
@@ -99,7 +123,10 @@ RSpec.describe WorkerAttributes do
stub_feature_flags(my_feature_flag: true)
end
- it { is_expected.to eq(true) }
+ it 'returns true' do
+ expect(worker.deduplication_enabled?).to be(true)
+ expect(child_worker.deduplication_enabled?).to be(true)
+ end
end
context 'when the FF is disabled' do
@@ -107,7 +134,10 @@ RSpec.describe WorkerAttributes do
stub_feature_flags(my_feature_flag: false)
end
- it { is_expected.to eq(false) }
+ it 'returns false' do
+ expect(worker.deduplication_enabled?).to be(false)
+ expect(child_worker.deduplication_enabled?).to be(false)
+ end
end
end
end
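
The table-driven rewrite fixes one contract for every worker class attribute: the getter returns a default until the setter runs, a child class inherits the parent's value, and a child override never leaks back to the parent. That is ordinary singleton-class inheritance; a standalone sketch of the lookup in plain Ruby (names are illustrative, not GitLab's internals):

    # Standalone sketch (no Rails): child classes read through to the
    # parent until they set their own value.
    module Attributes
      def class_attributes
        @class_attributes ||= {}
      end

      def set_class_attribute(key, value)
        class_attributes[key] = value
      end

      def get_class_attribute(key, default)
        return class_attributes[key] if class_attributes.key?(key)
        return superclass.get_class_attribute(key, default) if superclass.respond_to?(:get_class_attribute)

        default
      end
    end

    class Parent; extend Attributes; end
    class Child < Parent; end

    Child.get_class_attribute(:urgency, :low)   # => :low  (default)
    Parent.set_class_attribute(:urgency, :high)
    Child.get_class_attribute(:urgency, :low)   # => :high (inherited)
    Child.set_class_attribute(:urgency, :throttled)
    Parent.get_class_attribute(:urgency, :low)  # => :high (override did not leak back)
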
diff --git a/spec/workers/container_registry/migration/enqueuer_worker_spec.rb b/spec/workers/container_registry/migration/enqueuer_worker_spec.rb
index a57a9e3b2e8..ab3bd8f75d4 100644
--- a/spec/workers/container_registry/migration/enqueuer_worker_spec.rb
+++ b/spec/workers/container_registry/migration/enqueuer_worker_spec.rb
@@ -32,660 +32,356 @@ RSpec.describe ContainerRegistry::Migration::EnqueuerWorker, :aggregate_failures
end
end
- context 'with container_registry_migration_phase2_enqueuer_loop disabled' do
+ context 'migrations are disabled' do
before do
- stub_feature_flags(container_registry_migration_phase2_enqueuer_loop: false)
+ allow(ContainerRegistry::Migration).to receive(:enabled?).and_return(false)
end
- shared_examples 're-enqueuing based on capacity' do |capacity_limit: 4|
- context 'below capacity' do
- before do
- allow(ContainerRegistry::Migration).to receive(:capacity).and_return(capacity_limit)
- end
-
- it 're-enqueues the worker' do
- expect(described_class).to receive(:perform_async)
- expect(described_class).to receive(:perform_in).with(7.seconds)
-
- subject
- end
-
- context 'enqueue_twice feature flag disabled' do
- before do
- stub_feature_flags(container_registry_migration_phase2_enqueue_twice: false)
- end
-
- it 'only enqueues the worker once' do
- expect(described_class).to receive(:perform_async)
- expect(described_class).not_to receive(:perform_in)
-
- subject
- end
- end
- end
-
- context 'above capacity' do
- before do
- allow(ContainerRegistry::Migration).to receive(:capacity).and_return(-1)
- end
-
- it 'does not re-enqueue the worker' do
- expect(described_class).not_to receive(:perform_async)
- expect(described_class).not_to receive(:perform_in).with(7.seconds)
-
- subject
- end
- end
- end
-
- context 'with qualified repository' do
- before do
- allow_worker(on: :next_repository) do |repository|
- allow(repository).to receive(:migration_pre_import).and_return(:ok)
- end
- end
-
- shared_examples 'starting the next import' do
- it 'starts the pre-import for the next qualified repository' do
- expect_log_extra_metadata(
- import_type: 'next',
- container_repository_id: container_repository.id,
- container_repository_path: container_repository.path,
- container_repository_migration_state: 'pre_importing'
- )
-
- expect { subject }.to make_queries_matching(/LIMIT 2/)
-
- expect(container_repository.reload).to be_pre_importing
- end
- end
-
- it_behaves_like 'starting the next import'
-
- context 'when the new pre-import maxes out the capacity' do
- before do
- # set capacity to 10
- stub_feature_flags(
- container_registry_migration_phase2_capacity_25: false
- )
-
- # Plus 2 created above gives 9 importing repositories
- create_list(:container_repository, 7, :importing)
- end
-
- it 'does not re-enqueue the worker' do
- expect(described_class).not_to receive(:perform_async)
- expect(described_class).not_to receive(:perform_in)
-
- subject
- end
- end
-
- it_behaves_like 're-enqueuing based on capacity'
-
- context 'max tag count is 0' do
- before do
- stub_application_setting(container_registry_import_max_tags_count: 0)
- # Add 8 tags to the next repository
- stub_container_registry_tags(
- repository: container_repository.path, tags: %w(a b c d e f g h), with_manifest: true
- )
- end
-
- it_behaves_like 'starting the next import'
- end
- end
-
- context 'migrations are disabled' do
+ it_behaves_like 'no action' do
before do
- allow(ContainerRegistry::Migration).to receive(:enabled?).and_return(false)
- end
-
- it_behaves_like 'no action' do
- before do
- expect_log_extra_metadata(migration_enabled: false)
- end
+ expect_log_extra_metadata(migration_enabled: false)
end
end
+ end
- context 'above capacity' do
+ context 'with no repository qualifies' do
+ include_examples 'an idempotent worker' do
before do
- create(:container_repository, :importing)
- create(:container_repository, :importing)
- allow(ContainerRegistry::Migration).to receive(:capacity).and_return(1)
- end
-
- it_behaves_like 'no action' do
- before do
- expect_log_extra_metadata(below_capacity: false, max_capacity_setting: 1)
- end
- end
-
- it 'does not re-enqueue the worker' do
- expect(ContainerRegistry::Migration::EnqueuerWorker).not_to receive(:perform_async)
- expect(ContainerRegistry::Migration::EnqueuerWorker).not_to receive(:perform_in)
-
- subject
- end
- end
-
- context 'too soon before previous completed import step' do
- where(:state, :timestamp) do
- :import_done | :migration_import_done_at
- :pre_import_done | :migration_pre_import_done_at
- :import_aborted | :migration_aborted_at
- :import_skipped | :migration_skipped_at
- end
-
- with_them do
- before do
- allow(ContainerRegistry::Migration).to receive(:enqueue_waiting_time).and_return(45.minutes)
- create(:container_repository, state, timestamp => 1.minute.ago)
- end
-
- it_behaves_like 'no action' do
- before do
- expect_log_extra_metadata(waiting_time_passed: false, current_waiting_time_setting: 45.minutes)
- end
- end
+ allow(ContainerRepository).to receive(:ready_for_import).and_return(ContainerRepository.none)
end
- context 'when last completed repository has nil timestamps' do
- before do
- allow(ContainerRegistry::Migration).to receive(:enqueue_waiting_time).and_return(45.minutes)
- create(:container_repository, migration_state: 'import_done')
- end
-
- it 'continues to try the next import' do
- expect { subject }.to change { container_repository.reload.migration_state }
- end
- end
- end
-
- context 'when an aborted import is available' do
- let_it_be(:aborted_repository) { create(:container_repository, :import_aborted) }
-
- context 'with a successful registry request' do
- before do
- allow_worker(on: :next_aborted_repository) do |repository|
- allow(repository).to receive(:migration_import).and_return(:ok)
- allow(repository.gitlab_api_client).to receive(:import_status).and_return('import_failed')
- end
- end
-
- it 'retries the import for the aborted repository' do
- expect_log_extra_metadata(
- import_type: 'retry',
- container_repository_id: aborted_repository.id,
- container_repository_path: aborted_repository.path,
- container_repository_migration_state: 'importing'
- )
-
- subject
-
- expect(aborted_repository.reload).to be_importing
- expect(container_repository.reload).to be_default
- end
-
- it_behaves_like 're-enqueuing based on capacity'
- end
-
- context 'when an error occurs' do
- it 'does not abort that migration' do
- allow_worker(on: :next_aborted_repository) do |repository|
- allow(repository).to receive(:retry_aborted_migration).and_raise(StandardError)
- end
-
- expect_log_extra_metadata(
- import_type: 'retry',
- container_repository_id: aborted_repository.id,
- container_repository_path: aborted_repository.path,
- container_repository_migration_state: 'import_aborted'
- )
-
- subject
-
- expect(aborted_repository.reload).to be_import_aborted
- expect(container_repository.reload).to be_default
- end
- end
+ it_behaves_like 'no action'
end
+ end
- context 'when no repository qualifies' do
- include_examples 'an idempotent worker' do
- before do
- allow(ContainerRepository).to receive(:ready_for_import).and_return(ContainerRepository.none)
- end
+ context 'when multiple aborted imports are available' do
+ let_it_be(:aborted_repository1) { create(:container_repository, :import_aborted) }
+ let_it_be(:aborted_repository2) { create(:container_repository, :import_aborted) }
- it_behaves_like 'no action'
- end
+ before do
+ container_repository.update!(created_at: 30.seconds.ago)
end
- context 'over max tag count' do
+ context 'with successful registry requests' do
before do
- stub_application_setting(container_registry_import_max_tags_count: 2)
+ allow_worker(on: :next_aborted_repository) do |repository|
+ allow(repository).to receive(:migration_import).and_return(:ok)
+ allow(repository.gitlab_api_client).to receive(:import_status).and_return('import_failed')
+ end
end
- it 'skips the repository' do
- expect_log_extra_metadata(
- import_type: 'next',
- container_repository_id: container_repository.id,
- container_repository_path: container_repository.path,
- container_repository_migration_state: 'import_skipped',
- tags_count_too_high: true,
- max_tags_count_setting: 2
+ it 'retries the import for the aborted repository' do
+ expect_log_info(
+ [
+ {
+ import_type: 'retry',
+ container_repository_id: aborted_repository1.id,
+ container_repository_path: aborted_repository1.path,
+ container_repository_migration_state: 'importing'
+ },
+ {
+ import_type: 'retry',
+ container_repository_id: aborted_repository2.id,
+ container_repository_path: aborted_repository2.path,
+ container_repository_migration_state: 'importing'
+ }
+ ]
)
- subject
-
- expect(container_repository.reload).to be_import_skipped
- expect(container_repository.migration_skipped_reason).to eq('too_many_tags')
- expect(container_repository.migration_skipped_at).not_to be_nil
- end
+ expect(worker).to receive(:handle_next_migration).and_call_original
- context 're-enqueuing' do
- before do
- # skipping will also re-enqueue, so we isolate the capacity behavior here
- allow_worker(on: :next_repository) do |repository|
- allow(repository).to receive(:skip_import).and_return(true)
- end
- end
+ subject
- it_behaves_like 're-enqueuing based on capacity', capacity_limit: 3
+ expect(aborted_repository1.reload).to be_importing
+ expect(aborted_repository2.reload).to be_importing
end
end
context 'when an error occurs' do
- before do
- allow(ContainerRegistry::Migration).to receive(:max_tags_count).and_raise(StandardError)
- end
-
- it 'aborts the import' do
- expect_log_extra_metadata(
- import_type: 'next',
- container_repository_id: container_repository.id,
- container_repository_path: container_repository.path,
- container_repository_migration_state: 'import_aborted'
- )
+ it 'does abort that migration' do
+ allow_worker(on: :next_aborted_repository) do |repository|
+ allow(repository).to receive(:retry_aborted_migration).and_raise(StandardError)
+ end
- expect(Gitlab::ErrorTracking).to receive(:log_exception).with(
- instance_of(StandardError),
- next_repository_id: container_repository.id
+ expect_log_info(
+ [
+ {
+ import_type: 'retry',
+ container_repository_id: aborted_repository1.id,
+ container_repository_path: aborted_repository1.path,
+ container_repository_migration_state: 'import_aborted'
+ }
+ ]
)
subject
- expect(container_repository.reload).to be_import_aborted
+ expect(aborted_repository1.reload).to be_import_aborted
+ expect(aborted_repository2.reload).to be_import_aborted
end
end
+ end
- context 'with the exclusive lease taken' do
- let(:lease_key) { worker.send(:lease_key) }
+ context 'when multiple qualified repositories are available' do
+ let_it_be(:container_repository2) { create(:container_repository, created_at: 2.days.ago) }
- before do
- stub_exclusive_lease_taken(lease_key, timeout: 30.minutes)
+ before do
+ allow_worker(on: :next_repository) do |repository|
+ allow(repository).to receive(:migration_pre_import).and_return(:ok)
end
- it 'does not perform' do
- expect(worker).not_to receive(:runnable?)
- expect(worker).not_to receive(:re_enqueue_if_capacity)
-
- subject
- end
+ stub_container_registry_tags(
+ repository: container_repository2.path,
+ tags: %w(tag4 tag5 tag6),
+ with_manifest: true
+ )
end
- end
- context 'with container_registry_migration_phase2_enqueuer_loop enabled' do
- context 'migrations are disabled' do
- before do
- allow(ContainerRegistry::Migration).to receive(:enabled?).and_return(false)
- end
+ shared_examples 'starting all the next imports' do
+ it 'starts the pre-import for the next qualified repositories' do
+ expect_log_info(
+ [
+ {
+ import_type: 'next',
+ container_repository_id: container_repository.id,
+ container_repository_path: container_repository.path,
+ container_repository_migration_state: 'pre_importing'
+ },
+ {
+ import_type: 'next',
+ container_repository_id: container_repository2.id,
+ container_repository_path: container_repository2.path,
+ container_repository_migration_state: 'pre_importing'
+ }
+ ]
+ )
- it_behaves_like 'no action' do
- before do
- expect_log_extra_metadata(migration_enabled: false)
- end
- end
- end
+ expect(worker).to receive(:handle_next_migration).exactly(3).times.and_call_original
- context 'with no repository qualifies' do
- include_examples 'an idempotent worker' do
- before do
- allow(ContainerRepository).to receive(:ready_for_import).and_return(ContainerRepository.none)
- end
+ expect { subject }.to make_queries_matching(/LIMIT 2/)
- it_behaves_like 'no action'
+ expect(container_repository.reload).to be_pre_importing
+ expect(container_repository2.reload).to be_pre_importing
end
end
- context 'when multiple aborted imports are available' do
- let_it_be(:aborted_repository1) { create(:container_repository, :import_aborted) }
- let_it_be(:aborted_repository2) { create(:container_repository, :import_aborted) }
+ it_behaves_like 'starting all the next imports'
+ context 'when the new pre-import maxes out the capacity' do
before do
- container_repository.update!(created_at: 30.seconds.ago)
+ # set capacity to 10
+ stub_feature_flags(
+ container_registry_migration_phase2_capacity_25: false,
+ container_registry_migration_phase2_capacity_40: false
+ )
+
+ # Plus 2 created above gives 9 importing repositories
+ create_list(:container_repository, 7, :importing)
end
- context 'with successful registry requests' do
- before do
- allow_worker(on: :next_aborted_repository) do |repository|
- allow(repository).to receive(:migration_import).and_return(:ok)
- allow(repository.gitlab_api_client).to receive(:import_status).and_return('import_failed')
- end
- end
+ it 'starts the pre-import only for one qualified repository' do
+ expect_log_info(
+ [
+ {
+ import_type: 'next',
+ container_repository_id: container_repository.id,
+ container_repository_path: container_repository.path,
+ container_repository_migration_state: 'pre_importing'
+ }
+ ]
+ )
- it 'retries the import for the aborted repository' do
- expect_log_info(
- [
- {
- import_type: 'retry',
- container_repository_id: aborted_repository1.id,
- container_repository_path: aborted_repository1.path,
- container_repository_migration_state: 'importing'
- },
- {
- import_type: 'retry',
- container_repository_id: aborted_repository2.id,
- container_repository_path: aborted_repository2.path,
- container_repository_migration_state: 'importing'
- }
- ]
- )
-
- expect(worker).to receive(:handle_next_migration).and_call_original
-
- subject
-
- expect(aborted_repository1.reload).to be_importing
- expect(aborted_repository2.reload).to be_importing
- end
- end
+ subject
- context 'when an error occurs' do
- it 'does abort that migration' do
- allow_worker(on: :next_aborted_repository) do |repository|
- allow(repository).to receive(:retry_aborted_migration).and_raise(StandardError)
- end
-
- expect_log_info(
- [
- {
- import_type: 'retry',
- container_repository_id: aborted_repository1.id,
- container_repository_path: aborted_repository1.path,
- container_repository_migration_state: 'import_aborted'
- }
- ]
- )
-
- subject
-
- expect(aborted_repository1.reload).to be_import_aborted
- expect(aborted_repository2.reload).to be_import_aborted
- end
+ expect(container_repository.reload).to be_pre_importing
+ expect(container_repository2.reload).to be_default
end
end
- context 'when multiple qualified repositories are available' do
- let_it_be(:container_repository2) { create(:container_repository, created_at: 2.days.ago) }
-
+ context 'max tag count is 0' do
before do
- allow_worker(on: :next_repository) do |repository|
- allow(repository).to receive(:migration_pre_import).and_return(:ok)
- end
-
+ stub_application_setting(container_registry_import_max_tags_count: 0)
+ # Add 8 tags to the next repository
stub_container_registry_tags(
- repository: container_repository2.path,
- tags: %w(tag4 tag5 tag6),
- with_manifest: true
+ repository: container_repository.path, tags: %w(a b c d e f g h), with_manifest: true
)
end
- shared_examples 'starting all the next imports' do
- it 'starts the pre-import for the next qualified repositories' do
- expect_log_info(
- [
- {
- import_type: 'next',
- container_repository_id: container_repository.id,
- container_repository_path: container_repository.path,
- container_repository_migration_state: 'pre_importing'
- },
- {
- import_type: 'next',
- container_repository_id: container_repository2.id,
- container_repository_path: container_repository2.path,
- container_repository_migration_state: 'pre_importing'
- }
- ]
- )
-
- expect(worker).to receive(:handle_next_migration).exactly(3).times.and_call_original
-
- expect { subject }.to make_queries_matching(/LIMIT 2/)
-
- expect(container_repository.reload).to be_pre_importing
- expect(container_repository2.reload).to be_pre_importing
- end
- end
-
it_behaves_like 'starting all the next imports'
+ end
- context 'when the new pre-import maxes out the capacity' do
- before do
- # set capacity to 10
- stub_feature_flags(
- container_registry_migration_phase2_capacity_25: false
- )
+ context 'when the deadline is hit' do
+ it 'does not handle the second qualified repository' do
+ expect(worker).to receive(:loop_deadline).and_return(5.seconds.from_now, 2.seconds.ago)
+ expect(worker).to receive(:handle_next_migration).once.and_call_original
- # Plus 2 created above gives 9 importing repositories
- create_list(:container_repository, 7, :importing)
- end
+ subject
- it 'starts the pre-import only for one qualified repository' do
- expect_log_info(
- [
- {
- import_type: 'next',
- container_repository_id: container_repository.id,
- container_repository_path: container_repository.path,
- container_repository_migration_state: 'pre_importing'
- }
- ]
- )
-
- subject
-
- expect(container_repository.reload).to be_pre_importing
- expect(container_repository2.reload).to be_default
- end
+ expect(container_repository.reload).to be_pre_importing
+ expect(container_repository2.reload).to be_default
end
+ end
+ end
- context 'max tag count is 0' do
- before do
- stub_application_setting(container_registry_import_max_tags_count: 0)
- # Add 8 tags to the next repository
- stub_container_registry_tags(
- repository: container_repository.path, tags: %w(a b c d e f g h), with_manifest: true
- )
- end
+ context 'when a mix of aborted imports and qualified repositories are available' do
+ let_it_be(:aborted_repository) { create(:container_repository, :import_aborted) }
- it_behaves_like 'starting all the next imports'
+ before do
+ allow_worker(on: :next_aborted_repository) do |repository|
+ allow(repository).to receive(:migration_import).and_return(:ok)
+ allow(repository.gitlab_api_client).to receive(:import_status).and_return('import_failed')
end
- context 'when the deadline is hit' do
- it 'does not handle the second qualified repository' do
- expect(worker).to receive(:loop_deadline).and_return(5.seconds.from_now, 2.seconds.ago)
- expect(worker).to receive(:handle_next_migration).once.and_call_original
-
- subject
-
- expect(container_repository.reload).to be_pre_importing
- expect(container_repository2.reload).to be_default
- end
+ allow_worker(on: :next_repository) do |repository|
+ allow(repository).to receive(:migration_pre_import).and_return(:ok)
end
end
- context 'when a mix of aborted imports and qualified repositories are available' do
- let_it_be(:aborted_repository) { create(:container_repository, :import_aborted) }
-
- before do
- allow_worker(on: :next_aborted_repository) do |repository|
- allow(repository).to receive(:migration_import).and_return(:ok)
- allow(repository.gitlab_api_client).to receive(:import_status).and_return('import_failed')
- end
+ it 'retries the aborted repository and start the migration on the qualified repository' do
+ expect_log_info(
+ [
+ {
+ import_type: 'retry',
+ container_repository_id: aborted_repository.id,
+ container_repository_path: aborted_repository.path,
+ container_repository_migration_state: 'importing'
+ },
+ {
+ import_type: 'next',
+ container_repository_id: container_repository.id,
+ container_repository_path: container_repository.path,
+ container_repository_migration_state: 'pre_importing'
+ }
+ ]
+ )
- allow_worker(on: :next_repository) do |repository|
- allow(repository).to receive(:migration_pre_import).and_return(:ok)
- end
- end
+ subject
- it 'retries the aborted repository and start the migration on the qualified repository' do
- expect_log_info(
- [
- {
- import_type: 'retry',
- container_repository_id: aborted_repository.id,
- container_repository_path: aborted_repository.path,
- container_repository_migration_state: 'importing'
- },
- {
- import_type: 'next',
- container_repository_id: container_repository.id,
- container_repository_path: container_repository.path,
- container_repository_migration_state: 'pre_importing'
- }
- ]
- )
+ expect(aborted_repository.reload).to be_importing
+ expect(container_repository.reload).to be_pre_importing
+ end
+ end
- subject
+ context 'above capacity' do
+ before do
+ create(:container_repository, :importing)
+ create(:container_repository, :importing)
+ allow(ContainerRegistry::Migration).to receive(:capacity).and_return(1)
+ end
- expect(aborted_repository.reload).to be_importing
- expect(container_repository.reload).to be_pre_importing
+ it_behaves_like 'no action' do
+ before do
+ expect_log_extra_metadata(below_capacity: false, max_capacity_setting: 1)
end
end
+ end
- context 'above capacity' do
+ context 'too soon before previous completed import step' do
+ where(:state, :timestamp) do
+ :import_done | :migration_import_done_at
+ :pre_import_done | :migration_pre_import_done_at
+ :import_aborted | :migration_aborted_at
+ :import_skipped | :migration_skipped_at
+ end
+
+ with_them do
before do
- create(:container_repository, :importing)
- create(:container_repository, :importing)
- allow(ContainerRegistry::Migration).to receive(:capacity).and_return(1)
+ allow(ContainerRegistry::Migration).to receive(:enqueue_waiting_time).and_return(45.minutes)
+ create(:container_repository, state, timestamp => 1.minute.ago)
end
it_behaves_like 'no action' do
before do
- expect_log_extra_metadata(below_capacity: false, max_capacity_setting: 1)
+ expect_log_extra_metadata(waiting_time_passed: false, current_waiting_time_setting: 45.minutes)
end
end
end
- context 'too soon before previous completed import step' do
- where(:state, :timestamp) do
- :import_done | :migration_import_done_at
- :pre_import_done | :migration_pre_import_done_at
- :import_aborted | :migration_aborted_at
- :import_skipped | :migration_skipped_at
- end
-
- with_them do
- before do
- allow(ContainerRegistry::Migration).to receive(:enqueue_waiting_time).and_return(45.minutes)
- create(:container_repository, state, timestamp => 1.minute.ago)
- end
-
- it_behaves_like 'no action' do
- before do
- expect_log_extra_metadata(waiting_time_passed: false, current_waiting_time_setting: 45.minutes)
- end
- end
+ context 'when last completed repository has nil timestamps' do
+ before do
+ allow(ContainerRegistry::Migration).to receive(:enqueue_waiting_time).and_return(45.minutes)
+ create(:container_repository, migration_state: 'import_done')
end
- context 'when last completed repository has nil timestamps' do
- before do
- allow(ContainerRegistry::Migration).to receive(:enqueue_waiting_time).and_return(45.minutes)
- create(:container_repository, migration_state: 'import_done')
- end
-
- it 'continues to try the next import' do
- expect { subject }.to change { container_repository.reload.migration_state }
- end
+ it 'continues to try the next import' do
+ expect { subject }.to change { container_repository.reload.migration_state }
end
end
+ end
- context 'over max tag count' do
- before do
- stub_application_setting(container_registry_import_max_tags_count: 2)
- end
+ context 'over max tag count' do
+ before do
+ stub_application_setting(container_registry_import_max_tags_count: 2)
+ end
- it 'skips the repository' do
- expect_log_info(
- [
- {
- import_type: 'next',
- container_repository_id: container_repository.id,
- container_repository_path: container_repository.path,
- container_repository_migration_state: 'import_skipped',
- container_repository_migration_skipped_reason: 'too_many_tags'
- }
- ]
- )
+ it 'skips the repository' do
+ expect_log_info(
+ [
+ {
+ import_type: 'next',
+ container_repository_id: container_repository.id,
+ container_repository_path: container_repository.path,
+ container_repository_migration_state: 'import_skipped',
+ container_repository_migration_skipped_reason: 'too_many_tags'
+ }
+ ]
+ )
- expect(worker).to receive(:handle_next_migration).twice.and_call_original
- # skipping the migration will re_enqueue the job
- expect(described_class).to receive(:enqueue_a_job)
+ expect(worker).to receive(:handle_next_migration).twice.and_call_original
+ # skipping the migration will re_enqueue the job
+ expect(described_class).to receive(:enqueue_a_job)
- subject
+ subject
- expect(container_repository.reload).to be_import_skipped
- expect(container_repository.migration_skipped_reason).to eq('too_many_tags')
- expect(container_repository.migration_skipped_at).not_to be_nil
- end
+ expect(container_repository.reload).to be_import_skipped
+ expect(container_repository.migration_skipped_reason).to eq('too_many_tags')
+ expect(container_repository.migration_skipped_at).not_to be_nil
end
+ end
- context 'when an error occurs' do
- before do
- allow(ContainerRegistry::Migration).to receive(:max_tags_count).and_raise(StandardError)
- end
+ context 'when an error occurs' do
+ before do
+ allow(ContainerRegistry::Migration).to receive(:max_tags_count).and_raise(StandardError)
+ end
- it 'aborts the import' do
- expect_log_info(
- [
- {
- import_type: 'next',
- container_repository_id: container_repository.id,
- container_repository_path: container_repository.path,
- container_repository_migration_state: 'import_aborted'
- }
- ]
- )
+ it 'aborts the import' do
+ expect_log_info(
+ [
+ {
+ import_type: 'next',
+ container_repository_id: container_repository.id,
+ container_repository_path: container_repository.path,
+ container_repository_migration_state: 'import_aborted'
+ }
+ ]
+ )
- expect(Gitlab::ErrorTracking).to receive(:log_exception).with(
- instance_of(StandardError),
- next_repository_id: container_repository.id
- )
+ expect(Gitlab::ErrorTracking).to receive(:log_exception).with(
+ instance_of(StandardError),
+ next_repository_id: container_repository.id
+ )
- # aborting the migration will re_enqueue the job
- expect(described_class).to receive(:enqueue_a_job)
+ # aborting the migration will re_enqueue the job
+ expect(described_class).to receive(:enqueue_a_job)
- subject
+ subject
- expect(container_repository.reload).to be_import_aborted
- end
+ expect(container_repository.reload).to be_import_aborted
end
+ end
- context 'with the exclusive lease taken' do
- let(:lease_key) { worker.send(:lease_key) }
+ context 'with the exclusive lease taken' do
+ let(:lease_key) { worker.send(:lease_key) }
- before do
- stub_exclusive_lease_taken(lease_key, timeout: 30.minutes)
- end
+ before do
+ stub_exclusive_lease_taken(lease_key, timeout: 30.minutes)
+ end
- it 'does not perform' do
- expect(worker).not_to receive(:handle_aborted_migration)
- expect(worker).not_to receive(:handle_next_migration)
+ it 'does not perform' do
+ expect(worker).not_to receive(:handle_aborted_migration)
+ expect(worker).not_to receive(:handle_next_migration)
- subject
- end
+ subject
end
end
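
With the container_registry_migration_phase2_enqueuer_loop flag removed, the flattened suite describes a single behaviour: the enqueuer retries aborted imports, then starts pre-imports in a loop bounded by capacity and a wall-clock deadline; the 'when the deadline is hit' example stubs loop_deadline with successive values, so the deadline must be re-read on every pass. A sketch of that loop shape, using the method names the spec stubs (the overall structure is an assumption):

    # Sketch only; the real worker's structure may differ. loop_deadline
    # is consulted on every pass, and handle_next_migration returns
    # falsey once nothing more qualifies.
    def perform_work
      handle_aborted_migration

      while Time.current < loop_deadline
        break unless handle_next_migration
      end
    end
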
diff --git a/spec/workers/container_registry/migration/guard_worker_spec.rb b/spec/workers/container_registry/migration/guard_worker_spec.rb
index c52a3fc5d54..d2bcfef2f5b 100644
--- a/spec/workers/container_registry/migration/guard_worker_spec.rb
+++ b/spec/workers/container_registry/migration/guard_worker_spec.rb
@@ -37,6 +37,7 @@ RSpec.describe ContainerRegistry::Migration::GuardWorker, :aggregate_failures do
expect(worker).to receive(:log_extra_metadata_on_done).with(:stale_migrations_count, 1)
expect(worker).to receive(:log_extra_metadata_on_done).with(:aborted_stale_migrations_count, 1)
expect(worker).to receive(:log_extra_metadata_on_done).with(:aborted_long_running_migration_ids, [stale_migration.id])
+ expect(worker).to receive(:log_extra_metadata_on_done).with(:aborted_long_running_migration_paths, [stale_migration.path])
expect(ContainerRegistry::Migration).to receive(timeout).and_call_original
expect { subject }
@@ -44,19 +45,6 @@ RSpec.describe ContainerRegistry::Migration::GuardWorker, :aggregate_failures do
.and change { stale_migration.reload.migration_state }.to('import_aborted')
.and not_change { ongoing_migration.migration_state }
end
-
- context 'registry_migration_guard_thresholds feature flag disabled' do
- before do
- stub_feature_flags(registry_migration_guard_thresholds: false)
- end
-
- it 'falls back on the hardcoded value' do
- expect(ContainerRegistry::Migration).not_to receive(:pre_import_timeout)
-
- expect { subject }
- .to change { stale_migration.reload.migration_state }.to('import_aborted')
- end
- end
end
context 'migration is canceled' do
@@ -75,6 +63,7 @@ RSpec.describe ContainerRegistry::Migration::GuardWorker, :aggregate_failures do
expect(worker).to receive(:log_extra_metadata_on_done).with(:stale_migrations_count, 1)
expect(worker).to receive(:log_extra_metadata_on_done).with(:aborted_stale_migrations_count, 1)
expect(worker).to receive(:log_extra_metadata_on_done).with(:aborted_long_running_migration_ids, [stale_migration.id])
+ expect(worker).to receive(:log_extra_metadata_on_done).with(:aborted_long_running_migration_paths, [stale_migration.path])
expect(ContainerRegistry::Migration).to receive(timeout).and_call_original
expect { subject }
@@ -83,19 +72,6 @@ RSpec.describe ContainerRegistry::Migration::GuardWorker, :aggregate_failures do
expect(stale_migration.reload.migration_state).to eq('import_skipped')
expect(stale_migration.reload.migration_skipped_reason).to eq('migration_canceled')
end
-
- context 'registry_migration_guard_thresholds feature flag disabled' do
- before do
- stub_feature_flags(registry_migration_guard_thresholds: false)
- end
-
- it 'falls back on the hardcoded value' do
- expect(ContainerRegistry::Migration).not_to receive(timeout)
-
- expect { subject }
- .to change { stale_migration.reload.migration_state }.to('import_skipped')
- end
- end
end
context 'when the retry limit has not been reached' do
@@ -132,16 +108,15 @@ RSpec.describe ContainerRegistry::Migration::GuardWorker, :aggregate_failures do
end
context 'with pre_importing stale migrations' do
- let(:ongoing_migration) { create(:container_repository, :pre_importing) }
- let(:stale_migration) { create(:container_repository, :pre_importing, migration_pre_import_started_at: 11.minutes.ago) }
+ let_it_be(:ongoing_migration) { create(:container_repository, :pre_importing) }
+ let_it_be(:stale_migration) { create(:container_repository, :pre_importing, migration_pre_import_started_at: 11.minutes.ago) }
+
let(:import_status) { 'test' }
before do
allow_next_instance_of(ContainerRegistry::GitlabApiClient) do |client|
allow(client).to receive(:import_status).and_return(import_status)
end
-
- stub_application_setting(container_registry_pre_import_timeout: 10.minutes.to_i)
end
it 'will abort the migration' do
@@ -161,7 +136,76 @@ RSpec.describe ContainerRegistry::Migration::GuardWorker, :aggregate_failures do
context 'the client returns pre_import_in_progress' do
let(:import_status) { 'pre_import_in_progress' }
- it_behaves_like 'handling long running migrations', timeout: :pre_import_timeout
+ shared_examples 'not aborting the stale migration' do
+ it 'will not abort the migration' do
+ expect(worker).to receive(:log_extra_metadata_on_done).with(:stale_migrations_count, 1)
+ expect(worker).to receive(:log_extra_metadata_on_done).with(:aborted_stale_migrations_count, 0)
+
+ expect { subject }
+ .to not_change(pre_importing_migrations, :count)
+ .and not_change(pre_import_done_migrations, :count)
+ .and not_change(importing_migrations, :count)
+ .and not_change(import_done_migrations, :count)
+ .and not_change(import_aborted_migrations, :count)
+ .and not_change { stale_migration.reload.migration_state }
+ .and not_change { ongoing_migration.migration_state }
+ end
+ end
+
+ context 'not long running' do
+ before do
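+ # 12 minutes is longer than the 11 minutes elapsed since pre-import started, so the migration is not long running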
+ stub_application_setting(container_registry_pre_import_timeout: 12.minutes.to_i)
+ end
+
+ it_behaves_like 'not aborting the stale migration'
+ end
+
+ context 'long running' do
+ before do
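+ # 9 minutes is shorter than the 11 minutes elapsed since pre-import started, so the migration counts as long running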
+ stub_application_setting(container_registry_pre_import_timeout: 9.minutes.to_i)
+ end
+
+ context 'with registry_migration_guard_dynamic_pre_import_timeout enabled' do
+ before do
+ stub_application_setting(container_registry_pre_import_tags_rate: 1)
+ end
+
+ context 'below the dynamic threshold' do
+ before do
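+ # 760 tags at the configured rate of 1 second per tag allow ~12.7 minutes, more than the 11 minutes elapsed, so the dynamic timeout has not been hit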
+ allow_next_found_instance_of(ContainerRepository) do |repository|
+ allow(repository).to receive(:tags_count).and_return(11.minutes.to_i + 100)
+ end
+ end
+
+ it_behaves_like 'not aborting the stale migration'
+ end
+
+ context 'above the dynamic threshold' do
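+ # 560 tags at 1 second per tag allow only ~9.3 minutes, less than the 11 minutes elapsed, so the dynamic timeout has been exceeded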
+ let(:tags) do
+ Array.new(11.minutes.to_i - 100) { |i| "tag#{i}" }
+ end
+
+ before do
+ # We can't use allow_next_found_instance_of because the shared example
+ # 'handling long running migrations' already uses it.
+ # Instead, we stub the ContainerRegistry::Client instance directly.
+ allow_next_instance_of(ContainerRegistry::Client) do |client|
+ allow(client).to receive(:repository_tags).and_return({ 'tags' => tags })
+ end
+ end
+
+ it_behaves_like 'handling long running migrations', timeout: :pre_import_timeout
+ end
+ end
+
+ context 'with registry_migration_guard_dynamic_pre_import_timeout disabled' do
+ before do
+ stub_feature_flags(registry_migration_guard_dynamic_pre_import_timeout: false)
+ end
+
+ it_behaves_like 'handling long running migrations', timeout: :pre_import_timeout
+ end
+ end
end
end
diff --git a/spec/workers/database/batched_background_migration/ci_database_worker_spec.rb b/spec/workers/database/batched_background_migration/ci_database_worker_spec.rb
index f3cf5450048..2b4a42060d9 100644
--- a/spec/workers/database/batched_background_migration/ci_database_worker_spec.rb
+++ b/spec/workers/database/batched_background_migration/ci_database_worker_spec.rb
@@ -3,5 +3,5 @@
require 'spec_helper'
RSpec.describe Database::BatchedBackgroundMigration::CiDatabaseWorker, :clean_gitlab_redis_shared_state do
- it_behaves_like 'it runs batched background migration jobs', 'ci', feature_flag: :execute_batched_migrations_on_schedule_ci_database
+ it_behaves_like 'it runs batched background migration jobs', :ci
end
diff --git a/spec/workers/database/batched_background_migration_worker_spec.rb b/spec/workers/database/batched_background_migration_worker_spec.rb
index 7f0883def3c..a6c7db60abe 100644
--- a/spec/workers/database/batched_background_migration_worker_spec.rb
+++ b/spec/workers/database/batched_background_migration_worker_spec.rb
@@ -3,5 +3,5 @@
require 'spec_helper'
RSpec.describe Database::BatchedBackgroundMigrationWorker do
- it_behaves_like 'it runs batched background migration jobs', :main, feature_flag: :execute_batched_migrations_on_schedule
+ it_behaves_like 'it runs batched background migration jobs', :main
end
diff --git a/spec/workers/database/ci_namespace_mirrors_consistency_check_worker_spec.rb b/spec/workers/database/ci_namespace_mirrors_consistency_check_worker_spec.rb
index e5024c568cb..1c083d1d8a3 100644
--- a/spec/workers/database/ci_namespace_mirrors_consistency_check_worker_spec.rb
+++ b/spec/workers/database/ci_namespace_mirrors_consistency_check_worker_spec.rb
@@ -6,29 +6,11 @@ RSpec.describe Database::CiNamespaceMirrorsConsistencyCheckWorker do
let(:worker) { described_class.new }
describe '#perform' do
- context 'feature flag is disabled' do
- before do
- stub_feature_flags(ci_namespace_mirrors_consistency_check: false)
- end
-
- it 'does not perform the consistency check on namespaces' do
- expect(Database::ConsistencyCheckService).not_to receive(:new)
- expect(worker).not_to receive(:log_extra_metadata_on_done)
- worker.perform
- end
- end
-
- context 'feature flag is enabled' do
- before do
- stub_feature_flags(ci_namespace_mirrors_consistency_check: true)
- end
-
- it 'executes the consistency check on namespaces' do
- expect(Database::ConsistencyCheckService).to receive(:new).and_call_original
- expected_result = { batches: 0, matches: 0, mismatches: 0, mismatches_details: [] }
- expect(worker).to receive(:log_extra_metadata_on_done).with(:results, expected_result)
- worker.perform
- end
+ it 'executes the consistency check on namespaces' do
+ expect(Database::ConsistencyCheckService).to receive(:new).and_call_original
+ expected_result = { batches: 0, matches: 0, mismatches: 0, mismatches_details: [] }
+ expect(worker).to receive(:log_extra_metadata_on_done).with(:results, expected_result)
+ worker.perform
end
context 'logs should contain the detailed mismatches' do
@@ -37,7 +19,6 @@ RSpec.describe Database::CiNamespaceMirrorsConsistencyCheckWorker do
before do
redis_shared_state_cleanup!
- stub_feature_flags(ci_namespace_mirrors_consistency_check: true)
create_list(:namespace, 10) # This will also create Ci::NameSpaceMirror objects
missing_namespace.delete
diff --git a/spec/workers/database/ci_project_mirrors_consistency_check_worker_spec.rb b/spec/workers/database/ci_project_mirrors_consistency_check_worker_spec.rb
index f8e950d8917..8c839410ccd 100644
--- a/spec/workers/database/ci_project_mirrors_consistency_check_worker_spec.rb
+++ b/spec/workers/database/ci_project_mirrors_consistency_check_worker_spec.rb
@@ -6,29 +6,11 @@ RSpec.describe Database::CiProjectMirrorsConsistencyCheckWorker do
let(:worker) { described_class.new }
describe '#perform' do
- context 'feature flag is disabled' do
- before do
- stub_feature_flags(ci_project_mirrors_consistency_check: false)
- end
-
- it 'does not perform the consistency check on projects' do
- expect(Database::ConsistencyCheckService).not_to receive(:new)
- expect(worker).not_to receive(:log_extra_metadata_on_done)
- worker.perform
- end
- end
-
- context 'feature flag is enabled' do
- before do
- stub_feature_flags(ci_project_mirrors_consistency_check: true)
- end
-
- it 'executes the consistency check on projects' do
- expect(Database::ConsistencyCheckService).to receive(:new).and_call_original
- expected_result = { batches: 0, matches: 0, mismatches: 0, mismatches_details: [] }
- expect(worker).to receive(:log_extra_metadata_on_done).with(:results, expected_result)
- worker.perform
- end
+ it 'executes the consistency check on projects' do
+ expect(Database::ConsistencyCheckService).to receive(:new).and_call_original
+ expected_result = { batches: 0, matches: 0, mismatches: 0, mismatches_details: [] }
+ expect(worker).to receive(:log_extra_metadata_on_done).with(:results, expected_result)
+ worker.perform
end
context 'logs should contain the detailed mismatches' do
@@ -37,7 +19,6 @@ RSpec.describe Database::CiProjectMirrorsConsistencyCheckWorker do
before do
redis_shared_state_cleanup!
- stub_feature_flags(ci_project_mirrors_consistency_check: true)
create_list(:project, 10) # This will also create Ci::ProjectMirror objects
missing_project.delete
diff --git a/spec/workers/delete_container_repository_worker_spec.rb b/spec/workers/delete_container_repository_worker_spec.rb
index ec040eab2d4..a011457444a 100644
--- a/spec/workers/delete_container_repository_worker_spec.rb
+++ b/spec/workers/delete_container_repository_worker_spec.rb
@@ -3,31 +3,119 @@
require 'spec_helper'
RSpec.describe DeleteContainerRepositoryWorker do
- let(:registry) { create(:container_repository) }
- let(:project) { registry.project }
- let(:user) { project.first_owner }
+ let_it_be(:repository) { create(:container_repository) }
- subject { described_class.new }
+ let(:project) { repository.project }
+ let(:user) { project.first_owner }
+ let(:worker) { described_class.new }
describe '#perform' do
+ let(:user_id) { user.id }
+ let(:repository_id) { repository.id }
+
+ subject(:perform) { worker.perform(user_id, repository_id) }
+
it 'executes the destroy service' do
- service = instance_double(Projects::ContainerRepository::DestroyService)
- expect(service).to receive(:execute)
- expect(Projects::ContainerRepository::DestroyService).to receive(:new).with(project, user).and_return(service)
+ expect_destroy_service_execution
+
+ perform
+ end
+
+ context 'with an invalid user id' do
+ let(:user_id) { -1 }
+
+ it { expect { perform }.not_to raise_error }
+ end
- subject.perform(user.id, registry.id)
+ context 'with an invalid repository id' do
+ let(:repository_id) { -1 }
+
+ it { expect { perform }.not_to raise_error }
end
- it 'does not raise error when user could not be found' do
- expect do
- subject.perform(-1, registry.id)
- end.not_to raise_error
+ context 'with a repository being migrated', :freeze_time do
+ before do
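+ # the rate and timeout below determine how long repository deletion is postponed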
+ stub_application_setting(
+ container_registry_pre_import_tags_rate: 0.5,
+ container_registry_import_timeout: 10.minutes.to_i
+ )
+ end
+
+ shared_examples 'destroying the repository' do
+ it 'does destroy the repository' do
+ expect_next_found_instance_of(ContainerRepository) do |container_repository|
+ expect(container_repository).not_to receive(:tags_count)
+ end
+ expect(described_class).not_to receive(:perform_in)
+ expect_destroy_service_execution
+
+ perform
+ end
+ end
+
+ shared_examples 'not re-enqueuing the job if the feature flag is disabled' do
+ before do
+ stub_feature_flags(container_registry_migration_phase2_delete_container_repository_worker_support: false)
+ end
+
+ it_behaves_like 'destroying the repository'
+ end
+
+ context 'with migration state set to pre importing' do
+ let_it_be(:repository) { create(:container_repository, :pre_importing) }
+
+ let(:tags_count) { 60 }
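+ # delay = estimated pre-import time (tags_count * 0.5s per tag) + import timeout + fixed buffer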
+ let(:delay) { (tags_count * 0.5).seconds + 10.minutes + described_class::FIXED_DELAY }
+
+ it 'does not destroy the repository and re-enqueues the job' do
+ expect_next_found_instance_of(ContainerRepository) do |container_repository|
+ expect(container_repository).to receive(:tags_count).and_return(tags_count)
+ end
+ expect(described_class).to receive(:perform_in).with(delay.from_now)
+ expect(worker).to receive(:log_extra_metadata_on_done).with(:delete_postponed, delay)
+ expect(::Projects::ContainerRepository::DestroyService).not_to receive(:new)
+
+ perform
+ end
+
+ it_behaves_like 'not re-enqueuing the job if the feature flag is disabled'
+ end
+
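+ # in these states a migration is still in flight, so deletion is postponed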
+ %i[pre_import_done importing import_aborted].each do |migration_state|
+ context "with migration state set to #{migration_state}" do
+ let_it_be(:repository) { create(:container_repository, migration_state) }
+
+ let(:delay) { 10.minutes + described_class::FIXED_DELAY }
+
+ it 'does not destroy the repository and re-enqueues the job' do
+ expect_next_found_instance_of(ContainerRepository) do |container_repository|
+ expect(container_repository).not_to receive(:tags_count)
+ end
+ expect(described_class).to receive(:perform_in).with(delay.from_now)
+ expect(worker).to receive(:log_extra_metadata_on_done).with(:delete_postponed, delay)
+ expect(::Projects::ContainerRepository::DestroyService).not_to receive(:new)
+
+ perform
+ end
+
+ it_behaves_like 'not re-enqueuing the job if the feature flag is disabled'
+ end
+ end
+
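+ # in these states no migration is in flight, so deletion can proceed immediately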
+ %i[default import_done import_skipped].each do |migration_state|
+ context "with migration state set to #{migration_state}" do
+ let_it_be(:repository) { create(:container_repository, migration_state) }
+
+ it_behaves_like 'destroying the repository'
+ it_behaves_like 'not re-enqueuing the job if the feature flag is disabled'
+ end
+ end
end
- it 'does not raise error when registry could not be found' do
- expect do
- subject.perform(user.id, -1)
- end.not_to raise_error
+ def expect_destroy_service_execution
+ service = instance_double(Projects::ContainerRepository::DestroyService)
+ expect(service).to receive(:execute)
+ expect(Projects::ContainerRepository::DestroyService).to receive(:new).with(project, user).and_return(service)
end
end
end
diff --git a/spec/workers/deployments/hooks_worker_spec.rb b/spec/workers/deployments/hooks_worker_spec.rb
index a9240b45360..7c5f288fa57 100644
--- a/spec/workers/deployments/hooks_worker_spec.rb
+++ b/spec/workers/deployments/hooks_worker_spec.rb
@@ -7,7 +7,7 @@ RSpec.describe Deployments::HooksWorker do
describe '#perform' do
before do
- allow(ProjectServiceWorker).to receive(:perform_async)
+ allow(Integrations::ExecuteWorker).to receive(:perform_async)
end
it 'logs deployment and project IDs as metadata' do
@@ -25,7 +25,7 @@ RSpec.describe Deployments::HooksWorker do
project = deployment.project
service = create(:integrations_slack, project: project, deployment_events: true)
- expect(ProjectServiceWorker).to receive(:perform_async).with(service.id, an_instance_of(Hash))
+ expect(Integrations::ExecuteWorker).to receive(:perform_async).with(service.id, an_instance_of(Hash))
worker.perform(deployment_id: deployment.id, status_changed_at: Time.current)
end
@@ -35,13 +35,13 @@ RSpec.describe Deployments::HooksWorker do
project = deployment.project
create(:integrations_slack, project: project, deployment_events: true, active: false)
- expect(ProjectServiceWorker).not_to receive(:perform_async)
+ expect(Integrations::ExecuteWorker).not_to receive(:perform_async)
worker.perform(deployment_id: deployment.id, status_changed_at: Time.current)
end
it 'does not execute if a deployment does not exist' do
- expect(ProjectServiceWorker).not_to receive(:perform_async)
+ expect(Integrations::ExecuteWorker).not_to receive(:perform_async)
worker.perform(deployment_id: non_existing_record_id, status_changed_at: Time.current)
end
diff --git a/spec/workers/environments/auto_stop_worker_spec.rb b/spec/workers/environments/auto_stop_worker_spec.rb
index 1983cfa18ea..cb162b5a01c 100644
--- a/spec/workers/environments/auto_stop_worker_spec.rb
+++ b/spec/workers/environments/auto_stop_worker_spec.rb
@@ -23,7 +23,7 @@ RSpec.describe Environments::AutoStopWorker do
it 'stops the environment' do
expect { subject }
.to change { Environment.find_by_name('review/feature').state }
- .from('available').to('stopped')
+ .from('available').to('stopping')
end
it 'executes the stop action' do
diff --git a/spec/workers/every_sidekiq_worker_spec.rb b/spec/workers/every_sidekiq_worker_spec.rb
index 0c83a692ca8..a9e886de52a 100644
--- a/spec/workers/every_sidekiq_worker_spec.rb
+++ b/spec/workers/every_sidekiq_worker_spec.rb
@@ -180,7 +180,9 @@ RSpec.describe 'Every Sidekiq worker' do
'ClusterWaitForAppInstallationWorker' => 3,
'ClusterWaitForAppUpdateWorker' => 3,
'ClusterWaitForIngressIpAddressWorker' => 3,
+ 'Clusters::Applications::ActivateIntegrationWorker' => 3,
'Clusters::Applications::ActivateServiceWorker' => 3,
+ 'Clusters::Applications::DeactivateIntegrationWorker' => 3,
'Clusters::Applications::DeactivateServiceWorker' => 3,
'Clusters::Applications::UninstallWorker' => 3,
'Clusters::Applications::WaitForUninstallAppWorker' => 3,
@@ -192,7 +194,6 @@ RSpec.describe 'Every Sidekiq worker' do
'CreateGithubWebhookWorker' => 3,
'CreateNoteDiffFileWorker' => 3,
'CreatePipelineWorker' => 3,
- 'DastSiteValidationWorker' => 3,
'DeleteContainerRepositoryWorker' => 3,
'DeleteDiffFilesWorker' => 3,
'DeleteMergedBranchesWorker' => 3,
@@ -227,8 +228,6 @@ RSpec.describe 'Every Sidekiq worker' do
'Epics::UpdateEpicsDatesWorker' => 3,
'ErrorTrackingIssueLinkWorker' => 3,
'Experiments::RecordConversionEventWorker' => 3,
- 'ExpireJobCacheWorker' => 3,
- 'ExpirePipelineCacheWorker' => 3,
'ExportCsvWorker' => 3,
'ExternalServiceReactiveCachingWorker' => 3,
'FileHookWorker' => false,
@@ -308,11 +307,11 @@ RSpec.describe 'Every Sidekiq worker' do
'IncidentManagement::OncallRotations::PersistAllRotationsShiftsJob' => 3,
'IncidentManagement::OncallRotations::PersistShiftsJob' => 3,
'IncidentManagement::PagerDuty::ProcessIncidentWorker' => 3,
+ 'Integrations::ExecuteWorker' => 3,
+ 'Integrations::IrkerWorker' => 3,
'InvalidGpgSignatureUpdateWorker' => 3,
'IrkerWorker' => 3,
'IssuableExportCsvWorker' => 3,
- 'IssuePlacementWorker' => 3,
- 'IssueRebalancingWorker' => 3,
'Issues::PlacementWorker' => 3,
'Issues::RebalancingWorker' => 3,
'IterationsUpdateStatusWorker' => 3,
@@ -323,6 +322,7 @@ RSpec.describe 'Every Sidekiq worker' do
'JiraConnect::SyncMergeRequestWorker' => 3,
'JiraConnect::SyncProjectWorker' => 3,
'LdapGroupSyncWorker' => 3,
+ 'Licenses::ResetSubmitLicenseUsageDataBannerWorker' => 13,
'MailScheduler::IssueDueWorker' => 3,
'MailScheduler::NotificationServiceWorker' => 3,
'MembersDestroyer::UnassignIssuablesWorker' => 3,
@@ -340,7 +340,6 @@ RSpec.describe 'Every Sidekiq worker' do
'Metrics::Dashboard::PruneOldAnnotationsWorker' => 3,
'Metrics::Dashboard::SyncDashboardsWorker' => 3,
'MigrateExternalDiffsWorker' => 3,
- 'NamespacelessProjectDestroyWorker' => 3,
'Namespaces::OnboardingIssueCreatedWorker' => 3,
'Namespaces::OnboardingPipelineCreatedWorker' => 3,
'Namespaces::OnboardingProgressWorker' => 3,
@@ -378,7 +377,6 @@ RSpec.describe 'Every Sidekiq worker' do
'PostReceive' => 3,
'ProcessCommitWorker' => 3,
'ProjectCacheWorker' => 3,
- 'ProjectDailyStatisticsWorker' => 3,
'ProjectDestroyWorker' => 3,
'ProjectExportWorker' => false,
'ProjectImportScheduleWorker' => 1,
@@ -392,7 +390,6 @@ RSpec.describe 'Every Sidekiq worker' do
'Projects::ScheduleBulkRepositoryShardMovesWorker' => 3,
'Projects::UpdateRepositoryStorageWorker' => 3,
'Projects::RefreshBuildArtifactsSizeStatisticsWorker' => 0,
- 'Prometheus::CreateDefaultAlertsWorker' => 3,
'PropagateIntegrationGroupWorker' => 3,
'PropagateIntegrationInheritDescendantWorker' => 3,
'PropagateIntegrationInheritWorker' => 3,
@@ -410,9 +407,7 @@ RSpec.describe 'Every Sidekiq worker' do
'RepositoryCleanupWorker' => 3,
'RepositoryForkWorker' => 5,
'RepositoryImportWorker' => false,
- 'RepositoryRemoveRemoteWorker' => 3,
'RepositoryUpdateMirrorWorker' => false,
- 'RepositoryPushAuditEventWorker' => 3,
'RepositoryUpdateRemoteMirrorWorker' => 3,
'RequirementsManagement::ImportRequirementsCsvWorker' => 3,
'RequirementsManagement::ProcessRequirementsReportsWorker' => 3,
diff --git a/spec/workers/expire_job_cache_worker_spec.rb b/spec/workers/expire_job_cache_worker_spec.rb
deleted file mode 100644
index e9af39ed2df..00000000000
--- a/spec/workers/expire_job_cache_worker_spec.rb
+++ /dev/null
@@ -1,31 +0,0 @@
-# frozen_string_literal: true
-
-require 'spec_helper'
-
-RSpec.describe ExpireJobCacheWorker do
- let_it_be(:pipeline) { create(:ci_empty_pipeline) }
-
- let(:project) { pipeline.project }
-
- describe '#perform' do
- context 'with a job in the pipeline' do
- let_it_be(:job) { create(:ci_build, pipeline: pipeline) }
-
- let(:job_args) { job.id }
-
- it_behaves_like 'an idempotent worker'
-
- it_behaves_like 'worker with data consistency',
- described_class,
- data_consistency: :delayed
- end
-
- context 'when there is no job in the pipeline' do
- it 'does not change the etag store' do
- expect(Gitlab::EtagCaching::Store).not_to receive(:new)
-
- perform_multiple(non_existing_record_id)
- end
- end
- end
-end
diff --git a/spec/workers/expire_pipeline_cache_worker_spec.rb b/spec/workers/expire_pipeline_cache_worker_spec.rb
deleted file mode 100644
index f4c4df2e752..00000000000
--- a/spec/workers/expire_pipeline_cache_worker_spec.rb
+++ /dev/null
@@ -1,38 +0,0 @@
-# frozen_string_literal: true
-
-require 'spec_helper'
-
-RSpec.describe ExpirePipelineCacheWorker do
- let_it_be(:user) { create(:user) }
- let_it_be(:project) { create(:project) }
- let_it_be(:pipeline) { create(:ci_pipeline, project: project) }
-
- subject { described_class.new }
-
- describe '#perform' do
- it 'executes the service' do
- expect_next_instance_of(Ci::ExpirePipelineCacheService) do |instance|
- expect(instance).to receive(:execute).with(pipeline).and_call_original
- end
-
- subject.perform(pipeline.id)
- end
-
- it "doesn't do anything if the pipeline not exist" do
- expect_any_instance_of(Ci::ExpirePipelineCacheService).not_to receive(:execute)
- expect_any_instance_of(Gitlab::EtagCaching::Store).not_to receive(:touch)
-
- subject.perform(617748)
- end
-
- skip "with https://gitlab.com/gitlab-org/gitlab/-/issues/325291 resolved" do
- it_behaves_like 'an idempotent worker' do
- let(:job_args) { [pipeline.id] }
- end
- end
-
- it_behaves_like 'worker with data consistency',
- described_class,
- data_consistency: :delayed
- end
-end
diff --git a/spec/workers/gitlab/jira_import/stage/import_issues_worker_spec.rb b/spec/workers/gitlab/jira_import/stage/import_issues_worker_spec.rb
index 10702c17cb5..2b08a592164 100644
--- a/spec/workers/gitlab/jira_import/stage/import_issues_worker_spec.rb
+++ b/spec/workers/gitlab/jira_import/stage/import_issues_worker_spec.rb
@@ -3,7 +3,7 @@
require 'spec_helper'
RSpec.describe Gitlab::JiraImport::Stage::ImportIssuesWorker do
- include JiraServiceHelper
+ include JiraIntegrationHelpers
let_it_be(:user) { create(:user) }
let_it_be(:project) { create(:project, import_type: 'jira') }
diff --git a/spec/workers/gitlab/jira_import/stage/import_labels_worker_spec.rb b/spec/workers/gitlab/jira_import/stage/import_labels_worker_spec.rb
index 52c516b9ff9..d15f2caba19 100644
--- a/spec/workers/gitlab/jira_import/stage/import_labels_worker_spec.rb
+++ b/spec/workers/gitlab/jira_import/stage/import_labels_worker_spec.rb
@@ -3,7 +3,7 @@
require 'spec_helper'
RSpec.describe Gitlab::JiraImport::Stage::ImportLabelsWorker do
- include JiraServiceHelper
+ include JiraIntegrationHelpers
let_it_be(:user) { create(:user) }
let_it_be(:project) { create(:project, import_type: 'jira') }
diff --git a/spec/workers/gitlab_service_ping_worker_spec.rb b/spec/workers/gitlab_service_ping_worker_spec.rb
index abccc0dc967..057639dcf1d 100644
--- a/spec/workers/gitlab_service_ping_worker_spec.rb
+++ b/spec/workers/gitlab_service_ping_worker_spec.rb
@@ -3,8 +3,14 @@
require 'spec_helper'
RSpec.describe GitlabServicePingWorker, :clean_gitlab_redis_shared_state do
+ let(:payload) { { recorded_at: Time.current.rfc3339 } }
+
before do
allow_next_instance_of(ServicePing::SubmitService) { |service| allow(service).to receive(:execute) }
+ allow_next_instance_of(ServicePing::BuildPayload) do |service|
+ allow(service).to receive(:execute).and_return(payload)
+ end
+
allow(subject).to receive(:sleep)
end
@@ -15,10 +21,54 @@ RSpec.describe GitlabServicePingWorker, :clean_gitlab_redis_shared_state do
subject.perform
end
- it 'delegates to ServicePing::SubmitService' do
- expect_next_instance_of(ServicePing::SubmitService) { |service| expect(service).to receive(:execute) }
+ context 'with prerecord_service_ping_data feature enabled' do
+ it 'delegates to ServicePing::SubmitService' do
+ stub_feature_flags(prerecord_service_ping_data: true)
- subject.perform
+ expect_next_instance_of(ServicePing::SubmitService, payload: payload) do |service|
+ expect(service).to receive(:execute)
+ end
+
+ subject.perform
+ end
+ end
+
+ context 'with prerecord_service_ping_data feature disabled' do
+ it 'does not prerecord ServicePing, and calls SubmitService', :aggregate_failures do
+ stub_feature_flags(prerecord_service_ping_data: false)
+
+ expect(ServicePing::BuildPayload).not_to receive(:new)
+ expect_next_instance_of(ServicePing::SubmitService, payload: nil) do |service|
+ expect(service).to receive(:execute)
+ end
+ expect { subject.perform }.not_to change { RawUsageData.count }
+ end
+ end
+
+ context 'payload computation' do
+ it 'creates a RawUsageData entry when there is no entry with the same recorded_at timestamp' do
+ expect { subject.perform }.to change { RawUsageData.count }.by(1)
+ end
+
+ it 'updates the RawUsageData entry when there is an entry with the same recorded_at timestamp' do
+ record = create(:raw_usage_data, payload: { some_metric: 123 }, recorded_at: payload[:recorded_at])
+
+ expect { subject.perform }.to change { record.reload.payload }
+ .from("some_metric" => 123).to(payload.stringify_keys)
+ end
+
+ it 'reports errors and continues execution' do
+ error = StandardError.new('some error')
+ allow(::ServicePing::BuildPayload).to receive(:new).and_raise(error)
+
+ expect(::Gitlab::ErrorTracking).to receive(:track_and_raise_for_dev_exception).with(error)
+ expect_next_instance_of(::ServicePing::SubmitService, payload: nil) do |service|
+ expect(service).to receive(:execute)
+ end
+
+ subject.perform
+ end
end
it "obtains a #{described_class::LEASE_TIMEOUT} second exclusive lease" do
diff --git a/spec/workers/project_service_worker_spec.rb b/spec/workers/integrations/execute_worker_spec.rb
index 55ec07ff79c..19600f35c8f 100644
--- a/spec/workers/project_service_worker_spec.rb
+++ b/spec/workers/integrations/execute_worker_spec.rb
@@ -1,7 +1,7 @@
# frozen_string_literal: true
require 'spec_helper'
-RSpec.describe ProjectServiceWorker, '#perform' do
+RSpec.describe Integrations::ExecuteWorker, '#perform' do
let_it_be(:integration) { create(:jira_integration) }
let(:worker) { described_class.new }
@@ -36,4 +36,26 @@ RSpec.describe ProjectServiceWorker, '#perform' do
end.not_to raise_error
end
end
+
+ context 'when using the old worker class' do
+ let(:described_class) { ProjectServiceWorker }
+
+ it 'uses the correct worker attributes', :aggregate_failures do
+ expect(described_class.sidekiq_options).to include('retry' => 3, 'dead' => false)
+ expect(described_class.get_data_consistency).to eq(:always)
+ expect(described_class.get_feature_category).to eq(:integrations)
+ expect(described_class.get_urgency).to eq(:low)
+ expect(described_class.worker_has_external_dependencies?).to be(true)
+ end
+
+ it 'executes integration with given data' do
+ data = { test: 'test' }
+
+ expect_next_found_instance_of(integration.class) do |integration|
+ expect(integration).to receive(:execute).with(data)
+ end
+
+ worker.perform(integration.id, data)
+ end
+ end
end
diff --git a/spec/workers/irker_worker_spec.rb b/spec/workers/integrations/irker_worker_spec.rb
index c3d40ad2783..27dc08212ea 100644
--- a/spec/workers/irker_worker_spec.rb
+++ b/spec/workers/integrations/irker_worker_spec.rb
@@ -2,7 +2,7 @@
require 'spec_helper'
-RSpec.describe IrkerWorker, '#perform' do
+RSpec.describe Integrations::IrkerWorker, '#perform' do
let_it_be(:project) { create(:project, :repository) }
let_it_be(:user) { create(:user) }
let_it_be(:push_data) { HashWithIndifferentAccess.new(Gitlab::DataBuilder::Push.build_sample(project, user)) }
@@ -25,7 +25,7 @@ RSpec.describe IrkerWorker, '#perform' do
]
end
- let(:tcp_socket) { double('socket') }
+ let(:tcp_socket) { instance_double(TCPSocket) }
subject(:worker) { described_class.new }
@@ -35,7 +35,7 @@ RSpec.describe IrkerWorker, '#perform' do
allow(tcp_socket).to receive(:close).and_return(true)
end
- context 'local requests are not allowed' do
+ context 'when local requests are not allowed' do
before do
allow(Gitlab::CurrentSettings).to receive(:allow_local_requests_from_web_hooks_and_services?).and_return(false)
end
@@ -43,7 +43,7 @@ RSpec.describe IrkerWorker, '#perform' do
it { expect(worker.perform(*arguments)).to be_falsey }
end
- context 'connection fails' do
+ context 'when connection fails' do
before do
allow(TCPSocket).to receive(:new).and_raise(Errno::ECONNREFUSED.new('test'))
end
@@ -51,7 +51,7 @@ RSpec.describe IrkerWorker, '#perform' do
it { expect(subject.perform(*arguments)).to be_falsey }
end
- context 'connection successful' do
+ context 'when the connection is successful' do
before do
allow(Gitlab::CurrentSettings)
.to receive(:allow_local_requests_from_web_hooks_and_services?).and_return(true)
@@ -59,7 +59,7 @@ RSpec.describe IrkerWorker, '#perform' do
it { expect(subject.perform(*arguments)).to be_truthy }
- context 'new branch' do
+ context 'with new branch' do
it 'sends a correct message with branches url' do
branches_url = Gitlab::Routing.url_helpers
.project_branches_url(project)
@@ -74,7 +74,7 @@ RSpec.describe IrkerWorker, '#perform' do
end
end
- context 'deleted branch' do
+ context 'with deleted branch' do
it 'sends a correct message' do
push_data['after'] = '0000000000000000000000000000000000000000'
@@ -86,7 +86,7 @@ RSpec.describe IrkerWorker, '#perform' do
end
end
- context 'new commits to existing branch' do
+ context 'with new commits to existing branch' do
it 'sends a correct message with a compare url' do
compare_url = Gitlab::Routing.url_helpers
.project_compare_url(project,
@@ -101,6 +101,12 @@ RSpec.describe IrkerWorker, '#perform' do
subject.perform(*arguments)
end
end
+
+ context 'when using the old worker class' do
+ let(:described_class) { ::IrkerWorker }
+
+ it { expect(subject.perform(*arguments)).to be_truthy }
+ end
end
def wrap_message(text)
diff --git a/spec/workers/issue_placement_worker_spec.rb b/spec/workers/issue_placement_worker_spec.rb
deleted file mode 100644
index 9b5121d98e8..00000000000
--- a/spec/workers/issue_placement_worker_spec.rb
+++ /dev/null
@@ -1,151 +0,0 @@
-# frozen_string_literal: true
-
-require 'spec_helper'
-
-RSpec.describe IssuePlacementWorker do
- describe '#perform' do
- let_it_be(:time) { Time.now.utc }
- let_it_be(:group) { create(:group) }
- let_it_be(:project) { create(:project, group: group) }
- let_it_be(:author) { create(:user) }
- let_it_be(:common_attrs) { { author: author, project: project } }
- let_it_be(:unplaced) { common_attrs.merge(relative_position: nil) }
- let_it_be_with_reload(:issue) { create(:issue, **unplaced, created_at: time) }
- let_it_be_with_reload(:issue_a) { create(:issue, **unplaced, created_at: time - 1.minute) }
- let_it_be_with_reload(:issue_b) { create(:issue, **unplaced, created_at: time - 2.minutes) }
- let_it_be_with_reload(:issue_c) { create(:issue, **unplaced, created_at: time + 1.minute) }
- let_it_be_with_reload(:issue_d) { create(:issue, **unplaced, created_at: time + 2.minutes) }
- let_it_be_with_reload(:issue_e) { create(:issue, **common_attrs, relative_position: 10, created_at: time + 1.minute) }
- let_it_be_with_reload(:issue_f) { create(:issue, **unplaced, created_at: time + 1.minute) }
-
- let_it_be(:irrelevant) { create(:issue, relative_position: nil, created_at: time) }
-
- shared_examples 'running the issue placement worker' do
- let(:issue_id) { issue.id }
- let(:project_id) { project.id }
-
- it 'places all issues created at most 5 minutes before this one at the end, most recent last' do
- expect { run_worker }.not_to change { irrelevant.reset.relative_position }
-
- expect(project.issues.order_by_relative_position)
- .to eq([issue_e, issue_b, issue_a, issue, issue_c, issue_f, issue_d])
- expect(project.issues.where(relative_position: nil)).not_to exist
- end
-
- it 'schedules rebalancing if needed' do
- issue_a.update!(relative_position: RelativePositioning::MAX_POSITION)
-
- expect(Issues::RebalancingWorker).to receive(:perform_async).with(nil, nil, project.group.id)
-
- run_worker
- end
-
- context 'there are more than QUERY_LIMIT unplaced issues' do
- before_all do
- # Ensure there are more than N issues in this set
- n = described_class::QUERY_LIMIT
- create_list(:issue, n - 5, **unplaced)
- end
-
- it 'limits the sweep to QUERY_LIMIT records, and reschedules placement' do
- expect(Issue).to receive(:move_nulls_to_end)
- .with(have_attributes(count: described_class::QUERY_LIMIT))
- .and_call_original
-
- expect(Issues::PlacementWorker).to receive(:perform_async).with(nil, project.id)
-
- run_worker
-
- expect(project.issues.where(relative_position: nil)).to exist
- end
-
- it 'is eventually correct' do
- prefix = project.issues.where.not(relative_position: nil).order(:relative_position).to_a
- moved = project.issues.where.not(id: prefix.map(&:id))
-
- run_worker
-
- expect(project.issues.where(relative_position: nil)).to exist
-
- run_worker
-
- expect(project.issues.where(relative_position: nil)).not_to exist
- expect(project.issues.order(:relative_position)).to eq(prefix + moved.order(:created_at, :id))
- end
- end
-
- context 'we are passed bad IDs' do
- let(:issue_id) { non_existing_record_id }
- let(:project_id) { non_existing_record_id }
-
- def max_positions_by_project
- Issue
- .group(:project_id)
- .pluck(:project_id, Issue.arel_table[:relative_position].maximum.as('max_relative_position'))
- .to_h
- end
-
- it 'does not move any issues to the end' do
- expect { run_worker }.not_to change { max_positions_by_project }
- end
-
- context 'the project_id refers to an empty project' do
- let!(:project_id) { create(:project).id }
-
- it 'does not move any issues to the end' do
- expect { run_worker }.not_to change { max_positions_by_project }
- end
- end
- end
-
- it 'anticipates the failure to place the issues, and schedules rebalancing' do
- allow(Issue).to receive(:move_nulls_to_end) { raise RelativePositioning::NoSpaceLeft }
-
- expect(Issues::RebalancingWorker).to receive(:perform_async).with(nil, nil, project.group.id)
- expect(Gitlab::ErrorTracking)
- .to receive(:log_exception)
- .with(RelativePositioning::NoSpaceLeft, worker_arguments)
-
- run_worker
- end
- end
-
- context 'passing an issue ID' do
- def run_worker
- described_class.new.perform(issue_id)
- end
-
- let(:worker_arguments) { { issue_id: issue_id, project_id: nil } }
-
- it_behaves_like 'running the issue placement worker'
-
- context 'when block_issue_repositioning is enabled' do
- let(:issue_id) { issue.id }
- let(:project_id) { project.id }
-
- before do
- stub_feature_flags(block_issue_repositioning: group)
- end
-
- it 'does not run repositioning tasks' do
- expect { run_worker }.not_to change { issue.reset.relative_position }
- end
- end
- end
-
- context 'passing a project ID' do
- def run_worker
- described_class.new.perform(nil, project_id)
- end
-
- let(:worker_arguments) { { issue_id: nil, project_id: project_id } }
-
- it_behaves_like 'running the issue placement worker'
- end
- end
-
- it 'has the `until_executed` deduplicate strategy' do
- expect(described_class.get_deduplicate_strategy).to eq(:until_executed)
- expect(described_class.get_deduplication_options).to include({ including_scheduled: true })
- end
-end
diff --git a/spec/workers/issue_rebalancing_worker_spec.rb b/spec/workers/issue_rebalancing_worker_spec.rb
deleted file mode 100644
index cfb19af05b3..00000000000
--- a/spec/workers/issue_rebalancing_worker_spec.rb
+++ /dev/null
@@ -1,104 +0,0 @@
-# frozen_string_literal: true
-
-require 'spec_helper'
-
-RSpec.describe IssueRebalancingWorker, :clean_gitlab_redis_shared_state do
- describe '#perform' do
- let_it_be(:group) { create(:group) }
- let_it_be(:project) { create(:project, group: group) }
- let_it_be(:issue) { create(:issue, project: project) }
-
- shared_examples 'running the worker' do
- it 'runs an instance of Issues::RelativePositionRebalancingService' do
- service = double(execute: nil)
- service_param = arguments.second.present? ? kind_of(Project.id_in([project]).class) : kind_of(group&.all_projects.class)
-
- expect(Issues::RelativePositionRebalancingService).to receive(:new).with(service_param).and_return(service)
-
- described_class.new.perform(*arguments)
- end
-
- it 'anticipates there being too many concurrent rebalances' do
- service = double
- service_param = arguments.second.present? ? kind_of(Project.id_in([project]).class) : kind_of(group&.all_projects.class)
-
- allow(service).to receive(:execute).and_raise(Issues::RelativePositionRebalancingService::TooManyConcurrentRebalances)
- expect(Issues::RelativePositionRebalancingService).to receive(:new).with(service_param).and_return(service)
- expect(Gitlab::ErrorTracking).to receive(:log_exception).with(Issues::RelativePositionRebalancingService::TooManyConcurrentRebalances, include(project_id: arguments.second, root_namespace_id: arguments.third))
-
- described_class.new.perform(*arguments)
- end
-
- it 'takes no action if the value is nil' do
- expect(Issues::RelativePositionRebalancingService).not_to receive(:new)
- expect(Gitlab::ErrorTracking).not_to receive(:log_exception)
-
- described_class.new.perform # all arguments are nil
- end
-
- it 'does not schedule a new rebalance if it finished under 1h ago' do
- container_type = arguments.second.present? ? ::Gitlab::Issues::Rebalancing::State::PROJECT : ::Gitlab::Issues::Rebalancing::State::NAMESPACE
- container_id = arguments.second || arguments.third
-
- Gitlab::Redis::SharedState.with do |redis|
- redis.set(::Gitlab::Issues::Rebalancing::State.send(:recently_finished_key, container_type, container_id), true)
- end
-
- expect(Issues::RelativePositionRebalancingService).not_to receive(:new)
- expect(Gitlab::ErrorTracking).not_to receive(:log_exception)
-
- described_class.new.perform(*arguments)
- end
- end
-
- shared_examples 'safely handles non-existent ids' do
- it 'anticipates the inability to find the issue' do
- expect(Gitlab::ErrorTracking).to receive(:log_exception).with(ArgumentError, include(project_id: arguments.second, root_namespace_id: arguments.third))
- expect(Issues::RelativePositionRebalancingService).not_to receive(:new)
-
- described_class.new.perform(*arguments)
- end
- end
-
- context 'without root_namespace param' do
- it_behaves_like 'running the worker' do
- let(:arguments) { [-1, project.id] }
- end
-
- it_behaves_like 'safely handles non-existent ids' do
- let(:arguments) { [nil, -1] }
- end
-
- include_examples 'an idempotent worker' do
- let(:job_args) { [-1, project.id] }
- end
-
- include_examples 'an idempotent worker' do
- let(:job_args) { [nil, -1] }
- end
- end
-
- context 'with root_namespace param' do
- it_behaves_like 'running the worker' do
- let(:arguments) { [nil, nil, group.id] }
- end
-
- it_behaves_like 'safely handles non-existent ids' do
- let(:arguments) { [nil, nil, -1] }
- end
-
- include_examples 'an idempotent worker' do
- let(:job_args) { [nil, nil, group.id] }
- end
-
- include_examples 'an idempotent worker' do
- let(:job_args) { [nil, nil, -1] }
- end
- end
- end
-
- it 'has the `until_executed` deduplicate strategy' do
- expect(described_class.get_deduplicate_strategy).to eq(:until_executed)
- expect(described_class.get_deduplication_options).to include({ including_scheduled: true })
- end
-end
diff --git a/spec/workers/loose_foreign_keys/cleanup_worker_spec.rb b/spec/workers/loose_foreign_keys/cleanup_worker_spec.rb
index 1814abfac1d..632e4fb3071 100644
--- a/spec/workers/loose_foreign_keys/cleanup_worker_spec.rb
+++ b/spec/workers/loose_foreign_keys/cleanup_worker_spec.rb
@@ -157,10 +157,10 @@ RSpec.describe LooseForeignKeys::CleanupWorker do
describe 'multi-database support' do
where(:current_minute, :configured_base_models, :expected_connection_model) do
- 2 | { main: 'ApplicationRecord', ci: 'Ci::ApplicationRecord' } | 'ApplicationRecord'
- 3 | { main: 'ApplicationRecord', ci: 'Ci::ApplicationRecord' } | 'Ci::ApplicationRecord'
- 2 | { main: 'ApplicationRecord' } | 'ApplicationRecord'
- 3 | { main: 'ApplicationRecord' } | 'ApplicationRecord'
+ 2 | { main: 'ActiveRecord::Base', ci: 'Ci::ApplicationRecord' } | 'ActiveRecord::Base'
+ 3 | { main: 'ActiveRecord::Base', ci: 'Ci::ApplicationRecord' } | 'Ci::ApplicationRecord'
+ 2 | { main: 'ActiveRecord::Base' } | 'ActiveRecord::Base'
+ 3 | { main: 'ActiveRecord::Base' } | 'ActiveRecord::Base'
end
with_them do
diff --git a/spec/workers/merge_requests/create_pipeline_worker_spec.rb b/spec/workers/merge_requests/create_pipeline_worker_spec.rb
index 06d44c45706..441d7652219 100644
--- a/spec/workers/merge_requests/create_pipeline_worker_spec.rb
+++ b/spec/workers/merge_requests/create_pipeline_worker_spec.rb
@@ -3,24 +3,50 @@
require 'spec_helper'
RSpec.describe MergeRequests::CreatePipelineWorker do
- subject(:worker) { described_class.new }
-
describe '#perform' do
let(:user) { create(:user) }
let(:project) { create(:project) }
let(:merge_request) { create(:merge_request) }
+ let(:worker) { described_class.new }
+
+ subject { worker.perform(project.id, user.id, merge_request.id) }
context 'when the objects exist' do
it 'calls the merge request create pipeline service and calls update head pipeline' do
aggregate_failures do
- expect_next_instance_of(MergeRequests::CreatePipelineService, project: project, current_user: user) do |service|
+ expect_next_instance_of(MergeRequests::CreatePipelineService,
+ project: project,
+ current_user: user,
+ params: { push_options: nil }) do |service|
expect(service).to receive(:execute).with(merge_request)
end
expect(MergeRequest).to receive(:find_by_id).with(merge_request.id).and_return(merge_request)
expect(merge_request).to receive(:update_head_pipeline)
- subject.perform(project.id, user.id, merge_request.id)
+ subject
+ end
+ end
+
+ context 'when push options are passed as a Hash to the worker' do
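+ # Sidekiq deserializes arguments from JSON, so the worker receives string keys and is expected to pass them to the service symbolized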
+ let(:extra_params) { { 'push_options' => { 'ci' => { 'skip' => true } } } }
+
+ subject { worker.perform(project.id, user.id, merge_request.id, extra_params) }
+
+ it 'calls the merge request create pipeline service and calls update head pipeline' do
+ aggregate_failures do
+ expect_next_instance_of(MergeRequests::CreatePipelineService,
+ project: project,
+ current_user: user,
+ params: { push_options: { ci: { skip: true } } }) do |service|
+ expect(service).to receive(:execute).with(merge_request)
+ end
+
+ expect(MergeRequest).to receive(:find_by_id).with(merge_request.id).and_return(merge_request)
+ expect(merge_request).to receive(:update_head_pipeline)
+
+ subject
+ end
end
end
end
@@ -29,8 +55,7 @@ RSpec.describe MergeRequests::CreatePipelineWorker do
it 'does not call the create pipeline service' do
expect(MergeRequests::CreatePipelineService).not_to receive(:new)
- expect { subject.perform(project.id, user.id, merge_request.id) }
- .not_to raise_exception
+ expect { subject }.not_to raise_exception
end
end
diff --git a/spec/workers/merge_requests/update_head_pipeline_worker_spec.rb b/spec/workers/merge_requests/update_head_pipeline_worker_spec.rb
index 5e0b07067df..3574b8296a4 100644
--- a/spec/workers/merge_requests/update_head_pipeline_worker_spec.rb
+++ b/spec/workers/merge_requests/update_head_pipeline_worker_spec.rb
@@ -24,18 +24,6 @@ RSpec.describe MergeRequests::UpdateHeadPipelineWorker do
create(:merge_request, source_branch: 'feature', target_branch: "v1.1.0", source_project: project)
end
- context 'when related merge request is already merged' do
- let!(:merged_merge_request) do
- create(:merge_request, source_branch: 'master', target_branch: "branch_2", source_project: project, state: 'merged')
- end
-
- it 'does not schedule update head pipeline job' do
- expect(UpdateHeadPipelineForMergeRequestWorker).not_to receive(:perform_async).with(merged_merge_request.id)
-
- subject
- end
- end
-
context 'when the head pipeline sha equals merge request sha' do
let(:ref) { 'feature' }
@@ -52,6 +40,22 @@ RSpec.describe MergeRequests::UpdateHeadPipelineWorker do
expect(merge_request_1.reload.head_pipeline).to eq(pipeline)
expect(merge_request_2.reload.head_pipeline).to eq(pipeline)
end
+
+ context 'when the merge request is not open' do
+ before do
+ merge_request_1.close!
+ end
+
+ it 'only updates the open merge requests' do
+ merge_request_1
+ merge_request_2
+
+ subject
+
+ expect(merge_request_1.reload.head_pipeline).not_to eq(pipeline)
+ expect(merge_request_2.reload.head_pipeline).to eq(pipeline)
+ end
+ end
end
context 'when the head pipeline sha does not equal merge request sha' do
diff --git a/spec/workers/namespaceless_project_destroy_worker_spec.rb b/spec/workers/namespaceless_project_destroy_worker_spec.rb
deleted file mode 100644
index 93e8415f3bb..00000000000
--- a/spec/workers/namespaceless_project_destroy_worker_spec.rb
+++ /dev/null
@@ -1,77 +0,0 @@
-# frozen_string_literal: true
-
-require 'spec_helper'
-
-RSpec.describe NamespacelessProjectDestroyWorker do
- include ProjectForksHelper
-
- subject { described_class.new }
-
- before do
- # Stub after_save callbacks that will fail when Project has no namespace
- allow_any_instance_of(Project).to receive(:update_project_statistics).and_return(nil)
- end
-
- describe '#perform' do
- context 'project has namespace' do
- it 'does not do anything' do
- project = create(:project)
-
- subject.perform(project.id)
-
- expect(Project.unscoped.all).to include(project)
- end
- end
-
- context 'project has no namespace' do
- let!(:project) { create(:project) }
-
- before do
- allow_any_instance_of(Project).to receive(:namespace).and_return(nil)
- end
-
- context 'project not a fork of another project' do
- it "truncates the project's team" do
- expect_any_instance_of(ProjectTeam).to receive(:truncate)
-
- subject.perform(project.id)
- end
-
- it 'deletes the project' do
- subject.perform(project.id)
-
- expect(Project.unscoped.all).not_to include(project)
- end
-
- it 'does not call unlink_fork' do
- is_expected.not_to receive(:unlink_fork)
-
- subject.perform(project.id)
- end
- end
-
- context 'project forked from another' do
- let!(:parent_project) { create(:project) }
- let(:project) do
- namespaceless_project = fork_project(parent_project)
- namespaceless_project.save!
- namespaceless_project
- end
-
- it 'closes open merge requests' do
- merge_request = create(:merge_request, source_project: project, target_project: parent_project)
-
- subject.perform(project.id)
-
- expect(merge_request.reload).to be_closed
- end
-
- it 'destroys fork network members' do
- subject.perform(project.id)
-
- expect(parent_project.forked_to_members).to be_empty
- end
- end
- end
- end
-end
diff --git a/spec/workers/pages_transfer_worker_spec.rb b/spec/workers/pages_transfer_worker_spec.rb
deleted file mode 100644
index 7d17461bc5a..00000000000
--- a/spec/workers/pages_transfer_worker_spec.rb
+++ /dev/null
@@ -1,38 +0,0 @@
-# frozen_string_literal: true
-
-require 'spec_helper'
-
-RSpec.describe PagesTransferWorker do
- describe '#perform' do
- Gitlab::PagesTransfer::METHODS.each do |meth|
- context "when method is #{meth}" do
- let(:args) { [1, 2, 3] }
-
- it 'calls the service with the given arguments' do
- expect_next_instance_of(Gitlab::PagesTransfer) do |service|
- expect(service).to receive(meth).with(*args).and_return(true)
- end
-
- subject.perform(meth, args)
- end
-
- it 'raises an error when the service returns false' do
- expect_next_instance_of(Gitlab::PagesTransfer) do |service|
- expect(service).to receive(meth).with(*args).and_return(false)
- end
-
- expect { subject.perform(meth, args) }
- .to raise_error(described_class::TransferFailedError)
- end
- end
- end
-
- describe 'when method is not allowed' do
- it 'does nothing' do
- expect(Gitlab::PagesTransfer).not_to receive(:new)
-
- subject.perform('object_id', [])
- end
- end
- end
-end
diff --git a/spec/workers/pipeline_hooks_worker_spec.rb b/spec/workers/pipeline_hooks_worker_spec.rb
index 13a86c3d4fe..5d28b1e129a 100644
--- a/spec/workers/pipeline_hooks_worker_spec.rb
+++ b/spec/workers/pipeline_hooks_worker_spec.rb
@@ -25,6 +25,16 @@ RSpec.describe PipelineHooksWorker do
.not_to raise_error
end
end
+
+ context 'when the user is blocked' do
+ let(:pipeline) { create(:ci_pipeline, user: create(:user, :blocked)) }
+
+ it 'returns early without executing' do
+ expect(Ci::Pipelines::HookService).not_to receive(:new)
+
+ described_class.new.perform(pipeline.id)
+ end
+ end
end
it_behaves_like 'worker with data consistency',
diff --git a/spec/workers/pipeline_notification_worker_spec.rb b/spec/workers/pipeline_notification_worker_spec.rb
index 583c4bf1c0c..672debd0501 100644
--- a/spec/workers/pipeline_notification_worker_spec.rb
+++ b/spec/workers/pipeline_notification_worker_spec.rb
@@ -21,6 +21,20 @@ RSpec.describe PipelineNotificationWorker, :mailer do
subject.perform(non_existing_record_id)
end
+ context 'when the user is blocked' do
+ before do
+ expect_next_found_instance_of(Ci::Pipeline) do |pipeline|
+ allow(pipeline).to receive(:user) { build(:user, :blocked) }
+ end
+ end
+
+ it 'does nothing' do
+ expect(NotificationService).not_to receive(:new)
+
+ subject.perform(pipeline.id)
+ end
+ end
+
it_behaves_like 'worker with data consistency',
described_class,
data_consistency: :delayed
diff --git a/spec/workers/project_daily_statistics_worker_spec.rb b/spec/workers/project_daily_statistics_worker_spec.rb
deleted file mode 100644
index fa9d938acca..00000000000
--- a/spec/workers/project_daily_statistics_worker_spec.rb
+++ /dev/null
@@ -1,35 +0,0 @@
-# frozen_string_literal: true
-require 'spec_helper'
-
-RSpec.describe ProjectDailyStatisticsWorker, '#perform' do
- let(:worker) { described_class.new }
- let(:project) { create(:project) }
-
- describe '#perform' do
- context 'with a non-existing project' do
- it 'does nothing' do
- expect(Projects::FetchStatisticsIncrementService).not_to receive(:new)
-
- worker.perform(-1)
- end
- end
-
- context 'with an existing project without a repository' do
- it 'does nothing' do
- expect(Projects::FetchStatisticsIncrementService).not_to receive(:new)
-
- worker.perform(project.id)
- end
- end
-
- it 'calls daily_statistics_service with the given project' do
- project = create(:project, :repository)
-
- expect_next_instance_of(Projects::FetchStatisticsIncrementService, project) do |service|
- expect(service).to receive(:execute)
- end
-
- worker.perform(project.id)
- end
- end
-end
diff --git a/spec/workers/projects/inactive_projects_deletion_cron_worker_spec.rb b/spec/workers/projects/inactive_projects_deletion_cron_worker_spec.rb
index 0e7b4ea504c..ec10c66968d 100644
--- a/spec/workers/projects/inactive_projects_deletion_cron_worker_spec.rb
+++ b/spec/workers/projects/inactive_projects_deletion_cron_worker_spec.rb
@@ -5,6 +5,34 @@ require 'spec_helper'
RSpec.describe Projects::InactiveProjectsDeletionCronWorker do
include ProjectHelpers
+ shared_examples 'worker is running for more than 4 minutes' do
+ before do
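+ # simulate a worker that started 5 minutes ago, past the 4-minute runtime limit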
+ subject.instance_variable_set(:@start_time, ::Gitlab::Metrics::System.monotonic_time - 5.minutes)
+ end
+
+ it 'stores the last processed inactive project_id in the redis cache' do
+ Gitlab::Redis::Cache.with do |redis|
+ expect { worker.perform }
+ .to change { redis.get('last_processed_inactive_project_id') }.to(inactive_large_project.id.to_s)
+ end
+ end
+ end
+
+ shared_examples 'worker finishes processing in less than 4 minutes' do
+ before do
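+ # seed the cursor a previous time-limited run would have left behind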
+ Gitlab::Redis::Cache.with do |redis|
+ redis.set('last_processed_inactive_project_id', inactive_large_project.id)
+ end
+ end
+
+ it 'clears the last processed inactive project_id from the redis cache' do
+ Gitlab::Redis::Cache.with do |redis|
+ expect { worker.perform }
+ .to change { redis.get('last_processed_inactive_project_id') }.to(nil)
+ end
+ end
+ end
+
describe "#perform" do
subject(:worker) { described_class.new }
@@ -44,7 +72,7 @@ RSpec.describe Projects::InactiveProjectsDeletionCronWorker do
end
it 'does not invoke Projects::InactiveProjectsDeletionNotificationWorker' do
- expect(::Projects::InactiveProjectsDeletionNotificationWorker).not_to receive(:perform_in)
+ expect(::Projects::InactiveProjectsDeletionNotificationWorker).not_to receive(:perform_async)
expect(::Projects::DestroyService).not_to receive(:new)
worker.perform
@@ -68,7 +96,7 @@ RSpec.describe Projects::InactiveProjectsDeletionCronWorker do
end
it 'does not invoke Projects::InactiveProjectsDeletionNotificationWorker' do
- expect(::Projects::InactiveProjectsDeletionNotificationWorker).not_to receive(:perform_in)
+ expect(::Projects::InactiveProjectsDeletionNotificationWorker).not_to receive(:perform_async)
expect(::Projects::DestroyService).not_to receive(:new)
worker.perform
@@ -79,11 +107,12 @@ RSpec.describe Projects::InactiveProjectsDeletionCronWorker do
expect(inactive_large_project.reload.pending_delete).to eq(false)
end
+
+ it_behaves_like 'worker is running for more than 4 minutes'
+ it_behaves_like 'worker finishes processing in less than 4 minutes'
end
context 'when feature flag is enabled', :clean_gitlab_redis_shared_state, :sidekiq_inline do
- let_it_be(:delay) { anything }
-
before do
stub_feature_flags(inactive_projects_deletion: true)
end
@@ -93,8 +122,8 @@ RSpec.describe Projects::InactiveProjectsDeletionCronWorker do
expect(redis).to receive(:hset).with('inactive_projects_deletion_warning_email_notified',
"project:#{inactive_large_project.id}", Date.current)
end
- expect(::Projects::InactiveProjectsDeletionNotificationWorker).to receive(:perform_in).with(
- delay, inactive_large_project.id, deletion_date).and_call_original
+ expect(::Projects::InactiveProjectsDeletionNotificationWorker).to receive(:perform_async).with(
+ inactive_large_project.id, deletion_date).and_call_original
expect(::Projects::DestroyService).not_to receive(:new)
worker.perform
@@ -106,7 +135,7 @@ RSpec.describe Projects::InactiveProjectsDeletionCronWorker do
Date.current.to_s)
end
- expect(::Projects::InactiveProjectsDeletionNotificationWorker).not_to receive(:perform_in)
+ expect(::Projects::InactiveProjectsDeletionNotificationWorker).not_to receive(:perform_async)
expect(::Projects::DestroyService).not_to receive(:new)
worker.perform
@@ -118,7 +147,7 @@ RSpec.describe Projects::InactiveProjectsDeletionCronWorker do
15.months.ago.to_date.to_s)
end
- expect(::Projects::InactiveProjectsDeletionNotificationWorker).not_to receive(:perform_in)
+ expect(::Projects::InactiveProjectsDeletionNotificationWorker).not_to receive(:perform_async)
expect(::Projects::DestroyService).to receive(:new).with(inactive_large_project, admin_user, {})
.at_least(:once).and_call_original
@@ -131,6 +160,9 @@ RSpec.describe Projects::InactiveProjectsDeletionCronWorker do
"project:#{inactive_large_project.id}")).to be_nil
end
end
+
+ it_behaves_like 'worker is running for more than 4 minutes'
+ it_behaves_like 'worker finishes processing in less than 4 minutes'
end
it_behaves_like 'an idempotent worker'
diff --git a/spec/workers/prometheus/create_default_alerts_worker_spec.rb b/spec/workers/prometheus/create_default_alerts_worker_spec.rb
deleted file mode 100644
index d935bb20a29..00000000000
--- a/spec/workers/prometheus/create_default_alerts_worker_spec.rb
+++ /dev/null
@@ -1,13 +0,0 @@
-# frozen_string_literal: true
-
-require 'spec_helper'
-
-RSpec.describe Prometheus::CreateDefaultAlertsWorker do
- let_it_be(:project) { create(:project) }
-
- subject { described_class.new.perform(project.id) }
-
- it 'does nothing' do
- expect { subject }.not_to change { PrometheusAlert.count }
- end
-end
diff --git a/spec/workers/repository_remove_remote_worker_spec.rb b/spec/workers/repository_remove_remote_worker_spec.rb
deleted file mode 100644
index 11081ec9b37..00000000000
--- a/spec/workers/repository_remove_remote_worker_spec.rb
+++ /dev/null
@@ -1,48 +0,0 @@
-# frozen_string_literal: true
-
-require 'spec_helper'
-
-RSpec.describe RepositoryRemoveRemoteWorker do
- include ExclusiveLeaseHelpers
- include GitHelpers
-
- describe '#perform' do
- let!(:project) { create(:project, :repository) }
- let(:remote_name) { 'joe' }
- let(:lease_key) { "remove_remote_#{project.id}_#{remote_name}" }
- let(:lease_timeout) { RepositoryRemoveRemoteWorker::LEASE_TIMEOUT }
-
- it 'returns nil when project does not exist' do
- expect(subject.perform(-1, 'remote_name')).to be_nil
- end
-
- context 'when project exists' do
- before do
- allow(Project)
- .to receive(:find_by)
- .with(id: project.id)
- .and_return(project)
- end
-
- it 'does nothing when it cannot obtain the lease' do
- stub_exclusive_lease_taken(lease_key, timeout: lease_timeout)
-
- expect(project.repository)
- .not_to receive(:remove_remote)
- expect(subject)
- .not_to receive(:log_error)
-
- subject.perform(project.id, remote_name)
- end
-
- it 'does nothing when a lease is obtained' do
- stub_exclusive_lease(lease_key, timeout: lease_timeout)
-
- expect(project.repository)
- .not_to receive(:remove_remote)
-
- subject.perform(project.id, remote_name)
- end
- end
- end
-end
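The deleted spec stubbed an exclusive lease around remote removal. Since the worker itself is gone, the sketch below shows only the generic lease guard it was testing, not recovered code:

    # Generic exclusive-lease guard, as stubbed by the deleted spec above.
    lease_key = "remove_remote_#{project.id}_#{remote_name}"
    lease = Gitlab::ExclusiveLease.new(lease_key, timeout: 1.hour.to_i)

    if lease.try_obtain
      project.repository.remove_remote(remote_name)
    else
      # another process holds the lease: do nothing, as the spec asserted
    end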
diff --git a/spec/workers/schedule_merge_request_cleanup_refs_worker_spec.rb b/spec/workers/schedule_merge_request_cleanup_refs_worker_spec.rb
index ef515e43474..49730d9ab8c 100644
--- a/spec/workers/schedule_merge_request_cleanup_refs_worker_spec.rb
+++ b/spec/workers/schedule_merge_request_cleanup_refs_worker_spec.rb
@@ -25,6 +25,12 @@ RSpec.describe ScheduleMergeRequestCleanupRefsWorker do
end
end
+ it 'retries stuck cleanup schedules' do
+ expect(MergeRequest::CleanupSchedule).to receive(:stuck_retry!)
+
+ worker.perform
+ end
+
include_examples 'an idempotent worker' do
it 'schedules MergeRequestCleanupRefsWorker to be performed with capacity' do
expect(MergeRequestCleanupRefsWorker).to receive(:perform_with_capacity).twice
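The new example above only asserts that the cron worker calls MergeRequest::CleanupSchedule.stuck_retry!. The diff does not show that method, so the following is a purely hypothetical shape for it, included to illustrate what retrying stuck schedules typically means:

    # Hypothetical: the status values and staleness window are assumptions.
    def self.stuck_retry!
      where(status: :running)
        .where('updated_at < ?', 1.day.ago)
        .update_all(status: :unstarted, updated_at: Time.current)
    end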
diff --git a/spec/workers/terraform/states/destroy_worker_spec.rb b/spec/workers/terraform/states/destroy_worker_spec.rb
new file mode 100644
index 00000000000..02e79373279
--- /dev/null
+++ b/spec/workers/terraform/states/destroy_worker_spec.rb
@@ -0,0 +1,30 @@
+# frozen_string_literal: true
+
+require 'spec_helper'
+
+RSpec.describe Terraform::States::DestroyWorker do
+ let(:state) { create(:terraform_state) }
+
+ describe '#perform' do
+ let(:state_id) { state.id }
+ let(:deletion_service) { instance_double(Terraform::States::DestroyService, execute: true) }
+
+ subject { described_class.new.perform(state_id) }
+
+ it 'calls the deletion service' do
+ expect(deletion_service).to receive(:execute).once
+ expect(Terraform::States::DestroyService).to receive(:new)
+ .with(state).and_return(deletion_service)
+
+ subject
+ end
+
+ context 'when the state no longer exists' do
+ let(:state_id) { -1 }
+
+ it 'completes without error' do
+ expect { subject }.not_to raise_error
+ end
+ end
+ end
+end
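The new spec pins down the worker's contract: look up the state by id, delegate to Terraform::States::DestroyService, and return quietly when the record is gone. A sketch consistent with those expectations (the shipped class body may differ):

    # Inferred from the spec's expectations above; not the shipped source.
    module Terraform
      module States
        class DestroyWorker
          include ApplicationWorker

          def perform(state_id)
            state = Terraform::State.find_by_id(state_id)
            return unless state # spec: completes without error when missing

            Terraform::States::DestroyService.new(state).execute
          end
        end
      end
    end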
diff --git a/spec/workers/update_merge_requests_worker_spec.rb b/spec/workers/update_merge_requests_worker_spec.rb
index bd0dc2f9ef4..64fcc2bd388 100644
--- a/spec/workers/update_merge_requests_worker_spec.rb
+++ b/spec/workers/update_merge_requests_worker_spec.rb
@@ -3,28 +3,47 @@
require 'spec_helper'
RSpec.describe UpdateMergeRequestsWorker do
- include RepoHelpers
+ let_it_be(:project) { create(:project, :repository) }
+ let_it_be(:user) { create(:user) }
+ let_it_be(:oldrev) { "123456" }
+ let_it_be(:newrev) { "789012" }
+ let_it_be(:ref) { "refs/heads/test" }
- let(:project) { create(:project, :repository) }
- let(:user) { create(:user) }
-
- subject { described_class.new }
+ let(:worker) { described_class.new }
describe '#perform' do
- let(:oldrev) { "123456" }
- let(:newrev) { "789012" }
- let(:ref) { "refs/heads/test" }
-
- def perform
- subject.perform(project.id, user.id, oldrev, newrev, ref)
- end
+ subject { worker.perform(project.id, user.id, oldrev, newrev, ref) }
it 'executes MergeRequests::RefreshService with expected values' do
- expect_next_instance_of(MergeRequests::RefreshService, project: project, current_user: user) do |refresh_service|
- expect(refresh_service).to receive(:execute).with(oldrev, newrev, ref)
+ expect_next_instance_of(MergeRequests::RefreshService,
+ project: project,
+ current_user: user,
+ params: { push_options: nil }) do |service|
+ expect(service)
+ .to receive(:execute)
+ .with(oldrev, newrev, ref)
end
- perform
+ subject
+ end
+
+ context 'when push options are passed as Hash' do
+ let(:extra_params) { { 'push_options' => { 'ci' => { 'skip' => true } } } }
+
+ subject { worker.perform(project.id, user.id, oldrev, newrev, ref, extra_params) }
+
+ it 'executes MergeRequests::RefreshService with expected values' do
+ expect_next_instance_of(MergeRequests::RefreshService,
+ project: project,
+ current_user: user,
+ params: { push_options: { ci: { skip: true } } }) do |service|
+ expect(service)
+ .to receive(:execute)
+ .with(oldrev, newrev, ref)
+ end
+
+ subject
+ end
end
end
end
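The rewritten spec shows the worker's new contract: it accepts an optional trailing params hash and passes deep-symbolized push options into MergeRequests::RefreshService. Roughly, assuming lookup failures simply return (a sketch, not the exact source):

    # Inferred from the spec above; lookup and guard details are assumptions.
    def perform(project_id, user_id, oldrev, newrev, ref, params = {})
      project = Project.find_by_id(project_id)
      user = User.find_by_id(user_id)
      return unless project && user

      push_options = params['push_options']&.deep_symbolize_keys

      MergeRequests::RefreshService
        .new(project: project, current_user: user, params: { push_options: push_options })
        .execute(oldrev, newrev, ref)
    end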
diff --git a/spec/workers/users/deactivate_dormant_users_worker_spec.rb b/spec/workers/users/deactivate_dormant_users_worker_spec.rb
index 20cd55e19eb..297301c45e2 100644
--- a/spec/workers/users/deactivate_dormant_users_worker_spec.rb
+++ b/spec/workers/users/deactivate_dormant_users_worker_spec.rb
@@ -7,7 +7,8 @@ RSpec.describe Users::DeactivateDormantUsersWorker do
describe '#perform' do
let_it_be(:dormant) { create(:user, last_activity_on: User::MINIMUM_INACTIVE_DAYS.days.ago.to_date) }
- let_it_be(:inactive) { create(:user, last_activity_on: nil) }
+ let_it_be(:inactive) { create(:user, last_activity_on: nil, created_at: User::MINIMUM_DAYS_CREATED.days.ago.to_date) }
+ let_it_be(:inactive_recently_created) { create(:user, last_activity_on: nil, created_at: (User::MINIMUM_DAYS_CREATED - 1).days.ago.to_date) }
subject(:worker) { described_class.new }
@@ -71,6 +72,12 @@ RSpec.describe Users::DeactivateDormantUsersWorker do
expect(human_user.reload.state).to eq('blocked')
expect(service_user.reload.state).to eq('blocked')
end
+
+ it 'does not deactivate recently created users' do
+ worker.perform
+
+ expect(inactive_recently_created.reload.state).to eq('active')
+ end
end
context 'when automatic deactivation of dormant users is disabled' do
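The added fixture encodes the new rule: a user with no recorded activity only counts as dormant once the account is older than User::MINIMUM_DAYS_CREATED days. Expressed as a scope, that rule might read as follows (illustrative; the real scope on User may be structured differently):

    # Illustrative scope for the rule the new fixture tests.
    class User < ApplicationRecord
      scope :dormant_candidates, -> do
        where(state: 'active').where(
          'last_activity_on <= :activity_cutoff OR ' \
          '(last_activity_on IS NULL AND created_at <= :created_cutoff)',
          activity_cutoff: MINIMUM_INACTIVE_DAYS.days.ago.to_date,
          created_cutoff: MINIMUM_DAYS_CREATED.days.ago.to_date
        )
      end
    end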
diff --git a/spec/workers/web_hooks/destroy_worker_spec.rb b/spec/workers/web_hooks/destroy_worker_spec.rb
index fd26c8591ee..8e75610a031 100644
--- a/spec/workers/web_hooks/destroy_worker_spec.rb
+++ b/spec/workers/web_hooks/destroy_worker_spec.rb
@@ -3,6 +3,8 @@
require 'spec_helper'
RSpec.describe WebHooks::DestroyWorker do
+ include AfterNextHelpers
+
let_it_be(:project) { create(:project) }
let_it_be(:user) { create(:user) }
@@ -20,23 +22,26 @@ RSpec.describe WebHooks::DestroyWorker do
let!(:other_log) { create(:web_hook_log, web_hook: other_hook) }
it "deletes the Web hook and logs", :aggregate_failures do
+ expect(WebHooks::LogDestroyWorker).to receive(:perform_async)
+
expect { subject.perform(user.id, hook.id) }
- .to change { WebHookLog.count }.from(2).to(1)
- .and change { WebHook.count }.from(2).to(1)
+ .to change { WebHook.count }.from(2).to(1)
expect(WebHook.find(other_hook.id)).to be_present
expect(WebHookLog.find(other_log.id)).to be_present
end
it "raises and tracks an error if destroy failed" do
- allow_next_instance_of(::WebHooks::DestroyService) do |instance|
- expect(instance).to receive(:sync_destroy).with(anything).and_return({ status: :error, message: "failed" })
- end
+ expect_next(::WebHooks::DestroyService)
+ .to receive(:sync_destroy).with(anything)
+ .and_return(ServiceResponse.error(message: "failed"))
+
+ expect(Gitlab::ErrorTracking)
+ .to receive(:track_and_raise_exception)
+ .with(an_instance_of(described_class::DestroyError), { web_hook_id: hook.id })
+ .and_call_original
- expect(Gitlab::ErrorTracking).to receive(:track_exception)
- .with(an_instance_of(::WebHooks::DestroyService::DestroyError), web_hook_id: hook.id)
- .and_call_original
- expect { subject.perform(user.id, hook.id) }.to raise_error(::WebHooks::DestroyService::DestroyError)
+ expect { subject.perform(user.id, hook.id) }.to raise_error(described_class::DestroyError)
end
context 'with unknown hook' do
diff --git a/spec/workers/web_hooks/log_destroy_worker_spec.rb b/spec/workers/web_hooks/log_destroy_worker_spec.rb
new file mode 100644
index 00000000000..0c107c05360
--- /dev/null
+++ b/spec/workers/web_hooks/log_destroy_worker_spec.rb
@@ -0,0 +1,86 @@
+# frozen_string_literal: true
+
+require 'spec_helper'
+
+RSpec.describe WebHooks::LogDestroyWorker do
+ include AfterNextHelpers
+
+ let_it_be(:project) { create(:project) }
+
+ subject { described_class.new }
+
+ describe "#perform" do
+ let!(:hook) { create(:project_hook, project: project) }
+ let!(:other_hook) { create(:project_hook, project: project) }
+ let!(:log) { create(:web_hook_log, web_hook: hook) }
+ let!(:other_log) { create(:web_hook_log, web_hook: other_hook) }
+
+ context 'with a Web hook' do
+ it "deletes the relevant logs", :aggregate_failures do
+ hook.destroy! # the worker does not depend on the hook still existing
+
+ expect { subject.perform({ 'hook_id' => hook.id }) }
+ .to change { WebHookLog.count }.by(-1)
+
+ expect(WebHook.find(other_hook.id)).to be_present
+ expect(WebHookLog.find(other_log.id)).to be_present
+ end
+
+ it 'is idempotent' do
+ subject.perform({ 'hook_id' => hook.id })
+ subject.perform({ 'hook_id' => hook.id })
+
+ expect(hook.web_hook_logs).to be_none
+ end
+
+ it "raises and tracks an error if destroy failed" do
+ expect_next(::WebHooks::LogDestroyService)
+ .to receive(:execute).and_return(ServiceResponse.error(message: "failed"))
+
+ expect(Gitlab::ErrorTracking)
+ .to receive(:track_and_raise_exception)
+ .with(an_instance_of(described_class::DestroyError), { web_hook_id: hook.id })
+ .and_call_original
+
+ expect { subject.perform({ 'hook_id' => hook.id }) }
+ .to raise_error(described_class::DestroyError)
+ end
+
+ context 'with extra arguments' do
+ it 'does not raise an error' do
+ expect { subject.perform({ 'hook_id' => hook.id, 'extra' => true }) }.not_to raise_error
+
+ expect(WebHook.count).to eq(2)
+ expect(WebHookLog.count).to eq(1)
+ end
+ end
+ end
+
+ context 'with no arguments' do
+ it 'does not raise an error' do
+ expect { subject.perform }.not_to raise_error
+
+ expect(WebHook.count).to eq(2)
+ expect(WebHookLog.count).to eq(2)
+ end
+ end
+
+ context 'with empty arguments' do
+ it 'does not raise an error' do
+ expect { subject.perform({}) }.not_to raise_error
+
+ expect(WebHook.count).to eq(2)
+ expect(WebHookLog.count).to eq(2)
+ end
+ end
+
+ context 'with unknown hook' do
+ it 'does not raise an error' do
+ expect { subject.perform({ 'hook_id' => non_existing_record_id }) }.not_to raise_error
+
+ expect(WebHook.count).to eq(2)
+ expect(WebHookLog.count).to eq(2)
+ end
+ end
+ end
+end
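Read as a contract, this new spec says the worker takes a single params hash; tolerates missing, empty, or extra arguments; deletes logs even after the hook row itself is gone; and wraps a failed service response in its own DestroyError via Gitlab::ErrorTracking. A sketch satisfying those cases (assumed, not the shipped code):

    # Derived from the spec's cases above; the real class may differ in detail.
    module WebHooks
      class LogDestroyWorker
        include ApplicationWorker

        DestroyError = Class.new(StandardError)

        def perform(params = {})
          hook_id = params['hook_id']
          return unless hook_id # no-op for missing or empty arguments

          result = ::WebHooks::LogDestroyService.new(hook_id).execute
          return if result.success?

          Gitlab::ErrorTracking.track_and_raise_exception(
            DestroyError.new(result.message), web_hook_id: hook_id
          )
        end
      end
    end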