gitlab.com/gitlab-org/gitlab-foss.git
Diffstat (limited to 'app/services/ci')
-rw-r--r--  app/services/ci/archive_trace_service.rb | 4
-rw-r--r--  app/services/ci/create_pipeline_service.rb | 11
-rw-r--r--  app/services/ci/destroy_pipeline_service.rb | 12
-rw-r--r--  app/services/ci/job_artifacts/delete_project_artifacts_service.rb | 11
-rw-r--r--  app/services/ci/job_artifacts/destroy_all_expired_service.rb | 31
-rw-r--r--  app/services/ci/job_artifacts/expire_project_build_artifacts_service.rb | 35
-rw-r--r--  app/services/ci/pipeline_processing/atomic_processing_service.rb | 4
-rw-r--r--  app/services/ci/pipelines/add_job_service.rb | 6
-rw-r--r--  app/services/ci/play_build_service.rb | 5
-rw-r--r--  app/services/ci/process_build_service.rb | 19
-rw-r--r--  app/services/ci/process_sync_events_service.rb | 8
-rw-r--r--  app/services/ci/register_runner_service.rb | 36
-rw-r--r--  app/services/ci/retry_build_service.rb | 35
-rw-r--r--  app/services/ci/stuck_builds/drop_helpers.rb | 12
-rw-r--r--  app/services/ci/update_build_queue_service.rb | 14
15 files changed, 158 insertions(+), 85 deletions(-)
diff --git a/app/services/ci/archive_trace_service.rb b/app/services/ci/archive_trace_service.rb
index 17cac38ace2..7b1d2207460 100644
--- a/app/services/ci/archive_trace_service.rb
+++ b/app/services/ci/archive_trace_service.rb
@@ -27,6 +27,10 @@ module Ci
job.trace.archive!
job.remove_pending_state!
+ if Feature.enabled?(:datadog_integration_logs_collection, job.project) && job.job_artifacts_trace.present?
+ job.project.execute_integrations(Gitlab::DataBuilder::ArchiveTrace.build(job), :archive_trace_hooks)
+ end
+
# TODO: Remove this logging once we confirmed new live trace architecture is functional.
# See https://gitlab.com/gitlab-com/gl-infra/infrastructure/issues/4667.
unless job.has_archived_trace?
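
The guard added above fires the archive-trace integration hook only when the feature flag is enabled for the project and a trace artifact actually exists. A minimal standalone sketch of that gating pattern (plain Ruby with hypothetical stand-ins, not the GitLab classes):

ENABLED_FLAGS = [:datadog_integration_logs_collection] # stand-in for Feature.enabled?

def notify_archive_trace(job)
  # Only fire the hook when the rollout flag is on and an archived trace exists.
  return unless ENABLED_FLAGS.include?(:datadog_integration_logs_collection)
  return unless job[:trace_artifact]

  payload = { job_id: job[:id], project_id: job[:project_id] } # stand-in for the ArchiveTrace data builder
  puts "archive_trace_hooks => #{payload.inspect}"
end

notify_archive_trace({ id: 1, project_id: 7, trace_artifact: 'trace.log' }) # fires
notify_archive_trace({ id: 2, project_id: 7, trace_artifact: nil })         # no-op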
diff --git a/app/services/ci/create_pipeline_service.rb b/app/services/ci/create_pipeline_service.rb
index c1f35afba40..d53e136effb 100644
--- a/app/services/ci/create_pipeline_service.rb
+++ b/app/services/ci/create_pipeline_service.rb
@@ -95,7 +95,10 @@ module Ci
.build!
if pipeline.persisted?
- schedule_head_pipeline_update
+ Gitlab::EventStore.publish(
+ Ci::PipelineCreatedEvent.new(data: { pipeline_id: pipeline.id })
+ )
+
create_namespace_onboarding_action
else
# If pipeline is not persisted, try to recover IID
@@ -134,12 +137,6 @@ module Ci
commit.try(:id)
end
- def schedule_head_pipeline_update
- pipeline.all_merge_requests.opened.each do |merge_request|
- UpdateHeadPipelineForMergeRequestWorker.perform_async(merge_request.id)
- end
- end
-
def create_namespace_onboarding_action
Namespaces::OnboardingPipelineCreatedWorker.perform_async(project.namespace_id)
end
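
The head-pipeline update now happens indirectly: the service publishes a Ci::PipelineCreatedEvent and interested subscribers react to it, instead of the service scheduling UpdateHeadPipelineForMergeRequestWorker itself. A toy in-process sketch of that publish/subscribe shape (hypothetical names, not Gitlab::EventStore):

class EventStore
  def initialize
    @subscriptions = Hash.new { |hash, key| hash[key] = [] }
  end

  # Register a handler for a given event class.
  def subscribe(event_class, &handler)
    @subscriptions[event_class] << handler
  end

  # Deliver an event to every handler registered for its class.
  def publish(event)
    @subscriptions[event.class].each { |handler| handler.call(event) }
  end
end

PipelineCreatedEvent = Struct.new(:data, keyword_init: true)

store = EventStore.new
store.subscribe(PipelineCreatedEvent) do |event|
  # In GitLab this side would be a subscriber worker updating merge request head pipelines.
  puts "update head pipelines for pipeline #{event.data[:pipeline_id]}"
end

store.publish(PipelineCreatedEvent.new(data: { pipeline_id: 42 }))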
diff --git a/app/services/ci/destroy_pipeline_service.rb b/app/services/ci/destroy_pipeline_service.rb
index 6fbde5d291c..d85e52e1312 100644
--- a/app/services/ci/destroy_pipeline_service.rb
+++ b/app/services/ci/destroy_pipeline_service.rb
@@ -9,12 +9,12 @@ module Ci
pipeline.cancel_running if pipeline.cancelable?
- # Ci::Pipeline#destroy triggers `use_fast_destroy :job_artifacts` and
- # ci_builds has ON DELETE CASCADE to ci_pipelines. The pipeline, the builds,
- # job and pipeline artifacts all get destroyed here.
- ::Gitlab::Database::QueryAnalyzers::PreventCrossDatabaseModification.allow_cross_database_modification_within_transaction(url: 'https://gitlab.com/gitlab-org/gitlab/-/issues/345664') do
- pipeline.reset.destroy!
- end
+ # The pipeline, the builds, job and pipeline artifacts all get destroyed here.
+ # Ci::Pipeline#destroy triggers fast destroy on job_artifacts and
+ # build_trace_chunks to remove the records and data stored in object storage.
+ # ci_builds records are deleted using ON DELETE CASCADE from ci_pipelines
+ #
+ pipeline.reset.destroy!
ServiceResponse.success(message: 'Pipeline not found')
rescue ActiveRecord::RecordNotFound
diff --git a/app/services/ci/job_artifacts/delete_project_artifacts_service.rb b/app/services/ci/job_artifacts/delete_project_artifacts_service.rb
new file mode 100644
index 00000000000..61394573748
--- /dev/null
+++ b/app/services/ci/job_artifacts/delete_project_artifacts_service.rb
@@ -0,0 +1,11 @@
+# frozen_string_literal: true
+
+module Ci
+ module JobArtifacts
+ class DeleteProjectArtifactsService < BaseProjectService
+ def execute
+ ExpireProjectBuildArtifactsWorker.perform_async(project.id)
+ end
+ end
+ end
+end
diff --git a/app/services/ci/job_artifacts/destroy_all_expired_service.rb b/app/services/ci/job_artifacts/destroy_all_expired_service.rb
index 7fa56677a0c..c089567ec14 100644
--- a/app/services/ci/job_artifacts/destroy_all_expired_service.rb
+++ b/app/services/ci/job_artifacts/destroy_all_expired_service.rb
@@ -8,13 +8,15 @@ module Ci
BATCH_SIZE = 100
LOOP_TIMEOUT = 5.minutes
- LOOP_LIMIT = 1000
+ SMALL_LOOP_LIMIT = 100
+ LARGE_LOOP_LIMIT = 500
EXCLUSIVE_LOCK_KEY = 'expired_job_artifacts:destroy:lock'
LOCK_TIMEOUT = 6.minutes
def initialize
@removed_artifacts_count = 0
@start_at = Time.current
+ @loop_limit = ::Feature.enabled?(:ci_artifact_fast_removal_large_loop_limit, default_enabled: :yaml) ? LARGE_LOOP_LIMIT : SMALL_LOOP_LIMIT
end
##
@@ -24,6 +26,8 @@ module Ci
# preventing multiple `ExpireBuildArtifactsWorker` CRON jobs run concurrently,
# which is scheduled every 7 minutes.
def execute
+ return 0 unless ::Feature.enabled?(:ci_destroy_all_expired_service, default_enabled: :yaml)
+
in_lock(EXCLUSIVE_LOCK_KEY, ttl: LOCK_TIMEOUT, retries: 1) do
if ::Feature.enabled?(:ci_destroy_unlocked_job_artifacts)
destroy_unlocked_job_artifacts
@@ -38,34 +42,13 @@ module Ci
private
def destroy_unlocked_job_artifacts
- loop_until(timeout: LOOP_TIMEOUT, limit: LOOP_LIMIT) do
+ loop_until(timeout: LOOP_TIMEOUT, limit: @loop_limit) do
artifacts = Ci::JobArtifact.expired_before(@start_at).artifact_unlocked.limit(BATCH_SIZE)
service_response = destroy_batch(artifacts)
@removed_artifacts_count += service_response[:destroyed_artifacts_count]
-
- update_locked_status_on_unknown_artifacts if service_response[:destroyed_artifacts_count] == 0
-
- # Return a truthy value here to prevent exiting #loop_until
- @removed_artifacts_count
end
end
- def update_locked_status_on_unknown_artifacts
- build_ids = Ci::JobArtifact.expired_before(@start_at).artifact_unknown.limit(BATCH_SIZE).distinct_job_ids
-
- return unless build_ids.present?
-
- locked_pipeline_build_ids = ::Ci::Build.with_pipeline_locked_artifacts.id_in(build_ids).pluck_primary_key
- unlocked_pipeline_build_ids = build_ids - locked_pipeline_build_ids
-
- update_unknown_artifacts(locked_pipeline_build_ids, Ci::JobArtifact.lockeds[:artifacts_locked])
- update_unknown_artifacts(unlocked_pipeline_build_ids, Ci::JobArtifact.lockeds[:unlocked])
- end
-
- def update_unknown_artifacts(build_ids, locked_value)
- Ci::JobArtifact.for_job_ids(build_ids).update_all(locked: locked_value) if build_ids.any?
- end
-
def destroy_job_artifacts_with_slow_iteration
Ci::JobArtifact.expired_before(@start_at).each_batch(of: BATCH_SIZE, column: :expire_at, order: :desc) do |relation, index|
# For performance reasons, join with ci_pipelines after the batch is queried.
@@ -76,7 +59,7 @@ module Ci
@removed_artifacts_count += service_response[:destroyed_artifacts_count]
break if loop_timeout?
- break if index >= LOOP_LIMIT
+ break if index >= @loop_limit
end
end
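
The expiry sweep now drains artifacts in fixed-size batches, with the per-run iteration cap picked by a feature flag (500 or 100). A standalone sketch of that bounded batch-drain loop, where loop_until is a simplified stand-in for GitLab's helper:

BATCH_SIZE = 3

# Simplified stand-in for the loop_until helper: keep yielding while the block
# returns truthy, up to `limit` iterations or until `timeout` seconds elapse.
def loop_until(limit:, timeout:)
  deadline = Time.now + timeout
  limit.times do
    break if Time.now > deadline
    break unless yield
  end
end

def destroy_all_expired(expired_artifacts, loop_limit:)
  removed = 0
  loop_until(limit: loop_limit, timeout: 5 * 60) do
    batch = expired_artifacts.shift(BATCH_SIZE) # stand-in for the expired/unlocked batch query
    removed += batch.size                       # stand-in for the destroy-batch service
    batch.any?                                  # falsy once the backlog is drained, ending the loop
  end
  removed
end

large_limit_enabled = true # stand-in for the :ci_artifact_fast_removal_large_loop_limit flag
backlog = (1..10).to_a
puts destroy_all_expired(backlog, loop_limit: large_limit_enabled ? 500 : 100) # => 10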
diff --git a/app/services/ci/job_artifacts/expire_project_build_artifacts_service.rb b/app/services/ci/job_artifacts/expire_project_build_artifacts_service.rb
new file mode 100644
index 00000000000..836b1d39736
--- /dev/null
+++ b/app/services/ci/job_artifacts/expire_project_build_artifacts_service.rb
@@ -0,0 +1,35 @@
+# frozen_string_literal: true
+
+module Ci
+ module JobArtifacts
+ class ExpireProjectBuildArtifactsService
+ BATCH_SIZE = 1000
+
+ def initialize(project_id, expiry_time)
+ @project_id = project_id
+ @expiry_time = expiry_time
+ end
+
+ # rubocop:disable CodeReuse/ActiveRecord
+ def execute
+ scope = Ci::JobArtifact.for_project(project_id).order(:id)
+ file_type_values = Ci::JobArtifact.erasable_file_types.map { |file_type| [Ci::JobArtifact.file_types[file_type]] }
+ from_sql = Arel::Nodes::Grouping.new(Arel::Nodes::ValuesList.new(file_type_values)).as('file_types (file_type)').to_sql
+ array_scope = Ci::JobArtifact.from(from_sql).select(:file_type)
+ array_mapping_scope = -> (file_type_expression) { Ci::JobArtifact.where(Ci::JobArtifact.arel_table[:file_type].eq(file_type_expression)) }
+
+ Gitlab::Pagination::Keyset::Iterator
+ .new(scope: scope, in_operator_optimization_options: { array_scope: array_scope, array_mapping_scope: array_mapping_scope })
+ .each_batch(of: BATCH_SIZE) do |batch|
+ ids = batch.reselect!(:id).to_a.map(&:id)
+ Ci::JobArtifact.unlocked.where(id: ids).update_all(locked: Ci::JobArtifact.lockeds[:unlocked], expire_at: expiry_time)
+ end
+ end
+ # rubocop:enable CodeReuse/ActiveRecord
+
+ private
+
+ attr_reader :project_id, :expiry_time
+ end
+ end
+end
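
The new service walks a project's artifacts in id order via keyset pagination (using the IN-operator optimization over erasable file types) and expires each batch of unlocked records with a single bulk update. A simplified sketch of that batch-expiry flow over plain objects (hypothetical names, no keyset machinery):

BATCH_SIZE = 4

Artifact = Struct.new(:id, :locked, :expire_at, keyword_init: true)

# Expire every unlocked artifact of the project in id-ordered batches,
# mimicking the per-batch update_all(locked: :unlocked, expire_at: expiry_time).
def expire_project_build_artifacts(artifacts, expiry_time)
  artifacts.sort_by(&:id).each_slice(BATCH_SIZE) do |batch|
    batch.each do |artifact|
      next if artifact.locked          # locked artifacts keep their expiry

      artifact.expire_at = expiry_time # one bulk UPDATE per batch in the real service
    end
  end
end

artifacts = (1..10).map { |i| Artifact.new(id: i, locked: i.even?, expire_at: nil) }
expire_project_build_artifacts(artifacts, Time.now + 12 * 3600)
puts artifacts.count { |a| a.expire_at } # => 5 (only the unlocked ones)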
diff --git a/app/services/ci/pipeline_processing/atomic_processing_service.rb b/app/services/ci/pipeline_processing/atomic_processing_service.rb
index d8ce063ffb4..508d9c3f2e1 100644
--- a/app/services/ci/pipeline_processing/atomic_processing_service.rb
+++ b/app/services/ci/pipeline_processing/atomic_processing_service.rb
@@ -36,9 +36,7 @@ module Ci
update_pipeline!
update_statuses_processed!
- if Feature.enabled?(:expire_job_and_pipeline_cache_synchronously, pipeline.project, default_enabled: :yaml)
- Ci::ExpirePipelineCacheService.new.execute(pipeline)
- end
+ Ci::ExpirePipelineCacheService.new.execute(pipeline)
true
end
diff --git a/app/services/ci/pipelines/add_job_service.rb b/app/services/ci/pipelines/add_job_service.rb
index 703bb22fb5d..fc852bc3edd 100644
--- a/app/services/ci/pipelines/add_job_service.rb
+++ b/app/services/ci/pipelines/add_job_service.rb
@@ -39,6 +39,12 @@ module Ci
job.pipeline = pipeline
job.project = pipeline.project
job.ref = pipeline.ref
+
+ # update metadata since it might have been lazily initialised before this call
+ # metadata is present on `Ci::Processable`
+ if job.respond_to?(:metadata) && job.metadata
+ job.metadata.project = pipeline.project
+ end
end
end
end
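
Because a job's metadata record may have been lazily built before the job is attached to a pipeline, the service now also copies the project onto it when present. A small sketch of that duck-typed guard (hypothetical classes):

Metadata = Struct.new(:project, keyword_init: true)

class Job
  attr_accessor :pipeline, :project, :metadata
end

def assign_pipeline_attributes(job, pipeline)
  job.pipeline = pipeline
  job.project = pipeline[:project]

  # Only touch metadata when the job type has it and it was already built.
  job.metadata.project = pipeline[:project] if job.respond_to?(:metadata) && job.metadata
end

job = Job.new
job.metadata = Metadata.new
assign_pipeline_attributes(job, { project: 'gitlab-foss' })
puts job.metadata.project # => "gitlab-foss"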
diff --git a/app/services/ci/play_build_service.rb b/app/services/ci/play_build_service.rb
index e2673c763f3..2d6b6aeee14 100644
--- a/app/services/ci/play_build_service.rb
+++ b/app/services/ci/play_build_service.rb
@@ -14,7 +14,10 @@ module Ci
AfterRequeueJobService.new(project, current_user).execute(build)
end
else
- Ci::Build.retry(build, current_user)
+ # Retrying in Ci::PlayBuildService is a legacy process that should be removed.
+ # Instead, callers should explicitly execute Ci::RetryBuildService.
+ # See https://gitlab.com/gitlab-org/gitlab/-/issues/347493.
+ build.retryable? ? Ci::Build.retry(build, current_user) : build
end
end
diff --git a/app/services/ci/process_build_service.rb b/app/services/ci/process_build_service.rb
index 5271c0fe93d..e6ec65fcc91 100644
--- a/app/services/ci/process_build_service.rb
+++ b/app/services/ci/process_build_service.rb
@@ -4,14 +4,7 @@ module Ci
class ProcessBuildService < BaseService
def execute(build, current_status)
if valid_statuses_for_build(build).include?(current_status)
- if build.schedulable?
- build.schedule
- elsif build.action?
- build.actionize
- else
- enqueue(build)
- end
-
+ process(build)
true
else
build.skip
@@ -21,6 +14,16 @@ module Ci
private
+ def process(build)
+ if build.schedulable?
+ build.schedule
+ elsif build.action?
+ build.actionize
+ else
+ enqueue(build)
+ end
+ end
+
def enqueue(build)
build.enqueue
end
diff --git a/app/services/ci/process_sync_events_service.rb b/app/services/ci/process_sync_events_service.rb
index 6be8c41dc6a..11ce6e8eeaf 100644
--- a/app/services/ci/process_sync_events_service.rb
+++ b/app/services/ci/process_sync_events_service.rb
@@ -28,18 +28,16 @@ module Ci
return if events.empty?
- first = events.first
- last_processed = nil
+ processed_events = []
begin
events.each do |event|
@sync_class.sync!(event)
- last_processed = event
+ processed_events << event
end
ensure
- # remove events till the one that was last succesfully processed
- @sync_event_class.id_in(first.id..last_processed.id).delete_all if last_processed
+ @sync_event_class.id_in(processed_events).delete_all
end
end
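
Rather than deleting an id range up to the last processed event, the service now records exactly which events it handled and removes only those in the ensure block, so a mid-batch failure cannot delete unprocessed rows. A standalone sketch of that pattern with a simulated failure:

def process_sync_events(events, table)
  processed = []
  begin
    events.each do |event|
      raise "sync failed for event #{event[:id]}" if event[:poison] # simulate a mid-batch error
      # ... apply the event to the target data here ...
      processed << event[:id]
    end
  ensure
    # Delete only what was actually processed, even when an exception was raised.
    table.reject! { |row| processed.include?(row[:id]) }
  end
end

table = [{ id: 1 }, { id: 2 }, { id: 3, poison: true }, { id: 4 }]
begin
  process_sync_events(table.dup, table)
rescue RuntimeError => e
  puts e.message
end
puts table.map { |row| row[:id] }.inspect # => [3, 4]; events 1 and 2 were cleaned up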
diff --git a/app/services/ci/register_runner_service.rb b/app/services/ci/register_runner_service.rb
new file mode 100644
index 00000000000..0a2027e33ce
--- /dev/null
+++ b/app/services/ci/register_runner_service.rb
@@ -0,0 +1,36 @@
+# frozen_string_literal: true
+
+module Ci
+ class RegisterRunnerService
+ def execute(registration_token, attributes)
+ runner_type_attrs = check_token_and_extract_attrs(registration_token)
+
+ return unless runner_type_attrs
+
+ ::Ci::Runner.create(attributes.merge(runner_type_attrs))
+ end
+
+ private
+
+ def check_token_and_extract_attrs(registration_token)
+ if runner_registration_token_valid?(registration_token)
+ # Create shared runner. Requires admin access
+ { runner_type: :instance_type }
+ elsif runner_registrar_valid?('project') && project = ::Project.find_by_runners_token(registration_token)
+ # Create a specific runner for the project
+ { runner_type: :project_type, projects: [project] }
+ elsif runner_registrar_valid?('group') && group = ::Group.find_by_runners_token(registration_token)
+ # Create a specific runner for the group
+ { runner_type: :group_type, groups: [group] }
+ end
+ end
+
+ def runner_registration_token_valid?(registration_token)
+ ActiveSupport::SecurityUtils.secure_compare(registration_token, Gitlab::CurrentSettings.runners_registration_token)
+ end
+
+ def runner_registrar_valid?(type)
+ Feature.disabled?(:runner_registration_control) || Gitlab::CurrentSettings.valid_runner_registrars.include?(type)
+ end
+ end
+end
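
The new service maps a registration token to runner attributes: a constant-time match against the instance token yields a shared runner; otherwise project and group tokens are tried, each gated by the allowed registrars. A condensed standalone sketch of that dispatch (hypothetical token tables, not the GitLab models):

require 'openssl'

INSTANCE_TOKEN = 'instance-secret'                  # hypothetical application setting
PROJECT_TOKENS = { 'proj-secret' => 'my/project' }  # hypothetical token => project lookup
GROUP_TOKENS   = { 'group-secret' => 'my-group' }   # hypothetical token => group lookup
ALLOWED_REGISTRARS = %w[project group]              # stand-in for valid_runner_registrars

def registrar_allowed?(type)
  ALLOWED_REGISTRARS.include?(type)
end

def runner_attrs_for(token)
  if OpenSSL.secure_compare(token, INSTANCE_TOKEN)  # constant-time comparison, as with SecurityUtils
    { runner_type: :instance_type }
  elsif registrar_allowed?('project') && (project = PROJECT_TOKENS[token])
    { runner_type: :project_type, projects: [project] }
  elsif registrar_allowed?('group') && (group = GROUP_TOKENS[token])
    { runner_type: :group_type, groups: [group] }
  end
end

puts runner_attrs_for('proj-secret').inspect # => {:runner_type=>:project_type, :projects=>["my/project"]}
puts runner_attrs_for('bogus').inspect       # => nil, so the caller declines to register a runner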
diff --git a/app/services/ci/retry_build_service.rb b/app/services/ci/retry_build_service.rb
index 89fe4ff9f60..7e5d5373648 100644
--- a/app/services/ci/retry_build_service.rb
+++ b/app/services/ci/retry_build_service.rb
@@ -25,10 +25,6 @@ module Ci
Gitlab::OptimisticLocking.retry_lock(new_build, name: 'retry_build', &:enqueue)
AfterRequeueJobService.new(project, current_user).execute(build)
-
- ::MergeRequests::AddTodoWhenBuildFailsService
- .new(project: project, current_user: current_user)
- .close(new_build)
end
end
@@ -42,16 +38,25 @@ module Ci
check_access!(build)
new_build = clone_build(build)
+
+ new_build.run_after_commit do
+ ::MergeRequests::AddTodoWhenBuildFailsService
+ .new(project: project)
+ .close(new_build)
+ end
+
+ if create_deployment_in_separate_transaction?
+ new_build.run_after_commit do |new_build|
+ ::Deployments::CreateForBuildService.new.execute(new_build)
+ end
+ end
+
::Ci::Pipelines::AddJobService.new(build.pipeline).execute!(new_build) do |job|
BulkInsertableAssociations.with_bulk_insert do
job.save!
end
end
- if create_deployment_in_separate_transaction?
- clone_deployment!(new_build, build)
- end
-
build.reset # refresh the data to get new values of `retried` and `processed`.
new_build
@@ -95,20 +100,6 @@ module Ci
.deployment_attributes_for(new_build, old_build.persisted_environment)
end
- def clone_deployment!(new_build, old_build)
- return unless old_build.deployment.present?
-
- # We should clone the previous deployment attributes instead of initializing
- # new object with `Seed::Deployment`.
- # See https://gitlab.com/gitlab-org/gitlab/-/issues/347206
- deployment = ::Gitlab::Ci::Pipeline::Seed::Deployment
- .new(new_build, old_build.persisted_environment).to_resource
-
- return unless deployment
-
- new_build.create_deployment!(deployment.attributes)
- end
-
def create_deployment_in_separate_transaction?
strong_memoize(:create_deployment_in_separate_transaction) do
::Feature.enabled?(:create_deployment_in_separate_transaction, project, default_enabled: :yaml)
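
Both the failed-build todo cleanup and the deployment creation are now queued with run_after_commit, so they execute only after the new build's transaction commits instead of inside it. A toy sketch of that deferred-callback idea (not ActiveRecord's implementation):

class Record
  def initialize
    @after_commit_callbacks = []
  end

  # Queue work to run once the surrounding "transaction" commits.
  def run_after_commit(&block)
    @after_commit_callbacks << block
    self
  end

  # Stand-in for the transaction commit: flush the queued callbacks.
  def commit!
    @after_commit_callbacks.each { |callback| callback.call(self) }
    @after_commit_callbacks.clear
  end
end

new_build = Record.new
new_build.run_after_commit { puts 'close todos for the failed build' }
new_build.run_after_commit { |build| puts "create deployment for record #{build.object_id}" }

puts 'inside the transaction: nothing deferred has run yet'
new_build.commit! # both callbacks fire only now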
diff --git a/app/services/ci/stuck_builds/drop_helpers.rb b/app/services/ci/stuck_builds/drop_helpers.rb
index f79b805c23d..048b52c6e13 100644
--- a/app/services/ci/stuck_builds/drop_helpers.rb
+++ b/app/services/ci/stuck_builds/drop_helpers.rb
@@ -34,7 +34,7 @@ module Ci
# rubocop: enable CodeReuse/ActiveRecord
def drop_build(type, build, reason)
- Gitlab::AppLogger.info "#{self.class}: Dropping #{type} build #{build.id} for runner #{build.runner_id} (status: #{build.status}, failure_reason: #{reason})"
+ log_dropping_message(type, build, reason)
Gitlab::OptimisticLocking.retry_lock(build, 3, name: 'stuck_ci_jobs_worker_drop_build') do |b|
b.drop(reason)
end
@@ -53,6 +53,16 @@ module Ci
project_id: build.project_id
)
end
+
+ def log_dropping_message(type, build, reason)
+ Gitlab::AppLogger.info(class: self.class.name,
+ message: "Dropping #{type} build",
+ build_stuck_type: type,
+ build_id: build.id,
+ runner_id: build.runner_id,
+ build_status: build.status,
+ build_failure_reason: reason)
+ end
end
end
end
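
The interpolated log line becomes a structured entry, keeping fields such as build_id and runner_id as separate, queryable keys instead of burying them in a message string. A small sketch of the difference using a hypothetical JSON-formatting logger:

require 'json'
require 'logger'
require 'time'

logger = Logger.new($stdout)
logger.formatter = proc do |severity, time, _progname, payload|
  entry = payload.is_a?(Hash) ? payload : { message: payload }
  JSON.generate({ severity: severity, time: time.utc.iso8601 }.merge(entry)) + "\n"
end

# Before: everything flattened into one interpolated string.
logger.info('StuckBuilds::DropRunningService: Dropping running build 123 for runner 7')

# After: each field stays a separate, queryable key.
logger.info({
  class: 'StuckBuilds::DropRunningService',
  message: 'Dropping running build',
  build_stuck_type: :running,
  build_id: 123,
  runner_id: 7,
  build_status: 'running',
  build_failure_reason: :stuck_or_timeout_failure
})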
diff --git a/app/services/ci/update_build_queue_service.rb b/app/services/ci/update_build_queue_service.rb
index 146239bb7e5..2e38969c7a9 100644
--- a/app/services/ci/update_build_queue_service.rb
+++ b/app/services/ci/update_build_queue_service.rb
@@ -99,17 +99,15 @@ module Ci
private
def tick_for(build, runners)
- ::Gitlab::Database.allow_cross_joins_across_databases(url: 'https://gitlab.com/gitlab-org/gitlab/-/issues/339937') do
- runners = runners.with_recent_runner_queue
- runners = runners.with_tags if Feature.enabled?(:ci_preload_runner_tags, default_enabled: :yaml)
+ runners = runners.with_recent_runner_queue
+ runners = runners.with_tags if Feature.enabled?(:ci_preload_runner_tags, default_enabled: :yaml)
- metrics.observe_active_runners(-> { runners.to_a.size })
+ metrics.observe_active_runners(-> { runners.to_a.size })
- runners.each do |runner|
- metrics.increment_runner_tick(runner)
+ runners.each do |runner|
+ metrics.increment_runner_tick(runner)
- runner.pick_build!(build)
- end
+ runner.pick_build!(build)
end
end