gitlab.com/gitlab-org/gitlab-foss.git
Diffstat (limited to 'app/services/ci')
-rw-r--r--   app/services/ci/after_requeue_job_service.rb                            12
-rw-r--r--   app/services/ci/create_downstream_pipeline_service.rb                   10
-rw-r--r--   app/services/ci/create_pipeline_service.rb                               1
-rw-r--r--   app/services/ci/job_artifacts/destroy_all_expired_service.rb            12
-rw-r--r--   app/services/ci/job_artifacts/destroy_batch_service.rb                   6
-rw-r--r--   app/services/ci/job_artifacts/update_unknown_locked_status_service.rb   79
-rw-r--r--   app/services/ci/play_build_service.rb                                    5
-rw-r--r--   app/services/ci/register_job_service.rb                                  3
-rw-r--r--   app/services/ci/retry_build_service.rb                                  94
-rw-r--r--   app/services/ci/retry_job_service.rb                                    94
-rw-r--r--   app/services/ci/retry_pipeline_service.rb                                2
11 files changed, 195 insertions(+), 123 deletions(-)
diff --git a/app/services/ci/after_requeue_job_service.rb b/app/services/ci/after_requeue_job_service.rb
index bc70dd3bea4..1ae4639751b 100644
--- a/app/services/ci/after_requeue_job_service.rb
+++ b/app/services/ci/after_requeue_job_service.rb
@@ -22,15 +22,9 @@ module Ci
end
def dependent_jobs
- dependent_jobs = stage_dependent_jobs
- .or(needs_dependent_jobs)
- .ordered_by_stage
-
- if ::Feature.enabled?(:ci_fix_order_of_subsequent_jobs, @processable.pipeline.project, default_enabled: :yaml)
- dependent_jobs = ordered_by_dag(dependent_jobs)
- end
-
- dependent_jobs
+ ordered_by_dag(
+ stage_dependent_jobs.or(needs_dependent_jobs).ordered_by_stage
+ )
end
def process(job)
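
The simplified `dependent_jobs` above drops the `ci_fix_order_of_subsequent_jobs` feature flag and always orders the jobs to requeue along the pipeline DAG. As a rough illustration of what ordering by DAG means, here is a standalone sketch using Ruby's stdlib `TSort`; the job names and the `needs` hash are hypothetical, and this is not the `ordered_by_dag` implementation:

```ruby
# Illustrative only: topologically order jobs so that a job's `needs` run first.
require 'tsort'

class JobGraph
  include TSort

  def initialize(needs)
    # e.g. { 'deploy' => ['test'], 'test' => ['build'], 'build' => [] }
    @needs = needs
  end

  def tsort_each_node(&block)
    @needs.each_key(&block)
  end

  def tsort_each_child(node, &block)
    @needs.fetch(node, []).each(&block)
  end
end

graph = JobGraph.new('deploy' => ['test'], 'test' => ['build'], 'build' => [])
puts graph.tsort.inspect # => ["build", "test", "deploy"]
```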
diff --git a/app/services/ci/create_downstream_pipeline_service.rb b/app/services/ci/create_downstream_pipeline_service.rb
index 034bab93108..0a0c614bb87 100644
--- a/app/services/ci/create_downstream_pipeline_service.rb
+++ b/app/services/ci/create_downstream_pipeline_service.rb
@@ -9,7 +9,7 @@ module Ci
DuplicateDownstreamPipelineError = Class.new(StandardError)
- MAX_DESCENDANTS_DEPTH = 2
+ MAX_NESTED_CHILDREN = 2
def execute(bridge)
@bridge = bridge
@@ -77,7 +77,8 @@ module Ci
# TODO: Remove this condition in favour of model validation
# https://gitlab.com/gitlab-org/gitlab/issues/38338
- if has_max_descendants_depth?
+ # only applies to parent-child pipelines, not multi-project pipelines
+ if has_max_nested_children?
@bridge.drop!(:reached_max_descendant_pipelines_depth)
return false
end
@@ -129,11 +130,12 @@ module Ci
pipeline_checksums.tally.any? { |_checksum, occurrences| occurrences > 2 }
end
- def has_max_descendants_depth?
+ def has_max_nested_children?
return false unless @bridge.triggers_child_pipeline?
+ # only applies to parent-child pipelines, not multi-project pipelines
ancestors_of_new_child = @bridge.pipeline.self_and_ancestors
- ancestors_of_new_child.count > MAX_DESCENDANTS_DEPTH
+ ancestors_of_new_child.count > MAX_NESTED_CHILDREN
end
def config_checksum(pipeline)
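
To make the renamed limit concrete: `has_max_nested_children?` counts the pipeline that is about to spawn a child together with its ancestors, so with `MAX_NESTED_CHILDREN = 2` a child pipeline (count 2) may still trigger a grandchild, while a grandchild (count 3) causes the bridge to be dropped with `reached_max_descendant_pipelines_depth`. A minimal, self-contained sketch of that counting (the `Pipeline` struct here is illustrative, not the ActiveRecord model):

```ruby
# Illustrative only: count a pipeline plus its ancestors, mirroring the
# self_and_ancestors-based depth check above.
Pipeline = Struct.new(:parent) do
  def self_and_ancestors
    chain = [self]
    chain << chain.last.parent while chain.last.parent
    chain
  end
end

root       = Pipeline.new(nil)
child      = Pipeline.new(root)
grandchild = Pipeline.new(child)

max_nested_children = 2
puts child.self_and_ancestors.count > max_nested_children      # => false, may trigger a child
puts grandchild.self_and_ancestors.count > max_nested_children # => true, bridge would be dropped
```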
diff --git a/app/services/ci/create_pipeline_service.rb b/app/services/ci/create_pipeline_service.rb
index d53e136effb..02f25a82307 100644
--- a/app/services/ci/create_pipeline_service.rb
+++ b/app/services/ci/create_pipeline_service.rb
@@ -14,6 +14,7 @@ module Ci
Gitlab::Ci::Pipeline::Chain::Build::Associations,
Gitlab::Ci::Pipeline::Chain::Validate::Abilities,
Gitlab::Ci::Pipeline::Chain::Validate::Repository,
+ Gitlab::Ci::Pipeline::Chain::Limit::RateLimit,
Gitlab::Ci::Pipeline::Chain::Validate::SecurityOrchestrationPolicy,
Gitlab::Ci::Pipeline::Chain::Skip,
Gitlab::Ci::Pipeline::Chain::Config::Content,
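
`Gitlab::Ci::Pipeline::Chain::Limit::RateLimit` is slotted into the sequence right after repository validation, so pipelines exceeding the creation rate limit can be rejected before further processing. A hedged sketch of the general chain-of-steps pattern this sequence relies on, where each step exposes `perform!` and `break?`; the step internals and attribute names below are assumptions, not the actual RateLimit step:

```ruby
# Illustrative only: run a sequence of steps in order and stop at the first
# step that asks to break, which is how a rate-limit step can short-circuit
# pipeline creation.
class RateLimitStep
  def initialize(pipeline, command)
    @pipeline = pipeline
    @command = command
  end

  def perform!
    return unless @command.rate_limit_exceeded # hypothetical attribute

    @pipeline.errors << 'Pipeline creation rate limit exceeded'
  end

  def break?
    @pipeline.errors.any?
  end
end

PipelineStub = Struct.new(:errors)
CommandStub  = Struct.new(:rate_limit_exceeded)

pipeline = PipelineStub.new([])
steps    = [RateLimitStep.new(pipeline, CommandStub.new(true))]

steps.each do |step|
  step.perform!
  break if step.break? # remaining steps are skipped once a step breaks
end

puts pipeline.errors.inspect # => ["Pipeline creation rate limit exceeded"]
```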
diff --git a/app/services/ci/job_artifacts/destroy_all_expired_service.rb b/app/services/ci/job_artifacts/destroy_all_expired_service.rb
index c089567ec14..4070875ffe1 100644
--- a/app/services/ci/job_artifacts/destroy_all_expired_service.rb
+++ b/app/services/ci/job_artifacts/destroy_all_expired_service.rb
@@ -7,16 +7,14 @@ module Ci
include ::Gitlab::LoopHelpers
BATCH_SIZE = 100
+ LOOP_LIMIT = 500
LOOP_TIMEOUT = 5.minutes
- SMALL_LOOP_LIMIT = 100
- LARGE_LOOP_LIMIT = 500
- EXCLUSIVE_LOCK_KEY = 'expired_job_artifacts:destroy:lock'
LOCK_TIMEOUT = 6.minutes
+ EXCLUSIVE_LOCK_KEY = 'expired_job_artifacts:destroy:lock'
def initialize
@removed_artifacts_count = 0
@start_at = Time.current
- @loop_limit = ::Feature.enabled?(:ci_artifact_fast_removal_large_loop_limit, default_enabled: :yaml) ? LARGE_LOOP_LIMIT : SMALL_LOOP_LIMIT
end
##
@@ -26,8 +24,6 @@ module Ci
# preventing multiple `ExpireBuildArtifactsWorker` CRON jobs from running concurrently,
# which is scheduled every 7 minutes.
def execute
- return 0 unless ::Feature.enabled?(:ci_destroy_all_expired_service, default_enabled: :yaml)
-
in_lock(EXCLUSIVE_LOCK_KEY, ttl: LOCK_TIMEOUT, retries: 1) do
if ::Feature.enabled?(:ci_destroy_unlocked_job_artifacts)
destroy_unlocked_job_artifacts
@@ -42,7 +38,7 @@ module Ci
private
def destroy_unlocked_job_artifacts
- loop_until(timeout: LOOP_TIMEOUT, limit: @loop_limit) do
+ loop_until(timeout: LOOP_TIMEOUT, limit: LOOP_LIMIT) do
artifacts = Ci::JobArtifact.expired_before(@start_at).artifact_unlocked.limit(BATCH_SIZE)
service_response = destroy_batch(artifacts)
@removed_artifacts_count += service_response[:destroyed_artifacts_count]
@@ -59,7 +55,7 @@ module Ci
@removed_artifacts_count += service_response[:destroyed_artifacts_count]
break if loop_timeout?
- break if index >= @loop_limit
+ break if index >= LOOP_LIMIT
end
end
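
With the `ci_artifact_fast_removal_large_loop_limit` flag gone, the batch loop is now always bounded by the single `LOOP_LIMIT` constant as well as `LOOP_TIMEOUT`. A self-contained sketch of that bounded-loop pattern (an approximation of `Gitlab::LoopHelpers#loop_until`, not the real helper):

```ruby
# Illustrative only: loop over fixed-size batches until the work runs out,
# the iteration limit is reached, or the wall-clock timeout expires.
def loop_until(timeout:, limit:)
  started_at = Time.now
  limit.times do
    break if Time.now - started_at > timeout
    break unless yield # the block returns false/nil when there is nothing left
  end
end

expired = Array.new(1_250) { |i| "artifact-#{i}" } # stand-in for the expired scope
removed = 0

loop_until(timeout: 300, limit: 500) do
  batch = expired.shift(100) # BATCH_SIZE-sized chunk
  removed += batch.size
  batch.any?
end

puts removed # => 1250
```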
diff --git a/app/services/ci/job_artifacts/destroy_batch_service.rb b/app/services/ci/job_artifacts/destroy_batch_service.rb
index d5a0a2dd885..90d157373c3 100644
--- a/app/services/ci/job_artifacts/destroy_batch_service.rb
+++ b/app/services/ci/job_artifacts/destroy_batch_service.rb
@@ -117,7 +117,7 @@ module Ci
wrongly_expired_artifacts, @job_artifacts = @job_artifacts.partition { |artifact| wrongly_expired?(artifact) }
- remove_expire_at(wrongly_expired_artifacts)
+ remove_expire_at(wrongly_expired_artifacts) if wrongly_expired_artifacts.any?
end
def fix_expire_at?
@@ -127,7 +127,9 @@ module Ci
def wrongly_expired?(artifact)
return false unless artifact.expire_at.present?
- match_date?(artifact.expire_at) && match_time?(artifact.expire_at)
+ # Although traces should never have expiration dates that don't match the time & date here,
+ # we can explicitly exclude them by type since they should never be destroyed.
+ artifact.trace? || (match_date?(artifact.expire_at) && match_time?(artifact.expire_at))
end
def match_date?(expire_at)
diff --git a/app/services/ci/job_artifacts/update_unknown_locked_status_service.rb b/app/services/ci/job_artifacts/update_unknown_locked_status_service.rb
new file mode 100644
index 00000000000..0d35a90ed04
--- /dev/null
+++ b/app/services/ci/job_artifacts/update_unknown_locked_status_service.rb
@@ -0,0 +1,79 @@
+# frozen_string_literal: true
+
+module Ci
+ module JobArtifacts
+ class UpdateUnknownLockedStatusService
+ include ::Gitlab::ExclusiveLeaseHelpers
+ include ::Gitlab::LoopHelpers
+
+ BATCH_SIZE = 100
+ LOOP_TIMEOUT = 5.minutes
+ LOOP_LIMIT = 100
+ LARGE_LOOP_LIMIT = 500
+ EXCLUSIVE_LOCK_KEY = 'unknown_status_job_artifacts:update:lock'
+ LOCK_TIMEOUT = 6.minutes
+
+ def initialize
+ @removed_count = 0
+ @locked_count = 0
+ @start_at = Time.current
+ @loop_limit = Feature.enabled?(:ci_job_artifacts_backlog_large_loop_limit) ? LARGE_LOOP_LIMIT : LOOP_LIMIT
+ end
+
+ def execute
+ in_lock(EXCLUSIVE_LOCK_KEY, ttl: LOCK_TIMEOUT, retries: 1) do
+ update_locked_status_on_unknown_artifacts
+ end
+
+ { removed: @removed_count, locked: @locked_count }
+ end
+
+ private
+
+ def update_locked_status_on_unknown_artifacts
+ loop_until(timeout: LOOP_TIMEOUT, limit: @loop_limit) do
+ unknown_status_build_ids = safely_ordered_ci_job_artifacts_locked_unknown_relation.pluck_job_id.uniq
+
+ locked_pipe_build_ids = ::Ci::Build
+ .with_pipeline_locked_artifacts
+ .id_in(unknown_status_build_ids)
+ .pluck_primary_key
+
+ @locked_count += update_unknown_artifacts(locked_pipe_build_ids, Ci::JobArtifact.lockeds[:artifacts_locked])
+
+ unlocked_pipe_build_ids = unknown_status_build_ids - locked_pipe_build_ids
+ service_response = batch_destroy_artifacts(unlocked_pipe_build_ids)
+ @removed_count += service_response[:destroyed_artifacts_count]
+ end
+ end
+
+ def update_unknown_artifacts(build_ids, locked_value)
+ return 0 unless build_ids.any?
+
+ expired_locked_unknown_artifacts.for_job_ids(build_ids).update_all(locked: locked_value)
+ end
+
+ def batch_destroy_artifacts(build_ids)
+ deleteable_artifacts_relation =
+ if build_ids.any?
+ expired_locked_unknown_artifacts.for_job_ids(build_ids)
+ else
+ Ci::JobArtifact.none
+ end
+
+ Ci::JobArtifacts::DestroyBatchService.new(deleteable_artifacts_relation).execute
+ end
+
+ def expired_locked_unknown_artifacts
+ # UPDATE queries perform better without the specific order and limit
+ # https://gitlab.com/gitlab-org/gitlab/-/merge_requests/76509#note_891260455
+ Ci::JobArtifact.expired_before(@start_at).artifact_unknown
+ end
+
+ def safely_ordered_ci_job_artifacts_locked_unknown_relation
+ # Adding the ORDER and LIMIT improves performance when we don't have build_id
+ expired_locked_unknown_artifacts.limit(BATCH_SIZE).order_expired_asc
+ end
+ end
+ end
+end
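
A brief usage sketch for the new service: `execute` takes an exclusive lease and returns the two counters, so a caller (presumably a cron worker) can surface them directly. The logging call below is illustrative, not part of this change:

```ruby
# Hedged usage sketch: invoke the service and report the counters it returns.
result = Ci::JobArtifacts::UpdateUnknownLockedStatusService.new.execute

Gitlab::AppLogger.info(
  message: 'Processed job artifacts with unknown locked status',
  removed: result[:removed], # artifacts destroyed because their pipeline is unlocked
  locked: result[:locked]    # artifacts marked artifacts_locked
)
```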
diff --git a/app/services/ci/play_build_service.rb b/app/services/ci/play_build_service.rb
index 2d6b6aeee14..fbf2aad1991 100644
--- a/app/services/ci/play_build_service.rb
+++ b/app/services/ci/play_build_service.rb
@@ -14,10 +14,7 @@ module Ci
AfterRequeueJobService.new(project, current_user).execute(build)
end
else
- # Retrying in Ci::PlayBuildService is a legacy process that should be removed.
- # Instead, callers should explicitly execute Ci::RetryBuildService.
- # See https://gitlab.com/gitlab-org/gitlab/-/issues/347493.
- build.retryable? ? Ci::Build.retry(build, current_user) : build
+ Ci::RetryJobService.new(project, current_user).execute(build)[:job]
end
end
diff --git a/app/services/ci/register_job_service.rb b/app/services/ci/register_job_service.rb
index c8b475f6c48..6c9044b5089 100644
--- a/app/services/ci/register_job_service.rb
+++ b/app/services/ci/register_job_service.rb
@@ -283,7 +283,8 @@ module Ci
runner_unsupported: -> (build, params) { !build.supported_runner?(params.dig(:info, :features)) },
archived_failure: -> (build, _) { build.archived? },
project_deleted: -> (build, _) { build.project.pending_delete? },
- builds_disabled: -> (build, _) { !build.project.builds_enabled? }
+ builds_disabled: -> (build, _) { !build.project.builds_enabled? },
+ user_blocked: -> (build, _) { build.user&.blocked? }
}
end
end
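
The new `user_blocked` entry extends the map of drop reasons to predicates. As an illustration of how such a map can be evaluated, returning the first reason whose predicate matches; the evaluation loop below is an assumption for illustration, not `RegisterJobService` code:

```ruby
# Illustrative only: pick the first drop reason whose predicate matches.
def first_drop_reason(build, params, reasons)
  reasons.each do |reason, predicate|
    return reason if predicate.call(build, params)
  end
  nil
end

# e.g. with the map above, a build whose user is blocked would yield :user_blocked.
```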
diff --git a/app/services/ci/retry_build_service.rb b/app/services/ci/retry_build_service.rb
deleted file mode 100644
index 906e5cec4f3..00000000000
--- a/app/services/ci/retry_build_service.rb
+++ /dev/null
@@ -1,94 +0,0 @@
-# frozen_string_literal: true
-
-module Ci
- class RetryBuildService < ::BaseService
- include Gitlab::Utils::StrongMemoize
-
- def self.clone_accessors
- %i[pipeline project ref tag options name
- allow_failure stage stage_id stage_idx trigger_request
- yaml_variables when environment coverage_regex
- description tag_list protected needs_attributes
- job_variables_attributes resource_group scheduling_type].freeze
- end
-
- def self.extra_accessors
- []
- end
-
- def execute(build)
- build.ensure_scheduling_type!
-
- clone!(build).tap do |new_build|
- check_assignable_runners!(new_build)
- next if new_build.failed?
-
- Gitlab::OptimisticLocking.retry_lock(new_build, name: 'retry_build', &:enqueue)
- AfterRequeueJobService.new(project, current_user).execute(build)
- end
- end
-
- # rubocop: disable CodeReuse/ActiveRecord
- def clone!(build)
- # Cloning a build requires a strict type check to ensure
- # the attributes being used for the clone are taken straight
- # from the model and not overridden by other abstractions.
- raise TypeError unless build.instance_of?(Ci::Build)
-
- check_access!(build)
-
- new_build = clone_build(build)
-
- new_build.run_after_commit do
- ::Ci::CopyCrossDatabaseAssociationsService.new.execute(build, new_build)
-
- ::Deployments::CreateForBuildService.new.execute(new_build)
-
- ::MergeRequests::AddTodoWhenBuildFailsService
- .new(project: project)
- .close(new_build)
- end
-
- ::Ci::Pipelines::AddJobService.new(build.pipeline).execute!(new_build) do |job|
- BulkInsertableAssociations.with_bulk_insert do
- job.save!
- end
- end
-
- build.reset # refresh the data to get new values of `retried` and `processed`.
-
- new_build
- end
- # rubocop: enable CodeReuse/ActiveRecord
-
- private
-
- def check_access!(build)
- unless can?(current_user, :update_build, build)
- raise Gitlab::Access::AccessDeniedError, '403 Forbidden'
- end
- end
-
- def check_assignable_runners!(build); end
-
- def clone_build(build)
- project.builds.new(build_attributes(build))
- end
-
- def build_attributes(build)
- attributes = self.class.clone_accessors.to_h do |attribute|
- [attribute, build.public_send(attribute)] # rubocop:disable GitlabSecurity/PublicSend
- end
-
- if build.persisted_environment.present?
- attributes[:metadata_attributes] ||= {}
- attributes[:metadata_attributes][:expanded_environment_name] = build.expanded_environment_name
- end
-
- attributes[:user] = current_user
- attributes
- end
- end
-end
-
-Ci::RetryBuildService.prepend_mod_with('Ci::RetryBuildService')
diff --git a/app/services/ci/retry_job_service.rb b/app/services/ci/retry_job_service.rb
new file mode 100644
index 00000000000..af7e7fa16e9
--- /dev/null
+++ b/app/services/ci/retry_job_service.rb
@@ -0,0 +1,94 @@
+# frozen_string_literal: true
+
+module Ci
+ class RetryJobService < ::BaseService
+ include Gitlab::Utils::StrongMemoize
+
+ def execute(job)
+ if job.retryable?
+ job.ensure_scheduling_type!
+ new_job = retry_job(job)
+
+ ServiceResponse.success(payload: { job: new_job })
+ else
+ ServiceResponse.error(
+ message: 'Job cannot be retried',
+ payload: { job: job, reason: :not_retryable }
+ )
+ end
+ end
+
+ # rubocop: disable CodeReuse/ActiveRecord
+ def clone!(job)
+ # Cloning a job requires a strict type check to ensure
+ # the attributes being used for the clone are taken straight
+ # from the model and not overridden by other abstractions.
+ raise TypeError unless job.instance_of?(Ci::Build)
+
+ check_access!(job)
+
+ new_job = clone_job(job)
+
+ new_job.run_after_commit do
+ ::Ci::CopyCrossDatabaseAssociationsService.new.execute(job, new_job)
+
+ ::Deployments::CreateForBuildService.new.execute(new_job)
+
+ ::MergeRequests::AddTodoWhenBuildFailsService
+ .new(project: project)
+ .close(new_job)
+ end
+
+ ::Ci::Pipelines::AddJobService.new(job.pipeline).execute!(new_job) do |processable|
+ BulkInsertableAssociations.with_bulk_insert do
+ processable.save!
+ end
+ end
+
+ job.reset # refresh the data to get new values of `retried` and `processed`.
+
+ new_job
+ end
+ # rubocop: enable CodeReuse/ActiveRecord
+
+ private
+
+ def retry_job(job)
+ clone!(job).tap do |new_job|
+ check_assignable_runners!(new_job)
+ next if new_job.failed?
+
+ Gitlab::OptimisticLocking.retry_lock(new_job, name: 'retry_build', &:enqueue)
+ AfterRequeueJobService.new(project, current_user).execute(job)
+ end
+ end
+
+ def check_access!(job)
+ unless can?(current_user, :update_build, job)
+ raise Gitlab::Access::AccessDeniedError, '403 Forbidden'
+ end
+ end
+
+ def check_assignable_runners!(job); end
+
+ def clone_job(job)
+ project.builds.new(job_attributes(job))
+ end
+
+ def job_attributes(job)
+ attributes = job.class.clone_accessors.to_h do |attribute|
+ [attribute, job.public_send(attribute)] # rubocop:disable GitlabSecurity/PublicSend
+ end
+
+ if job.persisted_environment.present?
+ attributes[:metadata_attributes] ||= {}
+ attributes[:metadata_attributes][:expanded_environment_name] = job.expanded_environment_name
+ end
+
+ attributes[:user] = current_user
+ attributes
+ end
+ end
+end
+
+Ci::RetryJobService.prepend_mod_with('Ci::RetryJobService')
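
Unlike the deleted `RetryBuildService#execute`, which returned the cloned build directly, `RetryJobService#execute` wraps the result in a `ServiceResponse`, which is why `PlayBuildService` above digs the job out of the payload. A hedged usage sketch of that contract (variable names and the error handling are illustrative):

```ruby
# Hedged usage sketch of the ServiceResponse contract defined above.
response = Ci::RetryJobService.new(project, current_user).execute(job)

if response.success?
  new_job = response.payload[:job] # the cloned job, already enqueued by the service
else
  # payload[:reason] is :not_retryable when the original job cannot be retried
  Gitlab::AppLogger.warn(message: response.message, job_id: job.id) # illustrative logging
end
```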
diff --git a/app/services/ci/retry_pipeline_service.rb b/app/services/ci/retry_pipeline_service.rb
index d40643e1513..85f910d05d7 100644
--- a/app/services/ci/retry_pipeline_service.rb
+++ b/app/services/ci/retry_pipeline_service.rb
@@ -13,7 +13,7 @@ module Ci
builds_relation(pipeline).find_each do |build|
next unless can_be_retried?(build)
- Ci::RetryBuildService.new(project, current_user).clone!(build)
+ Ci::RetryJobService.new(project, current_user).clone!(build)
end
pipeline.processables.latest.skipped.find_each do |skipped|