# frozen_string_literal: true
class PipelineScheduleWorker # rubocop:disable Scalability/IdempotentWorker
  include ApplicationWorker

  data_consistency :always

  include CronjobQueue
  include ::Gitlab::ExclusiveLeaseHelpers

  # Attempts to acquire the exclusive lease before giving up.
  LOCK_RETRY = 3
  # Maximum time a single run may hold the exclusive lease.
  LOCK_TTL = 5.minutes
  # Per-batch stagger applied when the delayed-enqueue feature flag is on.
  DELAY = 7.seconds
  # Number of schedules fetched and fanned out per batch.
  BATCH_SIZE = 500

  feature_category :continuous_integration
  worker_resource_boundary :cpu

  # Finds every runnable pipeline schedule and fans each batch out to
  # RunPipelineScheduleWorker. Runs under an exclusive lease so overlapping
  # cron firings cannot enqueue the same schedules twice.
  def perform
    in_lock(lock_key, **lock_params) do
      Ci::PipelineSchedule
        .select(:id, :owner_id, :project_id) # Minimize the selected columns
        .runnable_schedules
        .preloaded
        .find_in_batches(batch_size: BATCH_SIZE).with_index do |schedules, index| # rubocop: disable CodeReuse/ActiveRecord -- activates because of batch_size
          enqueue_run_pipeline_schedule_worker(schedules, index)
        end
    end
  end

  private

  # Lease key shared by every instance of this worker class.
  def lock_key
    self.class.name.underscore
  end

  def lock_params
    {
      ttl: LOCK_TTL,
      retries: LOCK_RETRY
    }
  end

  # Enqueues one RunPipelineScheduleWorker job per schedule in the batch.
  # When the feature flag is enabled, batch `index` is delayed by
  # `index * DELAY` (minimum 1) to spread load; otherwise all jobs are
  # enqueued immediately.
  def enqueue_run_pipeline_schedule_worker(schedules, index)
    if ::Feature.enabled?(:run_pipeline_schedule_worker_with_delay)
      RunPipelineScheduleWorker.bulk_perform_in_with_contexts(
        [1, index * DELAY].max,
        schedules,
        arguments_proc: schedule_arguments_proc,
        context_proc: schedule_context_proc
      )
    else
      RunPipelineScheduleWorker.bulk_perform_async_with_contexts(
        schedules,
        arguments_proc: schedule_arguments_proc,
        context_proc: schedule_context_proc
      )
    end
  end

  # Positional job arguments for each RunPipelineScheduleWorker job.
  def schedule_arguments_proc
    ->(schedule) { [schedule.id, schedule.owner_id, { scheduling: true }] }
  end

  # Application context (project/user) attached to each enqueued job.
  def schedule_context_proc
    ->(schedule) { { project: schedule.project, user: schedule.owner } }
  end
end