1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
|
# frozen_string_literal: true
module Gitlab
  module Database
    module BackgroundMigration
      # Raised when a failed job cannot be split into two smaller jobs
      # (e.g. it is not in the failed state).
      SplitAndRetryError = Class.new(StandardError)
      # Raised when the sub-batch size of a job cannot be reduced
      # (e.g. it is not in the failed state).
      ReduceSubBatchSizeError = Class.new(StandardError)

      # One unit of work (a single batch of rows, [min_value, max_value]) of a
      # batched background migration. Tracks its own lifecycle via a state
      # machine and recovers from database timeouts by shrinking its
      # sub-batch size and/or splitting itself into two smaller jobs.
      class BatchedJob < SharedModel
        include EachBatch
        include FromUnion

        self.table_name = :batched_background_migration_jobs

        # A job with this many attempts is no longer retried automatically.
        MAX_ATTEMPTS = 3
        # Lower bound for batch_size; a job at this size cannot shrink further.
        MIN_BATCH_SIZE = 1
        # Each reduction multiplies sub_batch_size by this factor (a 25% cut).
        SUB_BATCH_SIZE_REDUCE_FACTOR = 0.75
        # Percentage cap on the cumulative reduction relative to the
        # migration's original sub_batch_size (see #sub_batch_exceeds_threshold?).
        SUB_BATCH_SIZE_THRESHOLD = 65
        # A pending/running job not updated for this long is considered stuck.
        STUCK_JOBS_TIMEOUT = 1.hour.freeze
        # Timeout-style errors that make a failed job eligible for splitting.
        TIMEOUT_EXCEPTIONS = [ActiveRecord::StatementTimeout, ActiveRecord::ConnectionTimeoutError,
                              ActiveRecord::AdapterTimeout, ActiveRecord::LockWaitTimeout,
                              ActiveRecord::QueryCanceled].freeze

        belongs_to :batched_migration, foreign_key: :batched_background_migration_id
        has_many :batched_job_transition_logs, foreign_key: :batched_background_migration_job_id

        scope :active, -> { with_statuses(:pending, :running) }
        scope :stuck, -> { active.where('updated_at <= ?', STUCK_JOBS_TIMEOUT.ago) }
        # Failed jobs with attempts remaining, plus stuck (silently dead) jobs.
        scope :retriable, -> { from_union([with_status(:failed).where('attempts < ?', MAX_ATTEMPTS), self.stuck]) }
        scope :except_succeeded, -> { without_status(:succeeded) }
        scope :successful_in_execution_order, -> { where.not(finished_at: nil).with_status(:succeeded).order(:finished_at) }
        scope :with_preloads, -> { preload(:batched_migration) }
        scope :created_since, ->(date_time) { where('created_at >= ?', date_time) }
        scope :blocked_by_max_attempts, -> { where('attempts >= ?', MAX_ATTEMPTS) }

        state_machine :status, initial: :pending do
          state :pending, value: 0
          state :running, value: 1
          state :failed, value: 2
          state :succeeded, value: 3

          event :succeed do
            transition any => :succeeded
          end

          event :failure do
            transition any => :failed
          end

          event :run do
            transition any => :running
          end

          # Stamp the completion time whenever a job reaches a terminal state.
          before_transition any => [:failed, :succeeded] do |job|
            job.finished_at = Time.current
          end

          # Starting (or restarting) a run counts as an attempt and resets
          # the per-run bookkeeping fields.
          before_transition any => :running do |job|
            job.attempts += 1
            job.started_at = Time.current
            job.finished_at = nil
            job.metrics = {}
          end

          # On failure, attempt automatic recovery: shrink the sub-batch size
          # when the error originated from a sub-batch, and split the job in
          # two once retries are exhausted on a known timeout error.
          after_transition any => :failed do |job, transition|
            exception, from_sub_batch = job.class.extract_transition_options(transition.args)

            job.reduce_sub_batch_size! if from_sub_batch && job.can_reduce_sub_batch_size?

            job.split_and_retry! if job.can_split?(exception)
          rescue SplitAndRetryError, ReduceSubBatchSizeError => error
            # Recovery is best-effort: log the failure and keep the job failed.
            Gitlab::AppLogger.error(
              message: error.message,
              batched_job_id: job.id,
              batched_migration_id: job.batched_migration.id,
              job_class_name: job.migration_job_class_name,
              job_arguments: job.migration_job_arguments
            )
          end

          # Audit every transition: persist a transition-log row, report any
          # exception to error tracking, and emit a structured log entry.
          after_transition do |job, transition|
            exception, _ = job.class.extract_transition_options(transition.args)

            job.batched_job_transition_logs.create(previous_status: transition.from, next_status: transition.to, exception_class: exception&.class, exception_message: exception&.message)

            Gitlab::ErrorTracking.track_exception(exception, batched_job_id: job.id, job_class_name: job.migration_job_class_name, job_arguments: job.migration_job_arguments) if exception

            Gitlab::AppLogger.info(
              message: 'BatchedJob transition',
              batched_job_id: job.id,
              previous_state: transition.from_name,
              new_state: transition.to_name,
              batched_migration_id: job.batched_migration.id,
              job_class_name: job.migration_job_class_name,
              job_arguments: job.migration_job_arguments,
              exception_class: exception&.class,
              exception_message: exception&.message
            )
          end
        end

        delegate :job_class, :table_name, :column_name, :job_arguments, :job_class_name,
          to: :batched_migration, prefix: :migration

        # Extracts the exception and the from_sub_batch flag from the state
        # machine transition arguments.
        #
        # @param args [Array<Hash>] arguments passed to the transition event
        # @return [Array] [exception, from_sub_batch], or [] when no entry
        #   with a present :error key was supplied
        def self.extract_transition_options(args)
          error_hash = args.find { |arg| arg[:error].present? }

          return [] unless error_hash

          exception = error_hash.fetch(:error)
          from_sub_batch = error_hash[:from_sub_batch]

          [exception, from_sub_batch]
        end

        # Ratio of actual runtime to the migration's configured interval.
        # Only meaningful for succeeded jobs; values below 1 mean the job
        # finished faster than its allotted interval.
        #
        # @return [Float, nil] nil unless succeeded with both timestamps set
        def time_efficiency
          return unless succeeded?
          return unless finished_at && started_at

          duration = finished_at - started_at

          # TODO: Switch to individual job interval (prereq: https://gitlab.com/gitlab-org/gitlab/-/issues/328801)
          duration.to_f / batched_migration.interval
        end

        # A job may only be split after its retries are exhausted, and only
        # when it failed with a known timeout error and can still shrink.
        # NOTE: nil-safe — `nil.class` is NilClass, which is never in
        # TIMEOUT_EXCEPTIONS.
        def can_split?(exception)
          return if still_retryable?

          exception.class.in?(TIMEOUT_EXCEPTIONS) && within_batch_size_boundaries?
        end

        # Sub-batch reduction applies while retries remain, gated behind the
        # :reduce_sub_batch_size_on_timeouts feature flag.
        def can_reduce_sub_batch_size?
          return false unless Feature.enabled?(:reduce_sub_batch_size_on_timeouts)

          still_retryable? && within_batch_size_boundaries?
        end

        # Splits a failed job in half: this record keeps the lower half of its
        # range with a halved batch_size and reset attempts, and a duplicate
        # record is created for the upper half.
        #
        # @raise [SplitAndRetryError] when the job is not in the failed state
        def split_and_retry!
          with_lock do
            raise SplitAndRetryError, 'Only failed jobs can be split' unless failed?

            new_batch_size = batch_size / 2

            # Can't split any smaller; just reset attempts and retry as-is.
            break update!(attempts: 0) if new_batch_size < 1

            batching_strategy = batched_migration.batch_class.new(connection: self.class.connection)
            next_batch_bounds = batching_strategy.next_batch(
              batched_migration.table_name,
              batched_migration.column_name,
              batch_min_value: min_value,
              batch_size: new_batch_size,
              job_arguments: batched_migration.job_arguments,
              job_class: batched_migration.job_class
            )
            midpoint = next_batch_bounds.last

            # We don't want the midpoint to go over the existing max_value because
            # those IDs would already be in the next batched migration job.
            # This could happen when a lot of records in the current batch are deleted.
            #
            # In this case, we just lower the batch size so that future calls to this
            # method could eventually split the job if it continues to fail.
            if midpoint >= max_value
              update!(batch_size: new_batch_size, attempts: 0)
            else
              old_max_value = max_value

              update!(
                batch_size: new_batch_size,
                max_value: midpoint,
                attempts: 0,
                started_at: nil,
                finished_at: nil,
                metrics: {}
              )

              new_record = dup
              new_record.min_value = midpoint.next
              new_record.max_value = old_max_value
              new_record.save!
            end
          end
        end

        # It reduces the size of +sub_batch_size+ by 25%
        #
        # @raise [ReduceSubBatchSizeError] when the job is not in the failed state
        def reduce_sub_batch_size!
          raise ReduceSubBatchSizeError, 'Only sub_batch_size of failed jobs can be reduced' unless failed?

          # Stop once the cumulative reduction would pass the threshold.
          return if sub_batch_exceeds_threshold?

          with_lock do
            actual_sub_batch_size = sub_batch_size
            # Never below 1 and never above the job's own batch_size.
            reduced_sub_batch_size = (sub_batch_size * SUB_BATCH_SIZE_REDUCE_FACTOR).to_i.clamp(1, batch_size)

            update!(sub_batch_size: reduced_sub_batch_size)

            Gitlab::AppLogger.warn(
              message: 'Sub batch size reduced due to timeout',
              batched_job_id: id,
              sub_batch_size: actual_sub_batch_size,
              reduced_sub_batch_size: reduced_sub_batch_size,
              attempts: attempts,
              batched_migration_id: batched_migration.id,
              job_class_name: migration_job_class_name,
              job_arguments: migration_job_arguments
            )
          end
        end

        # Whether the job still has automatic retry attempts left.
        def still_retryable?
          attempts < MAX_ATTEMPTS
        end

        # True while the job can still shrink: batch_size above the minimum
        # and still larger than the current sub_batch_size.
        def within_batch_size_boundaries?
          batch_size > MIN_BATCH_SIZE && batch_size > sub_batch_size
        end

        # It doesn't allow sub-batch size to be reduced lower than the threshold
        #
        # @info It will prevent the next iteration to reduce the +sub_batch_size+ lower
        # than the +SUB_BATCH_SIZE_THRESHOLD+ or 65% of its original size.
        def sub_batch_exceeds_threshold?
          initial_sub_batch_size = batched_migration.sub_batch_size
          reduced_sub_batch_size = (sub_batch_size * SUB_BATCH_SIZE_REDUCE_FACTOR).to_i
          diff = initial_sub_batch_size - reduced_sub_batch_size

          # Percentage of the migration's original sub_batch_size that the
          # next reduction would have cut in total.
          (1.0 * diff / initial_sub_batch_size * 100).round(2) > SUB_BATCH_SIZE_THRESHOLD
        end
      end
    end
  end
end
|