diff options
Diffstat (limited to 'app/workers/concerns/waitable_worker.rb')
-rw-r--r-- | app/workers/concerns/waitable_worker.rb | 22 |
1 file changed, 5 insertions, 17 deletions
diff --git a/app/workers/concerns/waitable_worker.rb b/app/workers/concerns/waitable_worker.rb
index f8b945b8892..336d60d46ac 100644
--- a/app/workers/concerns/waitable_worker.rb
+++ b/app/workers/concerns/waitable_worker.rb
@@ -5,25 +5,13 @@ module WaitableWorker
   class_methods do
     # Schedules multiple jobs and waits for them to be completed.
-    def bulk_perform_and_wait(args_list, timeout: 10)
+    def bulk_perform_and_wait(args_list)
       # Short-circuit: it's more efficient to do small numbers of jobs inline
-      return bulk_perform_inline(args_list) if args_list.size <= 3
-
-      # Don't wait if there's too many jobs to be waited for. Not including the
-      # waiter allows them to be deduplicated and it skips waiting for jobs that
-      # are not likely to finish within the timeout. This assumes we can process
-      # 10 jobs per second:
-      # https://gitlab.com/gitlab-com/gl-infra/scalability/-/issues/205
-      return bulk_perform_async(args_list) if args_list.length >= 10 * timeout
-
-      waiter = Gitlab::JobWaiter.new(args_list.size, worker_label: self.to_s)
-
-      # Point all the bulk jobs at the same JobWaiter. Converts, [[1], [2], [3]]
-      # into [[1, "key"], [2, "key"], [3, "key"]]
-      waiting_args_list = args_list.map { |args| [*args, waiter.key] }
-      bulk_perform_async(waiting_args_list)
+      if args_list.size == 1
+        return bulk_perform_inline(args_list)
+      end

-      waiter.wait(timeout)
+      bulk_perform_async(args_list)
     end

     # Performs multiple jobs directly. Failed jobs will be put into sidekiq so