gitlab.com/gitlab-org/gitlab-foss.git
author     GitLab Bot <gitlab-bot@gitlab.com>  2022-06-07 21:09:27 +0300
committer  GitLab Bot <gitlab-bot@gitlab.com>  2022-06-07 21:09:27 +0300
commit     5cda8c8a420399ca9687c4a981fefd50ce5a1fdd (patch)
tree       6050d7517a36798c9586e153df20a0696c5fcd4f
parent     7bbc731c75d0b8bf7c74ba77d521266d2ed0a1fc (diff)
Add latest changes from gitlab-org/gitlab@master
-rw-r--r--  .gitlab/CODEOWNERS | 1
-rw-r--r--  app/assets/javascripts/access_tokens/components/access_token_table_app.vue | 92
-rw-r--r--  app/assets/javascripts/access_tokens/components/constants.js | 61
-rw-r--r--  app/assets/javascripts/access_tokens/components/new_access_token_app.vue | 21
-rw-r--r--  app/assets/javascripts/pages/admin/impersonation_tokens/index.js | 10
-rw-r--r--  app/assets/javascripts/pages/groups/settings/access_tokens/index.js | 8
-rw-r--r--  app/assets/javascripts/pages/projects/settings/access_tokens/index.js | 8
-rw-r--r--  app/models/hooks/web_hook_log.rb | 6
-rw-r--r--  app/models/users/callout.rb | 5
-rw-r--r--  app/services/ci/job_artifacts/destroy_all_expired_service.rb | 2
-rw-r--r--  app/services/ci/job_artifacts/destroy_batch_service.rb | 24
-rw-r--r--  app/services/web_hook_service.rb | 44
-rw-r--r--  app/views/admin/hook_logs/show.html.haml | 5
-rw-r--r--  app/views/projects/hook_logs/show.html.haml | 5
-rw-r--r--  app/views/shared/hook_logs/_content.html.haml | 7
-rw-r--r--  config/initializers/1_settings.rb | 3
-rw-r--r--  doc/.vale/gitlab/Uppercase.yml | 3
-rw-r--r--  doc/administration/postgresql/pgbouncer.md | 4
-rw-r--r--  doc/administration/troubleshooting/postgresql.md | 14
-rw-r--r--  doc/api/graphql/reference/index.md | 1
-rw-r--r--  doc/architecture/blueprints/database/scalability/patterns/read_mostly.md | 2
-rw-r--r--  doc/architecture/blueprints/database/scalability/patterns/time_decay.md | 2
-rw-r--r--  doc/ci/yaml/index.md | 11
-rw-r--r--  doc/development/database/efficient_in_operator_queries.md | 4
-rw-r--r--  doc/development/database/loose_foreign_keys.md | 22
-rw-r--r--  doc/development/database/migrations_for_multiple_databases.md | 4
-rw-r--r--  doc/development/database/strings_and_the_text_data_type.md | 16
-rw-r--r--  doc/development/database_query_comments.md | 4
-rw-r--r--  doc/development/insert_into_tables_in_batches.md | 24
-rw-r--r--  doc/development/ordering_table_columns.md | 14
-rw-r--r--  doc/development/query_performance.md | 8
-rw-r--r--  doc/development/swapping_tables.md | 12
-rw-r--r--  doc/development/understanding_explain_plans.md | 42
-rw-r--r--  doc/user/admin_area/settings/visibility_and_access_controls.md | 27
-rw-r--r--  doc/user/group/index.md | 5
-rw-r--r--  doc/user/infrastructure/iac/terraform_state.md | 2
-rw-r--r--  lib/gitlab/ci/templates/Security/DAST.latest.gitlab-ci.yml | 7
-rw-r--r--  lib/gitlab/project_stats_refresh_conflicts_logger.rb | 10
-rw-r--r--  locale/gitlab.pot | 3
-rw-r--r--  spec/features/admin/admin_hook_logs_spec.rb | 14
-rw-r--r--  spec/features/projects/hook_logs/user_reads_log_spec.rb | 73
-rw-r--r--  spec/frontend/access_tokens/components/access_token_table_app_spec.js | 17
-rw-r--r--  spec/frontend/access_tokens/components/new_access_token_app_spec.js | 9
-rw-r--r--  spec/lib/gitlab/project_stats_refresh_conflicts_logger_spec.rb | 20
-rw-r--r--  spec/models/users/callout_spec.rb | 12
-rw-r--r--  spec/services/ci/job_artifacts/destroy_all_expired_service_spec.rb | 10
-rw-r--r--  spec/services/ci/job_artifacts/destroy_batch_service_spec.rb | 112
-rw-r--r--  spec/services/web_hook_service_spec.rb | 192
-rw-r--r--  spec/workers/every_sidekiq_worker_spec.rb | 1
49 files changed, 753 insertions, 250 deletions
diff --git a/.gitlab/CODEOWNERS b/.gitlab/CODEOWNERS
index e0e304a361c..6a4a58fe5db 100644
--- a/.gitlab/CODEOWNERS
+++ b/.gitlab/CODEOWNERS
@@ -632,6 +632,7 @@ lib/gitlab/checks/** @proglottis @toon @zj-gitlab
/doc/topics/offline/index.md @axil
/doc/topics/offline/quick_start_guide.md @axil
/doc/topics/plan_and_track.md @msedlakjakubowski
+/doc/tutorials/ @kpaizee
/doc/update/ @axil
/doc/update/mysql_to_postgresql.md @aqualls
/doc/update/upgrading_postgresql_using_slony.md @aqualls
diff --git a/app/assets/javascripts/access_tokens/components/access_token_table_app.vue b/app/assets/javascripts/access_tokens/components/access_token_table_app.vue
index e936ad8aa14..5fe285c0896 100644
--- a/app/assets/javascripts/access_tokens/components/access_token_table_app.vue
+++ b/app/assets/javascripts/access_tokens/components/access_token_table_app.vue
@@ -1,24 +1,23 @@
<script>
-import { GlButton, GlIcon, GlLink, GlTable, GlTooltipDirective } from '@gitlab/ui';
+import { GlButton, GlIcon, GlLink, GlPagination, GlTable, GlTooltipDirective } from '@gitlab/ui';
import { helpPagePath } from '~/helpers/help_page_helper';
import { convertObjectPropsToCamelCase } from '~/lib/utils/common_utils';
-import { __, s__, sprintf } from '~/locale';
+import { __, sprintf } from '~/locale';
import DomElementListener from '~/vue_shared/components/dom_element_listener.vue';
import TimeAgoTooltip from '~/vue_shared/components/time_ago_tooltip.vue';
import UserDate from '~/vue_shared/components/user_date.vue';
-
-const FORM_SELECTOR = '#js-new-access-token-form';
-const SUCCESS_EVENT = 'ajax:success';
+import { EVENT_SUCCESS, FIELDS, FORM_SELECTOR, INITIAL_PAGE, PAGE_SIZE } from './constants';
export default {
- FORM_SELECTOR,
- SUCCESS_EVENT,
+ EVENT_SUCCESS,
+ PAGE_SIZE,
name: 'AccessTokenTableApp',
components: {
DomElementListener,
GlButton,
GlIcon,
GlLink,
+ GlPagination,
GlTable,
TimeAgoTooltip,
UserDate,
@@ -39,58 +38,6 @@ export default {
revokeButton: __('Revoke'),
tokenValidity: __('Token valid until revoked'),
},
- fields: [
- {
- key: 'name',
- label: __('Token name'),
- sortable: true,
- tdClass: `gl-text-black-normal`,
- thClass: `gl-text-black-normal`,
- },
- {
- formatter(scopes) {
- return scopes?.length ? scopes.join(', ') : __('no scopes selected');
- },
- key: 'scopes',
- label: __('Scopes'),
- sortable: true,
- tdClass: `gl-text-black-normal`,
- thClass: `gl-text-black-normal`,
- },
- {
- key: 'createdAt',
- label: s__('AccessTokens|Created'),
- sortable: true,
- tdClass: `gl-text-black-normal`,
- thClass: `gl-text-black-normal`,
- },
- {
- key: 'lastUsedAt',
- label: __('Last Used'),
- sortable: true,
- tdClass: `gl-text-black-normal`,
- thClass: `gl-text-black-normal`,
- },
- {
- key: 'expiresAt',
- label: __('Expires'),
- sortable: true,
- tdClass: `gl-text-black-normal`,
- thClass: `gl-text-black-normal`,
- },
- {
- key: 'role',
- label: __('Role'),
- tdClass: `gl-text-black-normal`,
- thClass: `gl-text-black-normal`,
- sortable: true,
- },
- {
- key: 'action',
- label: __('Action'),
- thClass: `gl-text-black-normal`,
- },
- ],
inject: [
'accessTokenType',
'accessTokenTypePlural',
@@ -101,13 +48,15 @@ export default {
data() {
return {
activeAccessTokens: this.initialActiveAccessTokens,
+ currentPage: INITIAL_PAGE,
};
},
computed: {
filteredFields() {
- return this.showRole
- ? this.$options.fields
- : this.$options.fields.filter((field) => field.key !== 'role');
+ return this.showRole ? FIELDS : FIELDS.filter((field) => field.key !== 'role');
+ },
+ formSelector() {
+ return `#${FORM_SELECTOR}`;
},
header() {
return sprintf(this.$options.i18n.header, {
@@ -120,11 +69,15 @@ export default {
accessTokenType: this.accessTokenType,
});
},
+ showPagination() {
+ return this.activeAccessTokens.length > PAGE_SIZE;
+ },
},
methods: {
onSuccess(event) {
const [{ active_access_tokens: activeAccessTokens }] = event.detail;
this.activeAccessTokens = convertObjectPropsToCamelCase(activeAccessTokens, { deep: true });
+ this.currentPage = INITIAL_PAGE;
},
sortingChanged(aRow, bRow, key) {
if (['createdAt', 'lastUsedAt', 'expiresAt'].includes(key)) {
@@ -144,7 +97,7 @@ export default {
</script>
<template>
- <dom-element-listener :selector="$options.FORM_SELECTOR" @[$options.SUCCESS_EVENT]="onSuccess">
+ <dom-element-listener :selector="formSelector" @[$options.EVENT_SUCCESS]="onSuccess">
<div>
<hr />
<h5>{{ header }}</h5>
@@ -154,6 +107,8 @@ export default {
:empty-text="noActiveTokensMessage"
:fields="filteredFields"
:items="activeAccessTokens"
+ :per-page="$options.PAGE_SIZE"
+ :current-page="currentPage"
:sort-compare="sortingChanged"
show-empty
>
@@ -199,6 +154,17 @@ export default {
/>
</template>
</gl-table>
+ <gl-pagination
+ v-if="showPagination"
+ v-model="currentPage"
+ :per-page="$options.PAGE_SIZE"
+ :total-items="activeAccessTokens.length"
+ :prev-text="__('Prev')"
+ :next-text="__('Next')"
+ :label-next-page="__('Go to next page')"
+ :label-prev-page="__('Go to previous page')"
+ align="center"
+ />
</div>
</dom-element-listener>
</template>
diff --git a/app/assets/javascripts/access_tokens/components/constants.js b/app/assets/javascripts/access_tokens/components/constants.js
new file mode 100644
index 00000000000..197f20ae24c
--- /dev/null
+++ b/app/assets/javascripts/access_tokens/components/constants.js
@@ -0,0 +1,61 @@
+import { __, s__ } from '~/locale';
+
+export const EVENT_ERROR = 'ajax:error';
+export const EVENT_SUCCESS = 'ajax:success';
+export const FORM_SELECTOR = 'js-new-access-token-form';
+
+export const INITIAL_PAGE = 1;
+export const PAGE_SIZE = 100;
+
+export const FIELDS = [
+ {
+ key: 'name',
+ label: __('Token name'),
+ sortable: true,
+ tdClass: `gl-text-black-normal`,
+ thClass: `gl-text-black-normal`,
+ },
+ {
+ formatter(scopes) {
+ return scopes?.length ? scopes.join(', ') : __('no scopes selected');
+ },
+ key: 'scopes',
+ label: __('Scopes'),
+ sortable: true,
+ tdClass: `gl-text-black-normal`,
+ thClass: `gl-text-black-normal`,
+ },
+ {
+ key: 'createdAt',
+ label: s__('AccessTokens|Created'),
+ sortable: true,
+ tdClass: `gl-text-black-normal`,
+ thClass: `gl-text-black-normal`,
+ },
+ {
+ key: 'lastUsedAt',
+ label: __('Last Used'),
+ sortable: true,
+ tdClass: `gl-text-black-normal`,
+ thClass: `gl-text-black-normal`,
+ },
+ {
+ key: 'expiresAt',
+ label: __('Expires'),
+ sortable: true,
+ tdClass: `gl-text-black-normal`,
+ thClass: `gl-text-black-normal`,
+ },
+ {
+ key: 'role',
+ label: __('Role'),
+ tdClass: `gl-text-black-normal`,
+ thClass: `gl-text-black-normal`,
+ sortable: true,
+ },
+ {
+ key: 'action',
+ label: __('Action'),
+ thClass: `gl-text-black-normal`,
+ },
+];
diff --git a/app/assets/javascripts/access_tokens/components/new_access_token_app.vue b/app/assets/javascripts/access_tokens/components/new_access_token_app.vue
index 5aeabcefad5..a34f3c7dedf 100644
--- a/app/assets/javascripts/access_tokens/components/new_access_token_app.vue
+++ b/app/assets/javascripts/access_tokens/components/new_access_token_app.vue
@@ -4,15 +4,11 @@ import { createAlert, VARIANT_INFO } from '~/flash';
import { __, n__, sprintf } from '~/locale';
import DomElementListener from '~/vue_shared/components/dom_element_listener.vue';
import InputCopyToggleVisibility from '~/vue_shared/components/form/input_copy_toggle_visibility.vue';
-
-const ERROR_EVENT = 'ajax:error';
-const FORM_SELECTOR = '#js-new-access-token-form';
-const SUCCESS_EVENT = 'ajax:success';
+import { EVENT_ERROR, EVENT_SUCCESS, FORM_SELECTOR } from './constants';
export default {
- ERROR_EVENT,
- FORM_SELECTOR,
- SUCCESS_EVENT,
+ EVENT_ERROR,
+ EVENT_SUCCESS,
name: 'NewAccessTokenApp',
components: { DomElementListener, GlAlert, InputCopyToggleVisibility },
i18n: {
@@ -50,13 +46,16 @@ export default {
name: this.$options.tokenInputId,
};
},
+ formSelector() {
+ return `#${FORM_SELECTOR}`;
+ },
label() {
return sprintf(this.$options.i18n.label, { accessTokenType: this.accessTokenType });
},
},
mounted() {
/** @type {HTMLFormElement} */
- this.form = document.querySelector(this.$options.FORM_SELECTOR);
+ this.form = document.getElementById(FORM_SELECTOR);
/** @type {HTMLInputElement} */
this.submitButton = this.form.querySelector('input[type=submit]');
@@ -93,9 +92,9 @@ export default {
<template>
<dom-element-listener
- :selector="$options.FORM_SELECTOR"
- @[$options.ERROR_EVENT]="onError"
- @[$options.SUCCESS_EVENT]="onSuccess"
+ :selector="formSelector"
+ @[$options.EVENT_ERROR]="onError"
+ @[$options.EVENT_SUCCESS]="onSuccess"
>
<div ref="container">
<template v-if="newToken">
diff --git a/app/assets/javascripts/pages/admin/impersonation_tokens/index.js b/app/assets/javascripts/pages/admin/impersonation_tokens/index.js
index 8fbc8dc17bc..d86ac891977 100644
--- a/app/assets/javascripts/pages/admin/impersonation_tokens/index.js
+++ b/app/assets/javascripts/pages/admin/impersonation_tokens/index.js
@@ -1,8 +1,14 @@
-import { initExpiresAtField } from '~/access_tokens';
+import {
+ initAccessTokenTableApp,
+ initExpiresAtField,
+ initNewAccessTokenApp,
+} from '~/access_tokens';
import { initAdminUserActions, initDeleteUserModals } from '~/admin/users';
import initConfirmModal from '~/confirm_modal';
+initAccessTokenTableApp();
+initExpiresAtField();
+initNewAccessTokenApp();
initAdminUserActions();
initDeleteUserModals();
-initExpiresAtField();
initConfirmModal();
diff --git a/app/assets/javascripts/pages/groups/settings/access_tokens/index.js b/app/assets/javascripts/pages/groups/settings/access_tokens/index.js
index dc1bb88bf4b..b9f282a123c 100644
--- a/app/assets/javascripts/pages/groups/settings/access_tokens/index.js
+++ b/app/assets/javascripts/pages/groups/settings/access_tokens/index.js
@@ -1,3 +1,9 @@
-import { initExpiresAtField } from '~/access_tokens';
+import {
+ initAccessTokenTableApp,
+ initExpiresAtField,
+ initNewAccessTokenApp,
+} from '~/access_tokens';
+initAccessTokenTableApp();
initExpiresAtField();
+initNewAccessTokenApp();
diff --git a/app/assets/javascripts/pages/projects/settings/access_tokens/index.js b/app/assets/javascripts/pages/projects/settings/access_tokens/index.js
index dc1bb88bf4b..b9f282a123c 100644
--- a/app/assets/javascripts/pages/projects/settings/access_tokens/index.js
+++ b/app/assets/javascripts/pages/projects/settings/access_tokens/index.js
@@ -1,3 +1,9 @@
-import { initExpiresAtField } from '~/access_tokens';
+import {
+ initAccessTokenTableApp,
+ initExpiresAtField,
+ initNewAccessTokenApp,
+} from '~/access_tokens';
+initAccessTokenTableApp();
initExpiresAtField();
+initNewAccessTokenApp();
diff --git a/app/models/hooks/web_hook_log.rb b/app/models/hooks/web_hook_log.rb
index a95dd0473b6..2f03b3591cf 100644
--- a/app/models/hooks/web_hook_log.rb
+++ b/app/models/hooks/web_hook_log.rb
@@ -7,6 +7,8 @@ class WebHookLog < ApplicationRecord
include CreatedAtFilterable
include PartitionedTable
+ OVERSIZE_REQUEST_DATA = { 'oversize' => true }.freeze
+
self.primary_key = :id
partitioned_by :created_at, strategy: :monthly, retain_for: 3.months
@@ -41,6 +43,10 @@ class WebHookLog < ApplicationRecord
response_status == WebHookService::InternalErrorResponse::ERROR_MESSAGE
end
+ def oversize?
+ request_data == OVERSIZE_REQUEST_DATA
+ end
+
private
def obfuscate_basic_auth
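The new `oversize?` predicate works together with the web hook service change later in this commit: when the queued execution log exceeds Sidekiq's payload size limit, the request data is replaced with the `OVERSIZE_REQUEST_DATA` marker, and the hook log views use the predicate to disable resending. A minimal sketch of that contract (illustrative only, not part of the patch):

```ruby
# A log whose request data was stripped because the payload was too large
# reports itself as oversize; the views then disable the "Resend Request" action.
log = WebHookLog.new(request_data: WebHookLog::OVERSIZE_REQUEST_DATA)
log.oversize? # => true

log = WebHookLog.new(request_data: { 'object_kind' => 'push' })
log.oversize? # => false
```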
diff --git a/app/models/users/callout.rb b/app/models/users/callout.rb
index b3729c84dd6..96094a33e64 100644
--- a/app/models/users/callout.rb
+++ b/app/models/users/callout.rb
@@ -51,12 +51,15 @@ module Users
attention_requests_side_nav: 48,
minute_limit_banner: 49,
preview_user_over_limit_free_plan_alert: 50, # EE-only
- user_reached_limit_free_plan_alert: 51 # EE-only
+ user_reached_limit_free_plan_alert: 51, # EE-only
+ submit_license_usage_data_banner: 52 # EE-only
}
validates :feature_name,
presence: true,
uniqueness: { scope: :user_id },
inclusion: { in: Users::Callout.feature_names.keys }
+
+ scope :with_feature_name, -> (feature_name) { where(feature_name: feature_name) }
end
end
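A possible use of the new `with_feature_name` scope (hypothetical caller; only the scope and the `submit_license_usage_data_banner` enum value come from this diff):

```ruby
# Reset the EE banner callout for every user so it can be displayed again,
# for example from a scheduled worker.
Users::Callout.with_feature_name('submit_license_usage_data_banner').delete_all
```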
diff --git a/app/services/ci/job_artifacts/destroy_all_expired_service.rb b/app/services/ci/job_artifacts/destroy_all_expired_service.rb
index 4070875ffe1..b5dd5b843c6 100644
--- a/app/services/ci/job_artifacts/destroy_all_expired_service.rb
+++ b/app/services/ci/job_artifacts/destroy_all_expired_service.rb
@@ -60,7 +60,7 @@ module Ci
end
def destroy_batch(artifacts)
- Ci::JobArtifacts::DestroyBatchService.new(artifacts).execute
+ Ci::JobArtifacts::DestroyBatchService.new(artifacts, skip_projects_on_refresh: true).execute
end
def loop_timeout?
diff --git a/app/services/ci/job_artifacts/destroy_batch_service.rb b/app/services/ci/job_artifacts/destroy_batch_service.rb
index e8d21f14ee6..49b65f13804 100644
--- a/app/services/ci/job_artifacts/destroy_batch_service.rb
+++ b/app/services/ci/job_artifacts/destroy_batch_service.rb
@@ -17,15 +17,20 @@ module Ci
# +pick_up_at+:: When to pick up for deletion of files
# Returns:
# +Hash+:: A hash with status and destroyed_artifacts_count keys
- def initialize(job_artifacts, pick_up_at: nil, fix_expire_at: fix_expire_at?)
+ def initialize(job_artifacts, pick_up_at: nil, fix_expire_at: fix_expire_at?, skip_projects_on_refresh: false)
@job_artifacts = job_artifacts.with_destroy_preloads.to_a
@pick_up_at = pick_up_at
@fix_expire_at = fix_expire_at
+ @skip_projects_on_refresh = skip_projects_on_refresh
end
# rubocop: disable CodeReuse/ActiveRecord
def execute(update_stats: true)
- track_artifacts_undergoing_stats_refresh
+ if @skip_projects_on_refresh
+ exclude_artifacts_undergoing_stats_refresh
+ else
+ track_artifacts_undergoing_stats_refresh
+ end
# Detect and fix artifacts that had `expire_at` wrongly backfilled by migration
# https://gitlab.com/gitlab-org/gitlab/-/merge_requests/47723
@@ -169,6 +174,21 @@ module Ci
)
end
end
+
+ def exclude_artifacts_undergoing_stats_refresh
+ project_ids = Set.new
+
+ @job_artifacts.reject! do |artifact|
+ next unless artifact.project.refreshing_build_artifacts_size?
+
+ project_ids << artifact.project_id
+ end
+
+ Gitlab::ProjectStatsRefreshConflictsLogger.warn_skipped_artifact_deletion_during_stats_refresh(
+ method: 'Ci::JobArtifacts::DestroyBatchService#execute',
+ project_ids: project_ids
+ )
+ end
end
end
end
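How a caller might opt in to the new behavior, as a sketch (the relation setup is illustrative; the service call and keyword argument are taken from this diff):

```ruby
# Any Ci::JobArtifact relation works here; the service preloads what it needs.
artifacts = Ci::JobArtifact.limit(100)

# Default: artifacts are destroyed and a stats-refresh conflict is only tracked.
Ci::JobArtifacts::DestroyBatchService.new(artifacts).execute

# With the flag, artifacts whose projects are refreshing their build artifact
# size statistics are excluded from the batch and a warning is logged instead.
Ci::JobArtifacts::DestroyBatchService.new(artifacts, skip_projects_on_refresh: true).execute
```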
diff --git a/app/services/web_hook_service.rb b/app/services/web_hook_service.rb
index c0727e52cc3..6526e6a4c5e 100644
--- a/app/services/web_hook_service.rb
+++ b/app/services/web_hook_service.rb
@@ -26,6 +26,12 @@ class WebHookService
end
REQUEST_BODY_SIZE_LIMIT = 25.megabytes
+ # Response body is for UI display only. It does not make much sense to save
+ # whatever the receivers throw back at us
+ RESPONSE_BODY_SIZE_LIMIT = 8.kilobytes
+ # The headers are for debugging purpose. They are displayed on the UI only.
+ RESPONSE_HEADERS_COUNT_LIMIT = 50
+ RESPONSE_HEADERS_SIZE_LIMIT = 1.kilobytes
attr_accessor :hook, :data, :hook_name, :request_options
attr_reader :uniqueness_token
@@ -141,7 +147,7 @@ class WebHookService
execution_duration: execution_duration,
request_headers: build_headers,
request_data: data,
- response_headers: format_response_headers(response),
+ response_headers: safe_response_headers(response),
response_body: safe_response_body(response),
response_status: response.code,
internal_error_message: error_message
@@ -150,8 +156,21 @@ class WebHookService
if @force # executed as part of test - run log-execution inline.
::WebHooks::LogExecutionService.new(hook: hook, log_data: log_data, response_category: category).execute
else
- ::WebHooks::LogExecutionWorker
- .perform_async(hook.id, log_data, category, uniqueness_token)
+ queue_log_execution_with_retry(log_data, category)
+ end
+ end
+
+ def queue_log_execution_with_retry(log_data, category)
+ retried = false
+ begin
+ ::WebHooks::LogExecutionWorker.perform_async(hook.id, log_data, category, uniqueness_token)
+ rescue Gitlab::SidekiqMiddleware::SizeLimiter::ExceedLimitError
+ raise if retried
+
+ # Strip request data
+ log_data[:request_data] = ::WebHookLog::OVERSIZE_REQUEST_DATA
+ retried = true
+ retry
end
end
@@ -181,14 +200,19 @@ class WebHookService
# Make response headers more stylish
# Net::HTTPHeader has downcased hash with arrays: { 'content-type' => ['text/html; charset=utf-8'] }
# This method format response to capitalized hash with strings: { 'Content-Type' => 'text/html; charset=utf-8' }
- def format_response_headers(response)
- response.headers.each_capitalized.to_h
+ # rubocop:disable Style/HashTransformValues
+ def safe_response_headers(response)
+ response.headers.each_capitalized.first(RESPONSE_HEADERS_COUNT_LIMIT).to_h do |header_key, header_value|
+ [enforce_utf8(header_key), string_size_limit(enforce_utf8(header_value), RESPONSE_HEADERS_SIZE_LIMIT)]
+ end
end
+ # rubocop:enable Style/HashTransformValues
def safe_response_body(response)
return '' unless response.body
- response.body.encode('UTF-8', invalid: :replace, undef: :replace, replace: '')
+ response_body = enforce_utf8(response.body)
+ string_size_limit(response_body, RESPONSE_BODY_SIZE_LIMIT)
end
def rate_limited?
@@ -229,4 +253,12 @@ class WebHookService
**Gitlab::ApplicationContext.current
)
end
+
+ def string_size_limit(str, limit)
+ str.truncate_bytes(limit)
+ end
+
+ def enforce_utf8(str)
+ Gitlab::EncodingHelper.encode_utf8(str)
+ end
end
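The combined effect of the new response-body helpers, as a sketch (values are illustrative; `truncate_bytes` is the ActiveSupport method the limit relies on):

```ruby
# Re-encode to UTF-8 and cut on a byte boundary before the body is logged.
body = 'x' * 10_000
safe_body = Gitlab::EncodingHelper.encode_utf8(body).truncate_bytes(WebHookService::RESPONSE_BODY_SIZE_LIMIT)
safe_body.bytesize # => at most 8192 bytes (8 KB)
```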
diff --git a/app/views/admin/hook_logs/show.html.haml b/app/views/admin/hook_logs/show.html.haml
index abfabbb5eb6..2ace46c0acf 100644
--- a/app/views/admin/hook_logs/show.html.haml
+++ b/app/views/admin/hook_logs/show.html.haml
@@ -4,6 +4,9 @@
%hr
-= link_to _("Resend Request"), retry_admin_hook_hook_log_path(@hook, @hook_log), method: :post, class: "btn gl-button btn-default float-right gl-ml-3"
+- if @hook_log.oversize?
+ = button_tag _("Resend Request"), class: "btn gl-button btn-default float-right gl-ml-3 has-tooltip", disabled: true, title: _("Request data is too large")
+- else
+ = link_to _("Resend Request"), retry_admin_hook_hook_log_path(@hook, @hook_log), method: :post, class: "btn gl-button btn-default float-right gl-ml-3"
= render partial: 'shared/hook_logs/content', locals: { hook_log: @hook_log }
diff --git a/app/views/projects/hook_logs/show.html.haml b/app/views/projects/hook_logs/show.html.haml
index 1f71e1b7055..2e2e7ba848e 100644
--- a/app/views/projects/hook_logs/show.html.haml
+++ b/app/views/projects/hook_logs/show.html.haml
@@ -7,6 +7,9 @@
%hr
-= link_to _("Resend Request"), @hook_log.present.retry_path, method: :post, class: "btn gl-button btn-default float-right gl-ml-3"
+- if @hook_log.oversize?
+ = button_tag _("Resend Request"), class: "btn gl-button btn-default float-right gl-ml-3 has-tooltip", disabled: true, title: _("Request data is too large")
+- else
+ = link_to _("Resend Request"), @hook_log.present.retry_path, method: :post, class: "btn gl-button btn-default float-right gl-ml-3"
= render partial: 'shared/hook_logs/content', locals: { hook_log: @hook_log }
diff --git a/app/views/shared/hook_logs/_content.html.haml b/app/views/shared/hook_logs/_content.html.haml
index 932971402a2..8b5b4b6e5fa 100644
--- a/app/views/shared/hook_logs/_content.html.haml
+++ b/app/views/shared/hook_logs/_content.html.haml
@@ -30,8 +30,11 @@
%h4.gl-mt-6= _('Request')
%pre
- :escaped
- #{Gitlab::Json.pretty_generate(hook_log.request_data)}
+ - if hook_log.oversize?
+ = _('Request data is too large')
+ - else
+ :escaped
+ #{Gitlab::Json.pretty_generate(hook_log.request_data)}
%h5= _('Headers')
%pre
diff --git a/config/initializers/1_settings.rb b/config/initializers/1_settings.rb
index b38f4b306b3..8de514e9455 100644
--- a/config/initializers/1_settings.rb
+++ b/config/initializers/1_settings.rb
@@ -775,6 +775,9 @@ Gitlab.ee do
Settings.cron_jobs['ci_runners_stale_group_runners_prune_worker_cron'] ||= Settingslogic.new({})
Settings.cron_jobs['ci_runners_stale_group_runners_prune_worker_cron']['cron'] ||= '30 * * * *'
Settings.cron_jobs['ci_runners_stale_group_runners_prune_worker_cron']['job_class'] = 'Ci::Runners::StaleGroupRunnersPruneCronWorker'
+ Settings.cron_jobs['licenses_reset_submit_license_usage_data_banner'] ||= Settingslogic.new({})
+ Settings.cron_jobs['licenses_reset_submit_license_usage_data_banner']['cron'] ||= "0 0 * * *"
+ Settings.cron_jobs['licenses_reset_submit_license_usage_data_banner']['job_class'] = 'Licenses::ResetSubmitLicenseUsageDataBannerWorker'
end
#
diff --git a/doc/.vale/gitlab/Uppercase.yml b/doc/.vale/gitlab/Uppercase.yml
index c9053c20d0e..d7f4d75a012 100644
--- a/doc/.vale/gitlab/Uppercase.yml
+++ b/doc/.vale/gitlab/Uppercase.yml
@@ -48,7 +48,9 @@ exceptions:
- CVSS
- DAG
- DAST
+ - DDL
- DHCP
+ - DML
- DNS
- DOM
- DSA
@@ -151,6 +153,7 @@ exceptions:
- POST
- PROXY
- PUT
+ - QPS
- RAID
- RAM
- RBAC
diff --git a/doc/administration/postgresql/pgbouncer.md b/doc/administration/postgresql/pgbouncer.md
index 5a39565cef7..8ae2b6497f8 100644
--- a/doc/administration/postgresql/pgbouncer.md
+++ b/doc/administration/postgresql/pgbouncer.md
@@ -227,12 +227,12 @@ the database. Each of the listed services below use the following formula to def
- `headroom` can be configured via `DB_POOL_HEADROOM` environment variable (default to `10`)
To calculate the `default_pool_size`, multiply the number of instances of `puma`, `sidekiq` and `geo-logcursor` by the
-number of connections each can consume as per listed above. The total will be the suggested `default_pool_size`.
+number of connections each can consume as per listed above. The total is the suggested `default_pool_size`.
If you are using more than one PgBouncer with an internal Load Balancer, you may be able to divide the
`default_pool_size` by the number of instances to guarantee an evenly distributed load between them.
-The `pgbouncer['max_client_conn']` is the hard-limit of connections PgBouncer can accept. It's unlikely you will need
+The `pgbouncer['max_client_conn']` is the hard limit of connections PgBouncer can accept. It's unlikely you need
to change this. If you are hitting that limit, you may want to consider adding additional PgBouncers with an internal
Load Balancer.
diff --git a/doc/administration/troubleshooting/postgresql.md b/doc/administration/troubleshooting/postgresql.md
index 7f32d01d617..cdbf786bdb2 100644
--- a/doc/administration/troubleshooting/postgresql.md
+++ b/doc/administration/troubleshooting/postgresql.md
@@ -67,7 +67,7 @@ This section is for links to information elsewhere in the GitLab documentation.
- Required extension: `btree_gist`
- Errors like this in the `production/sidekiq` log; see:
- [Set default_transaction_isolation into read committed](https://docs.gitlab.com/omnibus/settings/database.html#set-default_transaction_isolation-into-read-committed):
+ [Set `default_transaction_isolation` into read committed](https://docs.gitlab.com/omnibus/settings/database.html#set-default_transaction_isolation-into-read-committed):
```plaintext
ActiveRecord::StatementInvalid PG::TRSerializationFailure: ERROR: could not serialize access due to concurrent update
@@ -138,8 +138,12 @@ idle_in_transaction_session_timeout = 60s
Quoting from issue [#30528](https://gitlab.com/gitlab-org/gitlab/-/issues/30528):
+<!-- vale gitlab.FutureTense = NO -->
+
> "If a deadlock is hit, and we resolve it through aborting the transaction after a short period, then the retry mechanisms we already have will make the deadlocked piece of work try again, and it's unlikely we'll deadlock multiple times in a row."
+<!-- vale gitlab.FutureTense = YES -->
+
NOTE:
In Support, our general approach to reconfiguring timeouts (applies also to the
HTTP stack) is that it's acceptable to do it temporarily as a workaround. If it
@@ -148,9 +152,9 @@ problem more completely, implement a hot fix, or make some other change that
addresses the root cause. Generally, the timeouts should be put back to
reasonable defaults after the root cause is resolved.
-In this case, the guidance we had from development was to drop deadlock_timeout
-or statement_timeout, but to leave the third setting at 60s. Setting
-idle_in_transaction protects the database from sessions potentially hanging for
+In this case, the guidance we had from development was to drop `deadlock_timeout`
+or `statement_timeout`, but to leave the third setting at 60 seconds. Setting
+`idle_in_transaction` protects the database from sessions potentially hanging for
days. There's more discussion in [the issue relating to introducing this timeout on GitLab.com](https://gitlab.com/gitlab-com/gl-infra/production/-/issues/1053).
PostgresSQL defaults:
@@ -161,7 +165,7 @@ PostgresSQL defaults:
Comments in issue [#30528](https://gitlab.com/gitlab-org/gitlab/-/issues/30528)
indicate that these should both be set to at least a number of minutes for all
Omnibus GitLab installations (so they don't hang indefinitely). However, 15s
-for statement_timeout is very short, and will only be effective if the
+for `statement_timeout` is very short, and is only effective if the
underlying infrastructure is very performant.
See current settings with:
diff --git a/doc/api/graphql/reference/index.md b/doc/api/graphql/reference/index.md
index 8263acb0052..f96611cf88b 100644
--- a/doc/api/graphql/reference/index.md
+++ b/doc/api/graphql/reference/index.md
@@ -19639,6 +19639,7 @@ Name of the feature that the callout is for.
| <a id="usercalloutfeaturenameenumstorage_enforcement_banner_fourth_enforcement_threshold"></a>`STORAGE_ENFORCEMENT_BANNER_FOURTH_ENFORCEMENT_THRESHOLD` | Callout feature name for storage_enforcement_banner_fourth_enforcement_threshold. |
| <a id="usercalloutfeaturenameenumstorage_enforcement_banner_second_enforcement_threshold"></a>`STORAGE_ENFORCEMENT_BANNER_SECOND_ENFORCEMENT_THRESHOLD` | Callout feature name for storage_enforcement_banner_second_enforcement_threshold. |
| <a id="usercalloutfeaturenameenumstorage_enforcement_banner_third_enforcement_threshold"></a>`STORAGE_ENFORCEMENT_BANNER_THIRD_ENFORCEMENT_THRESHOLD` | Callout feature name for storage_enforcement_banner_third_enforcement_threshold. |
+| <a id="usercalloutfeaturenameenumsubmit_license_usage_data_banner"></a>`SUBMIT_LICENSE_USAGE_DATA_BANNER` | Callout feature name for submit_license_usage_data_banner. |
| <a id="usercalloutfeaturenameenumsuggest_pipeline"></a>`SUGGEST_PIPELINE` | Callout feature name for suggest_pipeline. |
| <a id="usercalloutfeaturenameenumsuggest_popover_dismissed"></a>`SUGGEST_POPOVER_DISMISSED` | Callout feature name for suggest_popover_dismissed. |
| <a id="usercalloutfeaturenameenumtabs_position_highlight"></a>`TABS_POSITION_HIGHLIGHT` | Callout feature name for tabs_position_highlight. |
diff --git a/doc/architecture/blueprints/database/scalability/patterns/read_mostly.md b/doc/architecture/blueprints/database/scalability/patterns/read_mostly.md
index 75fcf10f048..0780ae3c4d5 100644
--- a/doc/architecture/blueprints/database/scalability/patterns/read_mostly.md
+++ b/doc/architecture/blueprints/database/scalability/patterns/read_mostly.md
@@ -114,7 +114,7 @@ consider in this pattern (see [#327483](https://gitlab.com/gitlab-org/gitlab/-/i
To reduce the database overhead, we implement a cache for the data and thus significantly
reduce the query frequency on the database side. There are different scopes for caching available:
-- `RequestStore`: per-request in-memory cache (based on [request_store gem](https://github.com/steveklabnik/request_store))
+- `RequestStore`: per-request in-memory cache (based on [`request_store` gem](https://github.com/steveklabnik/request_store))
- [`ProcessMemoryCache`](https://gitlab.com/gitlab-org/gitlab/blob/master/lib/gitlab/process_memory_cache.rb#L4): per-process in-memory cache (a `ActiveSupport::Cache::MemoryStore`)
- [`Gitlab::Redis::Cache`](https://gitlab.com/gitlab-org/gitlab/blob/master/lib/gitlab/redis/cache.rb) and `Rails.cache`: full-blown cache in Redis
diff --git a/doc/architecture/blueprints/database/scalability/patterns/time_decay.md b/doc/architecture/blueprints/database/scalability/patterns/time_decay.md
index 7399fd8048d..0bc4ca73464 100644
--- a/doc/architecture/blueprints/database/scalability/patterns/time_decay.md
+++ b/doc/architecture/blueprints/database/scalability/patterns/time_decay.md
@@ -71,7 +71,7 @@ The second and most important characteristic of time-decay data is that most of
able to implicitly or explicitly access the data using a date filter,
**restricting our results based on a time-related dimension**.
-There can be many such dimensions, but we are only going to focus on the creation date as it is both
+There can be many such dimensions, but we focus only on the creation date as it is both
the most commonly used, and the one that we can control and optimize against. It:
- Is immutable.
diff --git a/doc/ci/yaml/index.md b/doc/ci/yaml/index.md
index 07bcd0beb92..d7da5b4d045 100644
--- a/doc/ci/yaml/index.md
+++ b/doc/ci/yaml/index.md
@@ -339,6 +339,11 @@ The order of the items in `stages` defines the execution order for jobs:
- Jobs in the same stage run in parallel.
- Jobs in the next stage run after the jobs from the previous stage complete successfully.
+If a pipeline contains only jobs in the `.pre` or `.post` stages, it does not run.
+There must be at least one other job in a different stage. `.pre` and `.post` stages
+can be used in [required pipeline configuration](../../user/admin_area/settings/continuous_integration.md#required-pipeline-configuration)
+to define compliance jobs that must run before or after project pipeline jobs.
+
**Keyword type**: Global keyword.
**Example of `stages`**:
@@ -3510,7 +3515,8 @@ Use the `.pre` stage to make a job run at the start of a pipeline. `.pre` is
always the first stage in a pipeline. User-defined stages execute after `.pre`.
You do not have to define `.pre` in [`stages`](#stages).
-You must have a job in at least one stage other than `.pre` or `.post`.
+If a pipeline contains only jobs in the `.pre` or `.post` stages, it does not run.
+There must be at least one other job in a different stage.
**Keyword type**: You can only use it with a job's `stage` keyword.
@@ -3545,7 +3551,8 @@ Use the `.post` stage to make a job run at the end of a pipeline. `.post`
is always the last stage in a pipeline. User-defined stages execute before `.post`.
You do not have to define `.post` in [`stages`](#stages).
-You must have a job in at least one stage other than `.pre` or `.post`.
+If a pipeline contains only jobs in the `.pre` or `.post` stages, it does not run.
+There must be at least one other job in a different stage.
**Keyword type**: You can only use it with a job's `stage` keyword.
diff --git a/doc/development/database/efficient_in_operator_queries.md b/doc/development/database/efficient_in_operator_queries.md
index 4f18c937198..ef6ef232c9e 100644
--- a/doc/development/database/efficient_in_operator_queries.md
+++ b/doc/development/database/efficient_in_operator_queries.md
@@ -26,7 +26,7 @@ Pagination may be used to fetch subsequent records.
Example tasks requiring querying nested domain objects from the group level:
- Show first 20 issues by creation date or due date from the group `gitlab-org`.
-- Show first 20 merge_requests by merged at date from the group `gitlab-com`.
+- Show first 20 merge requests by merged at date from the group `gitlab-com`.
Unfortunately, ordered group-level queries typically perform badly
as their executions require heavy I/O, memory, and computations.
@@ -877,7 +877,7 @@ this cursor would be (`2020-01-05`, `3`) for `project_id=9`.
### Initializing the recursive CTE query
-For the initial recursive query, we'll need to produce exactly one row, we call this the
+For the initial recursive query, we need to produce exactly one row, we call this the
initializer query (`initializer_query`).
Use `ARRAY_AGG` function to compact the initial result set into a single row
diff --git a/doc/development/database/loose_foreign_keys.md b/doc/development/database/loose_foreign_keys.md
index 6889b9123ca..1a6d995e78c 100644
--- a/doc/development/database/loose_foreign_keys.md
+++ b/doc/development/database/loose_foreign_keys.md
@@ -515,13 +515,13 @@ referenced child tables.
### Database structure
The feature relies on triggers installed on the parent tables. When a parent record is deleted,
-the trigger will automatically insert a new record into the `loose_foreign_keys_deleted_records`
+the trigger automatically inserts a new record into the `loose_foreign_keys_deleted_records`
database table.
-The inserted record will store the following information about the deleted record:
+The inserted record stores the following information about the deleted record:
- `fully_qualified_table_name`: name of the database table where the record was located.
-- `primary_key_value`: the ID of the record, the value will be present in the child tables as
+- `primary_key_value`: the ID of the record, the value is present in the child tables as
the foreign key value. At the moment, composite primary keys are not supported, the parent table
must have an `id` column.
- `status`: defaults to pending, represents the status of the cleanup process.
@@ -532,7 +532,7 @@ several runs.
#### Database decomposition
-The `loose_foreign_keys_deleted_records` table will exist on both database servers (Ci and Main)
+The `loose_foreign_keys_deleted_records` table exists on both database servers (`ci` and `main`)
after the [database decomposition](https://gitlab.com/groups/gitlab-org/-/epics/6168). The worker
will determine which parent tables belong to which database by reading the
`lib/gitlab/database/gitlab_schemas.yml` YAML file.
@@ -547,10 +547,10 @@ Example:
- `ci_builds`
- `ci_pipelines`
-When the worker is invoked for the Ci database, the worker will load deleted records only from the
+When the worker is invoked for the `ci` database, the worker loads deleted records only from the
`ci_builds` and `ci_pipelines` tables. During the cleanup process, `DELETE` and `UPDATE` queries
-will mostly run on tables located in the Main database. In this example, one `UPDATE` query will
-nullify the `merge_requests.head_pipeline_id` column.
+mostly run on tables located in the Main database. In this example, one `UPDATE` query
+nullifies the `merge_requests.head_pipeline_id` column.
#### Database partitioning
@@ -561,7 +561,7 @@ strategy was considered for the feature but due to the large data volume we deci
new strategy.
A deleted record is considered fully processed when all its direct children records have been
-cleaned up. When this happens, the loose foreign key worker will update the `status` column of
+cleaned up. When this happens, the loose foreign key worker updates the `status` column of
the deleted record. After this step, the record is no longer needed.
The sliding partitioning strategy provides an efficient way of cleaning up old, unused data by
@@ -591,7 +591,7 @@ Partitions: gitlab_partitions_dynamic.loose_foreign_keys_deleted_records_84 FOR
```
The `partition` column controls the insert direction, the `partition` value determines which
-partition will get the deleted rows inserted via the trigger. Notice that the default value of
+partition gets the deleted rows inserted via the trigger. Notice that the default value of
the `partition` table matches with the value of the list partition (84). In `INSERT` query
within the trigger the value of the `partition` is omitted, the trigger always relies on the
default value of the column.
@@ -709,12 +709,12 @@ To mitigate these issues, several limits are applied when the worker runs.
The limit rules are implemented in the `LooseForeignKeys::ModificationTracker` class. When one of
the limits (record modification count, time limit) is reached the processing is stopped
-immediately. After some time, the next scheduled worker will continue the cleanup process.
+immediately. After some time, the next scheduled worker continues the cleanup process.
#### Performance characteristics
The database trigger on the parent tables will **decrease** the record deletion speed. Each
-statement that removes rows from the parent table will invoke the trigger to insert records
+statement that removes rows from the parent table invokes the trigger to insert records
into the `loose_foreign_keys_deleted_records` table.
The queries within the cleanup worker are fairly efficient index scans, with limits in place
diff --git a/doc/development/database/migrations_for_multiple_databases.md b/doc/development/database/migrations_for_multiple_databases.md
index 2500071f4cf..df9607f5672 100644
--- a/doc/development/database/migrations_for_multiple_databases.md
+++ b/doc/development/database/migrations_for_multiple_databases.md
@@ -260,7 +260,7 @@ the `database_tasks: false` set. `gitlab:db:validate_config` always runs before
## Validation
-Validation in a nutshell uses [pg_query](https://github.com/pganalyze/pg_query) to analyze
+Validation in a nutshell uses [`pg_query`](https://github.com/pganalyze/pg_query) to analyze
each query and classify tables with information from [`gitlab_schema.yml`](multiple_databases.md#gitlab-schema).
The migration is skipped if the specified `gitlab_schema` is outside of a list of schemas
managed by a given database connection (`Gitlab::Database::gitlab_schemas_for_connection`).
@@ -427,7 +427,7 @@ updating all `ci_pipelines`, you would set
As with all DML migrations, you cannot query another database outside of
`restrict_gitlab_migration` or `gitlab_shared`. If you need to query another database,
-you'll likely need to separate these into two migrations somehow.
+separate the migrations.
Because the actual migration logic (not the queueing step) for background
migrations runs in a Sidekiq worker, the logic can perform DML queries on
diff --git a/doc/development/database/strings_and_the_text_data_type.md b/doc/development/database/strings_and_the_text_data_type.md
index d764e54aa76..73e023f8d45 100644
--- a/doc/development/database/strings_and_the_text_data_type.md
+++ b/doc/development/database/strings_and_the_text_data_type.md
@@ -8,7 +8,7 @@ info: To determine the technical writer assigned to the Stage/Group associated w
> [Introduced](https://gitlab.com/gitlab-org/gitlab/-/issues/30453) in GitLab 13.0.
-When adding new columns that will be used to store strings or other textual information:
+When adding new columns to store strings or other textual information:
1. We always use the `text` data type instead of the `string` data type.
1. `text` columns should always have a limit set, either by using the `create_table` with
@@ -142,8 +142,8 @@ instance of GitLab could have such records, so we would follow the same process
We first add the limit as a `NOT VALID` check constraint to the table, which enforces consistency when
new records are inserted or current records are updated.
-In the example above, the existing issues with more than 1024 characters in their title will not be
-affected and you'll be still able to update records in the `issues` table. However, when you'd try
+In the example above, the existing issues with more than 1024 characters in their title are not
+affected, and you are still able to update records in the `issues` table. However, when you'd try
to update the `title_html` with a title that has more than 1024 characters, the constraint causes
a database error.
@@ -182,7 +182,7 @@ end
#### Data migration to fix existing records (current release)
The approach here depends on the data volume and the cleanup strategy. The number of records that must
-be fixed on GitLab.com is a nice indicator that will help us decide whether to use a post-deployment
+be fixed on GitLab.com is a nice indicator that helps us decide whether to use a post-deployment
migration or a background data migration:
- If the data volume is less than `1,000` records, then the data migration can be executed within the post-migration.
@@ -233,7 +233,7 @@ You can find more information on the guide about [background migrations](backgro
#### Validate the text limit (next release)
-Validating the text limit will scan the whole table and make sure that each record is correct.
+Validating the text limit scans the whole table, and makes sure that each record is correct.
Still in our example, for the 13.1 milestone (next), we run the `validate_text_limit` migration
helper in a final post-deployment migration,
@@ -276,11 +276,11 @@ end
## Text limit constraints on large tables
If you have to clean up a text column for a really [large table](https://gitlab.com/gitlab-org/gitlab/-/blob/master/rubocop/rubocop-migrations.yml#L3)
-(for example, the `artifacts` in `ci_builds`), your background migration will go on for a while and
-it will need an additional [background migration cleaning up](background_migrations.md#cleaning-up)
+(for example, the `artifacts` in `ci_builds`), your background migration goes on for a while and
+it needs an additional [background migration cleaning up](background_migrations.md#cleaning-up)
in the release after adding the data migration.
-In that rare case you will need 3 releases end-to-end:
+In that rare case you need 3 releases end-to-end:
1. Release `N.M` - Add the text limit and the background migration to fix the existing records.
1. Release `N.M+1` - Cleanup the background migration.
diff --git a/doc/development/database_query_comments.md b/doc/development/database_query_comments.md
index 9cfa5540c83..2798071bc06 100644
--- a/doc/development/database_query_comments.md
+++ b/doc/development/database_query_comments.md
@@ -12,7 +12,7 @@ queries generated by ActiveRecord.
It is very useful for tracing problematic queries back to the application source.
-An engineer during an on-call incident will have the full context of a query
+An engineer during an on-call incident has the full context of a query
and its application source from the comments.
## Metadata information in comments
@@ -24,7 +24,7 @@ Queries generated from **Rails** include the following metadata in comments:
- `endpoint_id`
- `line`
-Queries generated from **Sidekiq** workers will include the following metadata
+Queries generated from **Sidekiq** workers include the following metadata
in comments:
- `application`
diff --git a/doc/development/insert_into_tables_in_batches.md b/doc/development/insert_into_tables_in_batches.md
index c8bb4ce1c6d..ebed3d16319 100644
--- a/doc/development/insert_into_tables_in_batches.md
+++ b/doc/development/insert_into_tables_in_batches.md
@@ -48,7 +48,7 @@ records = [MyModel.new, ...]
MyModel.bulk_insert!(records)
```
-Note that calls to `bulk_insert!` will always attempt to insert _new records_. If instead
+Calls to `bulk_insert!` always attempt to insert _new records_. If instead
you would like to replace existing records with new values, while still inserting those
that do not already exist, then you can use `bulk_upsert!`:
@@ -59,9 +59,9 @@ MyModel.bulk_upsert!(records, unique_by: [:name])
```
In this example, `unique_by` specifies the columns by which records are considered to be
-unique and as such will be updated if they existed prior to insertion. For example, if
+unique and as such are updated if they existed prior to insertion. For example, if
`existing_model` has a `name` attribute, and if a record with the same `name` value already
-exists, its fields will be updated with those of `existing_model`.
+exists, its fields are updated with those of `existing_model`.
The `unique_by` parameter can also be passed as a `Symbol`, in which case it specifies
a database index by which a column is considered unique:
@@ -72,8 +72,8 @@ MyModel.bulk_insert!(records, unique_by: :index_on_name)
### Record validation
-The `bulk_insert!` method guarantees that `records` will be inserted transactionally, and
-will run validations on each record prior to insertion. If any record fails to validate,
+The `bulk_insert!` method guarantees that `records` are inserted transactionally, and
+runs validations on each record prior to insertion. If any record fails to validate,
an error is raised and the transaction is rolled back. You can turn off validations via
the `:validate` option:
@@ -83,7 +83,7 @@ MyModel.bulk_insert!(records, validate: false)
### Batch size configuration
-In those cases where the number of `records` is above a given threshold, insertions will
+In those cases where the number of `records` is above a given threshold, insertions
occur in multiple batches. The default batch size is defined in
[`BulkInsertSafe::DEFAULT_BATCH_SIZE`](https://gitlab.com/gitlab-org/gitlab/-/blob/master/app/models/concerns/bulk_insert_safe.rb).
Assuming a default threshold of 500, inserting 950 records
@@ -95,7 +95,7 @@ MyModel.bulk_insert!(records, batch_size: 100)
```
Assuming the same number of 950 records, this would result in 10 batches being written instead.
-Since this will also affect the number of `INSERT`s that occur, make sure you measure the
+Since this also affects the number of `INSERT` statements that occur, make sure you measure the
performance impact this might have on your code. There is a trade-off between the number of
`INSERT` statements the database has to process and the size and cost of each `INSERT`.
@@ -127,7 +127,7 @@ records are inserted in bulk, we currently prevent their use.
The specifics around which callbacks are explicitly allowed are defined in
[`BulkInsertSafe`](https://gitlab.com/gitlab-org/gitlab/-/blob/master/app/models/concerns/bulk_insert_safe.rb).
Consult the module source code for details. If your class uses callbacks that are not explicitly designated
-safe and you `include BulkInsertSafe` the application will fail with an error.
+safe and you `include BulkInsertSafe` the application fails with an error.
### `BulkInsertSafe` versus `InsertAll`
@@ -155,7 +155,7 @@ owner = OwnerModel.new(owned_relations: array_of_owned_relations)
owner.save!
```
-This will issue a single `INSERT`, and transaction, for every record in `owned_relations`, which is inefficient if
+This issues a single `INSERT`, and transaction, for every record in `owned_relations`, which is inefficient if
`array_of_owned_relations` is large. To remedy this, the `BulkInsertableAssociations` concern can be
used to declare that the owner defines associations that are safe for bulk insertion:
@@ -180,8 +180,8 @@ BulkInsertableAssociations.with_bulk_insert do
end
```
-Note that you can still save relations that are not `BulkInsertSafe` in this block; they will
-simply be treated as if you had invoked `save` from outside the block.
+You can still save relations that are not `BulkInsertSafe` in this block; they
+simply are treated as if you had invoked `save` from outside the block.
## Known limitations
@@ -192,5 +192,5 @@ There are a few restrictions to how these APIs can be used:
- It does not yet support `has_many through: ...` relations.
Moreover, input data should either be limited to around 1000 records at most,
-or already batched prior to calling bulk insert. The `INSERT` statement will run in a single
+or already batched prior to calling bulk insert. The `INSERT` statement runs in a single
transaction, so for large amounts of records it may negatively affect database stability.
diff --git a/doc/development/ordering_table_columns.md b/doc/development/ordering_table_columns.md
index 42e5588e010..7cd3d4fb208 100644
--- a/doc/development/ordering_table_columns.md
+++ b/doc/development/ordering_table_columns.md
@@ -24,15 +24,15 @@ The first column is a 4-byte integer. The next is text of variable length. The
bytes. To meet the alignment requirements, four zeros are to be added right
after the first column, so `id` occupies 4 bytes, then 4 bytes of alignment
padding, and only next `name` is being stored. Therefore, in this case, 8 bytes
-will be spent for storing a 4-byte integer.
+are spent for storing a 4-byte integer.
The space between rows is also subject to alignment padding. The `user_id`
-column takes only 4 bytes, and on 64-bit platform, 4 zeroes will be added for
+column takes only 4 bytes, and on 64-bit platform, 4 zeroes are added for
alignment padding, to allow storing the next row beginning with the "clear" word.
As a result, the actual size of each column would be (omitting variable length
data and 24-byte tuple header): 8 bytes, variable, 8 bytes. This means that
-each row will require at least 16 bytes for the two 4-byte integers. If a table
+each row requires at least 16 bytes for the two 4-byte integers. If a table
has a few rows this is not an issue. However, once you start storing millions of
rows you can save space by using a different order. For the above example, the
ideal column order would be the following:
@@ -49,7 +49,7 @@ or
In these examples, the `id` and `user_id` columns are packed together, which
means we only need 8 bytes to store _both_ of them. This in turn means each row
-will require 8 bytes less space.
+requires 8 bytes less space.
Since Ruby on Rails 5.1, the default data type for IDs is `bigint`, which uses 8 bytes.
We are using `integer` in the examples to showcase a more realistic reordering scenario.
@@ -57,7 +57,7 @@ We are using `integer` in the examples to showcase a more realistic reordering s
## Type Sizes
While the [PostgreSQL documentation](https://www.postgresql.org/docs/current/datatype.html) contains plenty
-of information we will list the sizes of common types here so it's easier to
+of information we list the sizes of common types here so it's easier to
look them up. Here "word" refers to the word size, which is 4 bytes for a 32
bits platform and 8 bytes for a 64 bits platform.
@@ -69,7 +69,7 @@ bits platform and 8 bytes for a 64 bits platform.
| `real` | 4 bytes | 1 word |
| `double precision` | 8 bytes | 8 bytes |
| `boolean` | 1 byte | not needed |
-| `text` / `string` | variable, 1 byte plus the data | 1 word |
+| `text` / `string` | variable, 1 byte plus the data | 1 word |
| `bytea` | variable, 1 or 4 bytes plus the data | 1 word |
| `timestamp` | 8 bytes | 8 bytes |
| `timestamptz` | 8 bytes | 8 bytes |
@@ -77,7 +77,7 @@ bits platform and 8 bytes for a 64 bits platform.
A "variable" size means the actual size depends on the value being stored. If
PostgreSQL determines this can be embedded directly into a row it may do so, but
-for very large values it will store the data externally and store a pointer (of
+for very large values it stores the data externally and store a pointer (of
1 word in size) in the column. Because of this variable sized columns should
always be at the end of a table.
diff --git a/doc/development/query_performance.md b/doc/development/query_performance.md
index 87e41c78e19..4fe27d42c38 100644
--- a/doc/development/query_performance.md
+++ b/doc/development/query_performance.md
@@ -11,7 +11,7 @@ This document describes various guidelines to follow when optimizing SQL queries
When you are optimizing your SQL queries, there are two dimensions to pay attention to:
1. The query execution time. This is paramount as it reflects how the user experiences GitLab.
-1. The query plan. Optimizing the query plan is important in allowing queries to independently scale over time. Realizing that an index will keep a query performing well as the table grows before the query degrades is an example of why we analyze these plans.
+1. The query plan. Optimizing the query plan is important in allowing queries to independently scale over time. Realizing that an index keeps a query performing well as the table grows before the query degrades is an example of why we analyze these plans.
## Timing guidelines for queries
@@ -39,9 +39,9 @@ cache, or what PostgreSQL calls shared buffers. This is the "warm cache" query.
When analyzing an [`EXPLAIN` plan](understanding_explain_plans.md), you can see
the difference not only in the timing, but by looking at the output for `Buffers`
by running your explain with `EXPLAIN(analyze, buffers)`. [Database Lab](understanding_explain_plans.md#database-lab-engine)
-will automatically include these options.
+automatically includes these options.
-If you are making a warm cache query, you will only see the `shared hits`.
+If you are making a warm cache query, you see only the `shared hits`.
For example in #database-lab:
@@ -57,7 +57,7 @@ Or in the explain plan from `psql`:
Buffers: shared hit=7323
```
-If the cache is cold, you will also see `reads`.
+If the cache is cold, you also see `reads`.
In #database-lab:
diff --git a/doc/development/swapping_tables.md b/doc/development/swapping_tables.md
index d6c5b8f0662..efb481ccf35 100644
--- a/doc/development/swapping_tables.md
+++ b/doc/development/swapping_tables.md
@@ -10,12 +10,12 @@ Sometimes you need to replace one table with another. For example, when
migrating data in a very large table it's often better to create a copy of the
table and insert & migrate the data into this new table in the background.
-Let's say you want to swap the table "events" with "events_for_migration". In
+Let's say you want to swap the table `events` with `events_for_migration`. In
this case you need to follow 3 steps:
-1. Rename "events" to "events_temporary"
-1. Rename "events_for_migration" to "events"
-1. Rename "events_temporary" to "events_for_migration"
+1. Rename `events` to `events_temporary`
+1. Rename `events_for_migration` to `events`
+1. Rename `events_temporary` to `events_for_migration`
Rails allows you to do this using the `rename_table` method:
@@ -27,7 +27,7 @@ rename_table :events_temporary, :events_for_migration
This does not require any downtime as long as the 3 `rename_table` calls are
executed in the _same_ database transaction. Rails by default uses database
-transactions for migrations, but if it doesn't you'll need to start one
+transactions for migrations, but if it doesn't you need to start one
manually:
```ruby
@@ -45,7 +45,7 @@ PostgreSQL you can use the `reset_pk_sequence!` method like so:
reset_pk_sequence!('events')
```
-Failure to reset the primary keys will result in newly created rows starting
+Failure to reset the primary keys results in newly created rows starting
with an ID value of 1. Depending on the existing data this can then lead to
duplicate key constraints from popping up, preventing users from creating new
data.
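At the SQL level, the swap and the sequence reset amount to roughly the following (a sketch of what `rename_table` and `reset_pk_sequence!` issue under the hood; the exact statements may differ):
```sql
BEGIN;
ALTER TABLE events RENAME TO events_temporary;
ALTER TABLE events_for_migration RENAME TO events;
ALTER TABLE events_temporary RENAME TO events_for_migration;
COMMIT;

-- Point the primary key sequence of the new "events" table at the current
-- maximum ID so newly inserted rows do not start again from 1:
SELECT setval(
  pg_get_serial_sequence('events', 'id'),
  COALESCE((SELECT MAX(id) FROM events), 1)
);
```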
diff --git a/doc/development/understanding_explain_plans.md b/doc/development/understanding_explain_plans.md
index 3fc071bc5ff..17fcd5b3e88 100644
--- a/doc/development/understanding_explain_plans.md
+++ b/doc/development/understanding_explain_plans.md
@@ -7,7 +7,7 @@ info: To determine the technical writer assigned to the Stage/Group associated w
# Understanding EXPLAIN plans
PostgreSQL allows you to obtain query plans using the `EXPLAIN` command. This
-command can be invaluable when trying to determine how a query will perform.
+command can be invaluable when trying to determine how a query performs.
You can use this command directly in your SQL query, as long as the query starts
with it:
@@ -26,7 +26,7 @@ Aggregate (cost=922411.76..922411.77 rows=1 width=8)
Filter: (visibility_level = ANY ('{0,20}'::integer[]))
```
-When using _just_ `EXPLAIN`, PostgreSQL won't actually execute our query,
+When using _just_ `EXPLAIN`, PostgreSQL does not actually execute our query,
instead it produces an _estimated_ execution plan based on the available
statistics. This means the actual plan can differ quite a bit. Fortunately,
PostgreSQL provides us with the option to execute the query as well. To do so,
@@ -39,7 +39,7 @@ FROM projects
WHERE visibility_level IN (0, 20);
```
-This will produce:
+This produces:
```sql
Aggregate (cost=922420.60..922420.61 rows=1 width=8) (actual time=3428.535..3428.535 rows=1 loops=1)
@@ -54,7 +54,7 @@ As we can see this plan is quite different, and includes a lot more data. Let's
discuss this step by step.
Because `EXPLAIN ANALYZE` executes the query, care should be taken when using a
-query that will write data or might time out. If the query modifies data,
+query that writes data or might time out. If the query modifies data,
consider wrapping it in a transaction that rolls back automatically like so:
```sql
@@ -73,7 +73,7 @@ FROM projects
WHERE visibility_level IN (0, 20);
```
-This will then produce:
+This then produces:
```sql
Aggregate (cost=922420.60..922420.61 rows=1 width=8) (actual time=3428.535..3428.535 rows=1 loops=1)
@@ -120,10 +120,10 @@ Aggregate (cost=922411.76..922411.77 rows=1 width=8)
Here the first node executed is `Seq scan on projects`. The `Filter:` is an
additional filter applied to the results of the node. A filter is very similar
to Ruby's `Array#select`: it takes the input rows, applies the filter, and
-produces a new list of rows. Once the node is done, we perform the `Aggregate`
+produces a new list of rows. After the node is done, we perform the `Aggregate`
above it.
-Nested nodes will look like this:
+Nested nodes look like this:
```sql
Aggregate (cost=176.97..176.98 rows=1 width=8) (actual time=0.252..0.252 rows=1 loops=1)
@@ -152,7 +152,7 @@ number of rows produced, the number of loops performed, and more. For example:
Seq Scan on projects (cost=0.00..908044.47 rows=5746914 width=0)
```
-Here we can see that our cost ranges from `0.00..908044.47` (we'll cover this in
+Here we can see that our cost ranges from `0.00..908044.47` (we cover this in
a moment), and we estimate (since we're using `EXPLAIN` and not `EXPLAIN
ANALYZE`) a total of 5,746,914 rows to be produced by this node. The `width`
statistics describes the estimated width of each row, in bytes.
@@ -171,7 +171,7 @@ The startup cost states how expensive it was to start the node, with the total
cost describing how expensive the entire node was. In general: the greater the
values, the more expensive the node.
-When using `EXPLAIN ANALYZE`, these statistics will also include the actual time
+When using `EXPLAIN ANALYZE`, these statistics also include the actual time
(in milliseconds) spent, and other runtime statistics (for example, the actual number of
produced rows):
@@ -183,7 +183,7 @@ Here we can see we estimated 5,746,969 rows to be returned, but in reality we
returned 5,746,940 rows. We can also see that _just_ this sequential scan took
2.98 seconds to run.
-Using `EXPLAIN (ANALYZE, BUFFERS)` will also give us information about the
+Using `EXPLAIN (ANALYZE, BUFFERS)` also gives us information about the
number of rows removed by a filter, the number of buffers used, and more. For
example:
@@ -242,7 +242,7 @@ retrieving lots of rows, so it's best to avoid these for large tables.
A scan on an index that did not require fetching anything from the table. In
certain cases an index only scan may still fetch data from the table, in this
-case the node will include a `Heap Fetches:` statistic.
+case the node includes a `Heap Fetches:` statistic.
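Such a node looks roughly like this in a plan (an illustrative fragment; the index name and figures are made up):
```sql
Index Only Scan using index_projects_on_visibility_level on projects  (cost=0.43..1564.25 rows=5746 width=4) (actual time=0.025..3.140 rows=5746 loops=1)
  Heap Fetches: 12
```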
### Index Scan
@@ -273,7 +273,7 @@ Sorts the input rows as specified using an `ORDER BY` statement.
### Nested Loop
-A nested loop will execute its child nodes for every row produced by a node that
+A nested loop executes its child nodes for every row produced by a node that
precedes it. For example:
```sql
@@ -316,7 +316,7 @@ FROM users
WHERE twitter != '';
```
-This will produce the following plan:
+This produces the following plan:
```sql
Aggregate (cost=845110.21..845110.22 rows=1 width=8) (actual time=1271.157..1271.158 rows=1 loops=1)
@@ -435,7 +435,7 @@ This index would only index the `email` value of rows that match `WHERE id <
CREATE INDEX CONCURRENTLY twitter_test ON users (twitter) WHERE twitter != '';
```
-Once created, if we run our query again we will be given the following plan:
+After the index is created, if we run our query again we are given the following plan:
```sql
Aggregate (cost=1608.26..1608.27 rows=1 width=8) (actual time=19.821..19.821 rows=1 loops=1)
@@ -466,7 +466,7 @@ be used for comparison (for example, it depends a lot on the state of cache).
When optimizing a query, we usually need to reduce the amount of data we're
dealing with. Indexes are the way to work with fewer pages (buffers) to get the
result, so, during optimization, look at the number of buffers used (read and hit),
-and work on reducing these numbers. Reduced timing will be the consequence of reduced
+and work on reducing these numbers. Reduced timing is the consequence of reduced
buffer numbers. [Database Lab Engine](#database-lab-engine) guarantees that the plan is structurally
identical to production (and overall number of buffers is the same as on production),
but difference in cache state and I/O speed may lead to different timings.
@@ -508,8 +508,8 @@ index on `projects.visibility_level` to somehow turn this Sequential scan +
filter into an index-only scan.
Unfortunately, doing so is unlikely to improve anything. Contrary to what some
-might believe, an index being present _does not guarantee_ that PostgreSQL will
-actually use it. For example, when doing a `SELECT * FROM projects` it is much
+might believe, an index being present _does not guarantee_ that PostgreSQL
+actually uses it. For example, when doing a `SELECT * FROM projects` it is much
cheaper to just scan the entire table, instead of using an index and then
fetching data from the table. In such cases PostgreSQL may decide to not use an
index.
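A quick way to confirm this is to ask the planner directly; even with indexes available, the plan for a query that reads most of the table comes back with a sequential scan:
```sql
EXPLAIN SELECT * FROM projects;
-- The scan node in the resulting plan is "Seq Scan on projects",
-- not an Index Scan, because reading the whole table is cheaper.
```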
@@ -539,7 +539,7 @@ For GitLab.com this produces:
Here the total number of projects is 5,811,804, and 5,746,126 of those are of
level 0 or 20. That's 98% of the entire table!
-So no matter what we do, this query will retrieve 98% of the entire table. Since
+So no matter what we do, this query retrieves 98% of the entire table. Since
most time is spent doing exactly that, there isn't really much we can do to
improve this query, other than _not_ running it at all.
@@ -589,7 +589,7 @@ Foreign-key constraints:
"fk_rails_722ceba4f7" FOREIGN KEY (project_id) REFERENCES projects(id) ON DELETE CASCADE
```
-Let's rewrite our query to JOIN this table onto our projects, and get the
+Let's rewrite our query to `JOIN` this table onto our projects, and get the
projects for a specific user:
```sql
@@ -604,7 +604,7 @@ AND user_interacted_projects.user_id = 1;
What we do here is the following:
1. Get our projects.
-1. INNER JOIN `user_interacted_projects`, meaning we're only left with rows in
+1. `INNER JOIN` `user_interacted_projects`, meaning we're only left with rows in
`projects` that have a corresponding row in `user_interacted_projects`.
1. Limit this to the projects with `visibility_level` of 0 or 20, and to
projects that the user with ID 1 interacted with.
@@ -765,7 +765,7 @@ The web interface comes with the following execution plan visualizers included:
#### Tips & Tricks
-The database connection is now maintained during your whole session, so you can use `exec set ...` for any session variables (such as `enable_seqscan` or `work_mem`). These settings will be applied to all subsequent commands until you reset them. For example you can disable parallel queries with
+The database connection is now maintained during your whole session, so you can use `exec set ...` for any session variables (such as `enable_seqscan` or `work_mem`). These settings are applied to all subsequent commands until you reset them. For example, you can disable parallel queries with
```sql
exec SET max_parallel_workers_per_gather = 0
diff --git a/doc/user/admin_area/settings/visibility_and_access_controls.md b/doc/user/admin_area/settings/visibility_and_access_controls.md
index 55a399f0241..c07f4ea51f1 100644
--- a/doc/user/admin_area/settings/visibility_and_access_controls.md
+++ b/doc/user/admin_area/settings/visibility_and_access_controls.md
@@ -253,6 +253,33 @@ work in every repository. They can only be re-enabled by an administrator user o
![Mirror settings](img/mirror_settings.png)
+## Configure globally-allowed IP address ranges
+
+> [Introduced](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/87579) in GitLab 15.1 [with a flag](../../../administration/feature_flags.md) named `group_ip_restrictions_allow_global`. Disabled by default.
+
+FLAG:
+On self-managed GitLab, by default this feature is not available. To make it available
+per group, ask an administrator to [enable the feature flag](../../../administration/feature_flags.md)
+named `group_ip_restrictions_allow_global`.
+On GitLab.com, this feature is available.
+
+This setting lets you specify IP address ranges that are combined with group-level IP allowlists.
+It helps administrators prevent parts of the GitLab installation from being blocked from
+working as intended when an IP allowlist is used.
+
+For example, if the GitLab Pages daemon runs on the `10.0.0.0/24` range, specify that range in this
+field. Otherwise, any group-level restriction that doesn't include that range prevents the Pages
+daemon from fetching artifacts from pipeline runs.
+
+To add an IP address range to the group-level allowlist:
+
+1. Sign in to GitLab as a user with Administrator access level.
+1. On the top bar, select **Menu > Admin**.
+1. On the left sidebar, select **Settings > General**.
+1. Expand the **Visibility and access controls** section.
+1. In **Globally-allowed IP ranges**, provide a value.
+1. Select **Save changes**.
+
<!-- ## Troubleshooting
Include any troubleshooting steps that you can foresee. If you know beforehand what issues
diff --git a/doc/user/group/index.md b/doc/user/group/index.md
index 8a588d5bd76..4133b75ad9c 100644
--- a/doc/user/group/index.md
+++ b/doc/user/group/index.md
@@ -640,6 +640,10 @@ To restrict group access by IP address:
![Domain restriction by IP address](img/restrict-by-ip.gif)
+In self-managed installations of GitLab 15.1 and later, you can also configure
+[globally-allowed IP address ranges](../admin_area/settings/visibility_and_access_controls.md#configure-globally-allowed-ip-address-ranges)
+at the group level.
+
## Restrict group access by domain **(PREMIUM)**
> - [Introduced](https://gitlab.com/gitlab-org/gitlab/-/issues/7297) in GitLab 12.2.
@@ -845,6 +849,7 @@ Support for group-level settings for merge request approval rules is tracked in
- [Enforce two-factor authentication (2FA)](../../security/two_factor_authentication.md#enforce-2fa-for-all-users-in-a-group): Enforce 2FA
for all group members.
- Namespaces [API](../../api/namespaces.md) and [Rake tasks](../../raketasks/features.md).
+- [Control access and visibility](../admin_area/settings/visibility_and_access_controls.md).
## Troubleshooting
diff --git a/doc/user/infrastructure/iac/terraform_state.md b/doc/user/infrastructure/iac/terraform_state.md
index 7abdbecf3d9..e8637abce91 100644
--- a/doc/user/infrastructure/iac/terraform_state.md
+++ b/doc/user/infrastructure/iac/terraform_state.md
@@ -119,7 +119,7 @@ You can use a GitLab-managed Terraform state backend as a
variable "example_access_token" {
type = string
- description = "Gitlab acess token to query remote state"
+ description = "GitLab access token to query remote state"
}
```
diff --git a/lib/gitlab/ci/templates/Security/DAST.latest.gitlab-ci.yml b/lib/gitlab/ci/templates/Security/DAST.latest.gitlab-ci.yml
index e5ac5099546..10549b56856 100644
--- a/lib/gitlab/ci/templates/Security/DAST.latest.gitlab-ci.yml
+++ b/lib/gitlab/ci/templates/Security/DAST.latest.gitlab-ci.yml
@@ -48,13 +48,10 @@ dast:
$CI_DEFAULT_BRANCH == $CI_COMMIT_REF_NAME
when: never
- if: $CI_DEFAULT_BRANCH != $CI_COMMIT_REF_NAME &&
- $REVIEW_DISABLED && $DAST_WEBSITE == null &&
- $DAST_API_SPECIFICATION == null
+ $REVIEW_DISABLED
when: never
- if: $CI_COMMIT_BRANCH &&
($CI_KUBERNETES_ACTIVE || $KUBECONFIG) &&
$GITLAB_FEATURES =~ /\bdast\b/
- if: $CI_COMMIT_BRANCH &&
- $DAST_WEBSITE
- - if: $CI_COMMIT_BRANCH &&
- $DAST_API_SPECIFICATION
+ $GITLAB_FEATURES =~ /\bdast\b/
diff --git a/lib/gitlab/project_stats_refresh_conflicts_logger.rb b/lib/gitlab/project_stats_refresh_conflicts_logger.rb
index 49f5a544a87..3e7eecce89c 100644
--- a/lib/gitlab/project_stats_refresh_conflicts_logger.rb
+++ b/lib/gitlab/project_stats_refresh_conflicts_logger.rb
@@ -20,5 +20,15 @@ module Gitlab
Gitlab::AppLogger.warn(payload)
end
+
+ def self.warn_skipped_artifact_deletion_during_stats_refresh(project_ids:, method:)
+ payload = Gitlab::ApplicationContext.current.merge(
+ message: 'Skipped deleting artifacts undergoing refresh',
+ method: method,
+ project_ids: project_ids
+ )
+
+ Gitlab::AppLogger.warn(payload)
+ end
end
end
diff --git a/locale/gitlab.pot b/locale/gitlab.pot
index 89654fdd0ab..41f8e9a4cc2 100644
--- a/locale/gitlab.pot
+++ b/locale/gitlab.pot
@@ -32219,6 +32219,9 @@ msgstr ""
msgid "Request attention from assignee(s) or reviewer(s)"
msgstr ""
+msgid "Request data is too large"
+msgstr ""
+
msgid "Request details"
msgstr ""
diff --git a/spec/features/admin/admin_hook_logs_spec.rb b/spec/features/admin/admin_hook_logs_spec.rb
index fd51fd71fea..6caf2b24555 100644
--- a/spec/features/admin/admin_hook_logs_spec.rb
+++ b/spec/features/admin/admin_hook_logs_spec.rb
@@ -41,4 +41,18 @@ RSpec.describe 'Admin::HookLogs' do
expect(page).to have_current_path(edit_admin_hook_path(system_hook), ignore_query: true)
end
+
+ context 'response data is too large' do
+ let(:hook_log) { create(:web_hook_log, web_hook: system_hook, request_data: WebHookLog::OVERSIZE_REQUEST_DATA) }
+
+ it 'shows request data as too large and disables retry function' do
+ visit(admin_hook_hook_log_path(system_hook, hook_log))
+
+ expect(page).to have_content('Request data is too large')
+ expect(page).not_to have_button(
+ _('Resent request'),
+ disabled: true, class: 'has-tooltip', title: _("Request data is too large")
+ )
+ end
+ end
end
diff --git a/spec/features/projects/hook_logs/user_reads_log_spec.rb b/spec/features/projects/hook_logs/user_reads_log_spec.rb
index 8513a9374d1..9b7ec14c36f 100644
--- a/spec/features/projects/hook_logs/user_reads_log_spec.rb
+++ b/spec/features/projects/hook_logs/user_reads_log_spec.rb
@@ -3,21 +3,80 @@
require 'spec_helper'
RSpec.describe 'Hook logs' do
- let(:web_hook_log) { create(:web_hook_log, response_body: '<script>') }
- let(:project) { web_hook_log.web_hook.project }
+ let(:project) { create(:project) }
+ let(:project_hook) { create(:project_hook, project: project) }
+ let(:web_hook_log) { create(:web_hook_log, web_hook: project_hook, response_body: 'Hello World') }
let(:user) { create(:user) }
before do
+ web_hook_log
project.add_maintainer(user)
sign_in(user)
end
- it 'user reads log without getting XSS' do
- visit(
- project_hook_hook_log_path(
- project, web_hook_log.web_hook, web_hook_log))
+ it 'shows list of hook logs' do
+ visit edit_project_hook_path(project, project_hook)
- expect(page).to have_content('<script>')
+ expect(page).to have_content('Recent events')
+ expect(page).to have_link('View details', href: project_hook_hook_log_path(project, project_hook, web_hook_log))
+ end
+
+ it 'shows hook log details' do
+ visit edit_project_hook_path(project, project_hook)
+ click_link 'View details'
+
+ expect(page).to have_content("POST #{web_hook_log.url}")
+ expect(page).to have_content(web_hook_log.response_body)
+ expect(page).to have_content('Resend Request')
+ end
+
+ it 'retries hook log' do
+ WebMock.stub_request(:post, project_hook.url)
+
+ visit edit_project_hook_path(project, project_hook)
+ click_link 'View details'
+ click_link 'Resend Request'
+
+ expect(page).to have_current_path(edit_project_hook_path(project, project_hook), ignore_query: true)
+ end
+
+ context 'request gets internal error' do
+ let(:web_hook_log) { create(:web_hook_log, web_hook: project_hook, internal_error_message: 'Some error') }
+
+ it 'shows hook log details with internal error message' do
+ visit edit_project_hook_path(project, project_hook)
+ click_link 'View details'
+
+ expect(page).to have_content("POST #{web_hook_log.url}")
+ expect(page).to have_content(web_hook_log.internal_error_message)
+ expect(page).to have_content('Resend Request')
+ end
+ end
+
+ context 'response body contains XSS string' do
+ let(:web_hook_log) { create(:web_hook_log, web_hook: project_hook, response_body: '<script>') }
+
+ it 'displays log without getting XSS' do
+ visit(project_hook_hook_log_path(project, project_hook, web_hook_log))
+
+ expect(page).to have_content('<script>')
+ end
+ end
+
+ context 'response data is too large' do
+ let(:web_hook_log) do
+ create(:web_hook_log, web_hook: project_hook, request_data: WebHookLog::OVERSIZE_REQUEST_DATA)
+ end
+
+ it 'shows request data as too large and disables retry function' do
+ visit(project_hook_hook_log_path(project, project_hook, web_hook_log))
+
+ expect(page).to have_content('Request data is too large')
+ expect(page).not_to have_button(
+ _('Resent request'),
+ disabled: true, class: 'has-tooltip', title: _("Request data is too large")
+ )
+ end
end
end
diff --git a/spec/frontend/access_tokens/components/access_token_table_app_spec.js b/spec/frontend/access_tokens/components/access_token_table_app_spec.js
index 827bc1a6a4d..b45abe418e4 100644
--- a/spec/frontend/access_tokens/components/access_token_table_app_spec.js
+++ b/spec/frontend/access_tokens/components/access_token_table_app_spec.js
@@ -1,7 +1,8 @@
-import { GlTable } from '@gitlab/ui';
+import { GlPagination, GlTable } from '@gitlab/ui';
import { mount } from '@vue/test-utils';
import { nextTick } from 'vue';
import AccessTokenTableApp from '~/access_tokens/components/access_token_table_app.vue';
+import { EVENT_SUCCESS, PAGE_SIZE } from '~/access_tokens/components/constants';
import { __, s__, sprintf } from '~/locale';
import DomElementListener from '~/vue_shared/components/dom_element_listener.vue';
@@ -57,13 +58,14 @@ describe('~/access_tokens/components/access_token_table_app', () => {
const triggerSuccess = async (activeAccessTokens = defaultActiveAccessTokens) => {
wrapper
.findComponent(DomElementListener)
- .vm.$emit('ajax:success', { detail: [{ active_access_tokens: activeAccessTokens }] });
+ .vm.$emit(EVENT_SUCCESS, { detail: [{ active_access_tokens: activeAccessTokens }] });
await nextTick();
};
const findTable = () => wrapper.findComponent(GlTable);
const findHeaders = () => findTable().findAll('th > :first-child');
const findCells = () => findTable().findAll('td');
+ const findPagination = () => wrapper.findComponent(GlPagination);
afterEach(() => {
wrapper?.destroy();
@@ -225,4 +227,15 @@ describe('~/access_tokens/components/access_token_table_app', () => {
expect(cells.at(3).text()).not.toBe('Never');
expect(cells.at(10).text()).toBe('Never');
});
+
+ it('should show the pagination component when needed', async () => {
+ createComponent();
+ expect(findPagination().exists()).toBe(false);
+
+ await triggerSuccess(Array(PAGE_SIZE).fill(defaultActiveAccessTokens[0]));
+ expect(findPagination().exists()).toBe(false);
+
+ await triggerSuccess(Array(PAGE_SIZE + 1).fill(defaultActiveAccessTokens[0]));
+ expect(findPagination().exists()).toBe(true);
+ });
});
diff --git a/spec/frontend/access_tokens/components/new_access_token_app_spec.js b/spec/frontend/access_tokens/components/new_access_token_app_spec.js
index 25b3eba6587..0fdd77ef6f2 100644
--- a/spec/frontend/access_tokens/components/new_access_token_app_spec.js
+++ b/spec/frontend/access_tokens/components/new_access_token_app_spec.js
@@ -3,6 +3,7 @@ import { nextTick } from 'vue';
import { setHTMLFixture, resetHTMLFixture } from 'helpers/fixtures';
import { mountExtended } from 'helpers/vue_test_utils_helper';
import NewAccessTokenApp from '~/access_tokens/components/new_access_token_app.vue';
+import { EVENT_ERROR, EVENT_SUCCESS, FORM_SELECTOR } from '~/access_tokens/components/constants';
import { createAlert, VARIANT_INFO } from '~/flash';
import { __, sprintf } from '~/locale';
import DomElementListener from '~/vue_shared/components/dom_element_listener.vue';
@@ -22,20 +23,18 @@ describe('~/access_tokens/components/new_access_token_app', () => {
};
const triggerSuccess = async (newToken = 'new token') => {
- wrapper
- .find(DomElementListener)
- .vm.$emit('ajax:success', { detail: [{ new_token: newToken }] });
+ wrapper.find(DomElementListener).vm.$emit(EVENT_SUCCESS, { detail: [{ new_token: newToken }] });
await nextTick();
};
const triggerError = async (errors = ['1', '2']) => {
- wrapper.find(DomElementListener).vm.$emit('ajax:error', { detail: [{ errors }] });
+ wrapper.find(DomElementListener).vm.$emit(EVENT_ERROR, { detail: [{ errors }] });
await nextTick();
};
beforeEach(() => {
// NewAccessTokenApp observes a form element
- setHTMLFixture('<form id="js-new-access-token-form"><input type="submit"/></form>');
+ setHTMLFixture(`<form id="${FORM_SELECTOR}"><input type="submit"/></form>`);
createComponent();
});
diff --git a/spec/lib/gitlab/project_stats_refresh_conflicts_logger_spec.rb b/spec/lib/gitlab/project_stats_refresh_conflicts_logger_spec.rb
index 6dbfd5804d7..ce05d5b11c7 100644
--- a/spec/lib/gitlab/project_stats_refresh_conflicts_logger_spec.rb
+++ b/spec/lib/gitlab/project_stats_refresh_conflicts_logger_spec.rb
@@ -44,4 +44,24 @@ RSpec.describe Gitlab::ProjectStatsRefreshConflictsLogger do
described_class.warn_request_rejected_during_stats_refresh(project_id)
end
end
+
+ describe '.warn_skipped_artifact_deletion_during_stats_refresh' do
+ it 'logs a warning about artifacts being excluded from deletion while the project is undergoing stats refresh' do
+ project_ids = [12, 34]
+ method = 'Foo#action'
+
+ expect(Gitlab::AppLogger).to receive(:warn).with(
+ hash_including(
+ message: 'Skipped deleting artifacts undergoing refresh',
+ method: method,
+ project_ids: match_array(project_ids),
+ 'correlation_id' => an_instance_of(String),
+ 'meta.feature_category' => 'test',
+ 'meta.caller_id' => 'caller'
+ )
+ )
+
+ described_class.warn_skipped_artifact_deletion_during_stats_refresh(project_ids: project_ids, method: method)
+ end
+ end
end
diff --git a/spec/models/users/callout_spec.rb b/spec/models/users/callout_spec.rb
index 293f0279e79..14f555863ec 100644
--- a/spec/models/users/callout_spec.rb
+++ b/spec/models/users/callout_spec.rb
@@ -11,4 +11,16 @@ RSpec.describe Users::Callout do
it { is_expected.to validate_presence_of(:feature_name) }
it { is_expected.to validate_uniqueness_of(:feature_name).scoped_to(:user_id).ignoring_case_sensitivity }
end
+
+ describe 'scopes' do
+ describe '.with_feature_name' do
+ let_it_be(:feature_name) { described_class.feature_names.keys.last }
+ let_it_be(:user_callouts_for_feature_name) { create_list(:callout, 2, feature_name: feature_name) }
+ let_it_be(:another_user_callout) { create(:callout, feature_name: described_class.feature_names.each_key.first) }
+
+ it 'returns user callouts for the given feature name only' do
+ expect(described_class.with_feature_name(feature_name)).to eq(user_callouts_for_feature_name)
+ end
+ end
+ end
end
diff --git a/spec/services/ci/job_artifacts/destroy_all_expired_service_spec.rb b/spec/services/ci/job_artifacts/destroy_all_expired_service_spec.rb
index 1c6963e4a31..4f7663d7996 100644
--- a/spec/services/ci/job_artifacts/destroy_all_expired_service_spec.rb
+++ b/spec/services/ci/job_artifacts/destroy_all_expired_service_spec.rb
@@ -99,6 +99,16 @@ RSpec.describe Ci::JobArtifacts::DestroyAllExpiredService, :clean_gitlab_redis_s
expect { subject }.not_to change { artifact.file.exists? }
end
end
+
+ context 'when the project that the artifact belongs to is undergoing stats refresh' do
+ before do
+ create(:project_build_artifacts_size_refresh, :pending, project: artifact.project)
+ end
+
+ it 'does not destroy job artifact' do
+ expect { subject }.not_to change { Ci::JobArtifact.count }
+ end
+ end
end
context 'when artifact is locked' do
diff --git a/spec/services/ci/job_artifacts/destroy_batch_service_spec.rb b/spec/services/ci/job_artifacts/destroy_batch_service_spec.rb
index 0bb062e6994..3a04a3af03e 100644
--- a/spec/services/ci/job_artifacts/destroy_batch_service_spec.rb
+++ b/spec/services/ci/job_artifacts/destroy_batch_service_spec.rb
@@ -4,7 +4,14 @@ require 'spec_helper'
RSpec.describe Ci::JobArtifacts::DestroyBatchService do
let(:artifacts) { Ci::JobArtifact.where(id: [artifact_with_file.id, artifact_without_file.id, trace_artifact.id]) }
- let(:service) { described_class.new(artifacts, pick_up_at: Time.current) }
+ let(:skip_projects_on_refresh) { false }
+ let(:service) do
+ described_class.new(
+ artifacts,
+ pick_up_at: Time.current,
+ skip_projects_on_refresh: skip_projects_on_refresh
+ )
+ end
let_it_be(:artifact_with_file, refind: true) do
create(:ci_job_artifact, :zip)
@@ -76,18 +83,101 @@ RSpec.describe Ci::JobArtifacts::DestroyBatchService do
create(:project_build_artifacts_size_refresh, :running, project: artifact_under_refresh_2.project)
end
- it 'logs the artifacts undergoing refresh and continues with the delete', :aggregate_failures do
- expect(Gitlab::ProjectStatsRefreshConflictsLogger).to receive(:warn_artifact_deletion_during_stats_refresh).with(
- method: 'Ci::JobArtifacts::DestroyBatchService#execute',
- project_id: artifact_under_refresh_1.project.id
- ).once
+ shared_examples 'avoiding N+1 queries' do
+ let!(:control_artifact_on_refresh) do
+ create(:ci_job_artifact, :zip)
+ end
+
+ let!(:control_artifact_non_refresh) do
+ create(:ci_job_artifact, :zip)
+ end
+
+ let!(:other_artifact_on_refresh) do
+ create(:ci_job_artifact, :zip)
+ end
+
+ let!(:other_artifact_on_refresh_2) do
+ create(:ci_job_artifact, :zip)
+ end
+
+ let!(:other_artifact_non_refresh) do
+ create(:ci_job_artifact, :zip)
+ end
+
+ let!(:control_artifacts) do
+ Ci::JobArtifact.where(
+ id: [
+ control_artifact_on_refresh.id,
+ control_artifact_non_refresh.id
+ ]
+ )
+ end
+
+ let!(:artifacts) do
+ Ci::JobArtifact.where(
+ id: [
+ other_artifact_on_refresh.id,
+ other_artifact_on_refresh_2.id,
+ other_artifact_non_refresh.id
+ ]
+ )
+ end
+
+ let(:control_service) do
+ described_class.new(
+ control_artifacts,
+ pick_up_at: Time.current,
+ skip_projects_on_refresh: skip_projects_on_refresh
+ )
+ end
+
+ before do
+ create(:project_build_artifacts_size_refresh, :pending, project: control_artifact_on_refresh.project)
+ create(:project_build_artifacts_size_refresh, :pending, project: other_artifact_on_refresh.project)
+ create(:project_build_artifacts_size_refresh, :pending, project: other_artifact_on_refresh_2.project)
+ end
+
+ it 'does not make multiple queries when fetching multiple project refresh records' do
+ control = ActiveRecord::QueryRecorder.new { control_service.execute }
+
+ expect { subject }.not_to exceed_query_limit(control)
+ end
+ end
+
+ context 'and skip_projects_on_refresh is set to false (default)' do
+ it 'logs the projects undergoing refresh and continues with the delete', :aggregate_failures do
+ expect(Gitlab::ProjectStatsRefreshConflictsLogger).to receive(:warn_artifact_deletion_during_stats_refresh).with(
+ method: 'Ci::JobArtifacts::DestroyBatchService#execute',
+ project_id: artifact_under_refresh_1.project.id
+ ).once
- expect(Gitlab::ProjectStatsRefreshConflictsLogger).to receive(:warn_artifact_deletion_during_stats_refresh).with(
- method: 'Ci::JobArtifacts::DestroyBatchService#execute',
- project_id: artifact_under_refresh_2.project.id
- ).once
+ expect(Gitlab::ProjectStatsRefreshConflictsLogger).to receive(:warn_artifact_deletion_during_stats_refresh).with(
+ method: 'Ci::JobArtifacts::DestroyBatchService#execute',
+ project_id: artifact_under_refresh_2.project.id
+ ).once
+
+ expect { subject }.to change { Ci::JobArtifact.count }.by(-4)
+ end
+
+ it_behaves_like 'avoiding N+1 queries'
+ end
+
+ context 'and skip_projects_on_refresh is set to true' do
+ let(:skip_projects_on_refresh) { true }
+
+ it 'logs the projects undergoing refresh and excludes the artifacts from deletion', :aggregate_failures do
+ expect(Gitlab::ProjectStatsRefreshConflictsLogger).to receive(:warn_skipped_artifact_deletion_during_stats_refresh).with(
+ method: 'Ci::JobArtifacts::DestroyBatchService#execute',
+ project_ids: match_array([artifact_under_refresh_1.project.id, artifact_under_refresh_2.project.id])
+ )
+
+ expect { subject }.to change { Ci::JobArtifact.count }.by(-1)
+ expect(Ci::JobArtifact.where(id: artifact_under_refresh_1.id)).to exist
+ expect(Ci::JobArtifact.where(id: artifact_under_refresh_2.id)).to exist
+ expect(Ci::JobArtifact.where(id: artifact_under_refresh_3.id)).to exist
+ end
- expect { subject }.to change { Ci::JobArtifact.count }.by(-4)
+ it_behaves_like 'avoiding N+1 queries'
end
end
diff --git a/spec/services/web_hook_service_spec.rb b/spec/services/web_hook_service_spec.rb
index b99bc860523..9f3093d64f3 100644
--- a/spec/services/web_hook_service_spec.rb
+++ b/spec/services/web_hook_service_spec.rb
@@ -5,6 +5,7 @@ require 'spec_helper'
RSpec.describe WebHookService, :request_store, :clean_gitlab_redis_shared_state do
include StubRequests
+ let(:ellipsis) { '…' }
let_it_be(:project) { create(:project) }
let_it_be_with_reload(:project_hook) { create(:project_hook, project: project) }
@@ -268,6 +269,20 @@ RSpec.describe WebHookService, :request_store, :clean_gitlab_redis_shared_state
end
context 'execution logging' do
+ let(:default_log_data) do
+ {
+ trigger: 'push_hooks',
+ url: project_hook.url,
+ request_headers: headers,
+ request_data: data,
+ response_body: 'Success',
+ response_headers: {},
+ response_status: 200,
+ execution_duration: be > 0,
+ internal_error_message: nil
+ }
+ end
+
context 'with success' do
before do
stub_full_request(project_hook.url, method: :post).to_return(status: 200, body: 'Success')
@@ -280,7 +295,7 @@ RSpec.describe WebHookService, :request_store, :clean_gitlab_redis_shared_state
expect(::WebHooks::LogExecutionWorker).not_to receive(:perform_async)
expect(::WebHooks::LogExecutionService)
.to receive(:new)
- .with(hook: project_hook, log_data: Hash, response_category: :ok)
+ .with(hook: project_hook, log_data: default_log_data, response_category: :ok)
.and_return(double(execute: nil))
service_instance.execute
@@ -291,17 +306,7 @@ RSpec.describe WebHookService, :request_store, :clean_gitlab_redis_shared_state
expect(WebHooks::LogExecutionWorker).to receive(:perform_async)
.with(
project_hook.id,
- hash_including(
- trigger: 'push_hooks',
- url: project_hook.url,
- request_headers: headers,
- request_data: data,
- response_body: 'Success',
- response_headers: {},
- response_status: 200,
- execution_duration: be > 0,
- internal_error_message: nil
- ),
+ hash_including(default_log_data),
:ok,
nil
)
@@ -328,15 +333,10 @@ RSpec.describe WebHookService, :request_store, :clean_gitlab_redis_shared_state
.with(
project_hook.id,
hash_including(
- trigger: 'push_hooks',
- url: project_hook.url,
- request_headers: headers,
- request_data: data,
- response_body: 'Bad request',
- response_headers: {},
- response_status: 400,
- execution_duration: be > 0,
- internal_error_message: nil
+ default_log_data.merge(
+ response_body: 'Bad request',
+ response_status: 400
+ )
),
:failed,
nil
@@ -356,15 +356,11 @@ RSpec.describe WebHookService, :request_store, :clean_gitlab_redis_shared_state
.with(
project_hook.id,
hash_including(
- trigger: 'push_hooks',
- url: project_hook.url,
- request_headers: headers,
- request_data: data,
- response_body: '',
- response_headers: {},
- response_status: 'internal error',
- execution_duration: be > 0,
- internal_error_message: 'Some HTTP Post error'
+ default_log_data.merge(
+ response_body: '',
+ response_status: 'internal error',
+ internal_error_message: 'Some HTTP Post error'
+ )
),
:error,
nil
@@ -383,17 +379,86 @@ RSpec.describe WebHookService, :request_store, :clean_gitlab_redis_shared_state
expect(WebHooks::LogExecutionWorker).to receive(:perform_async)
.with(
project_hook.id,
- hash_including(
- trigger: 'push_hooks',
- url: project_hook.url,
- request_headers: headers,
- request_data: data,
- response_body: '',
- response_headers: {},
- response_status: 200,
- execution_duration: be > 0,
- internal_error_message: nil
- ),
+ hash_including(default_log_data.merge(response_body: '')),
+ :ok,
+ nil
+ )
+
+ service_instance.execute
+ end
+ end
+
+ context 'with oversize response body' do
+ let(:oversize_body) { 'a' * (described_class::RESPONSE_BODY_SIZE_LIMIT + 1) }
+ let(:stripped_body) { 'a' * (described_class::RESPONSE_BODY_SIZE_LIMIT - ellipsis.bytesize) + ellipsis }
+
+ before do
+ stub_full_request(project_hook.url, method: :post).to_return(status: 200, body: oversize_body)
+ end
+
+ it 'queues LogExecutionWorker with stripped response_body' do
+ expect(WebHooks::LogExecutionWorker).to receive(:perform_async)
+ .with(
+ project_hook.id,
+ hash_including(default_log_data.merge(response_body: stripped_body)),
+ :ok,
+ nil
+ )
+
+ service_instance.execute
+ end
+ end
+
+ context 'with massive amount of headers' do
+ let(:response_headers) do
+ (1..described_class::RESPONSE_HEADERS_COUNT_LIMIT + 1).to_a.to_h do |num|
+ ["header-#{num}", SecureRandom.hex(num)]
+ end
+ end
+
+ let(:expected_response_headers) do
+ (1..described_class::RESPONSE_HEADERS_COUNT_LIMIT).to_a.to_h do |num|
+ # Capitalized
+ ["Header-#{num}", response_headers["header-#{num}"]]
+ end
+ end
+
+ before do
+ stub_full_request(project_hook.url, method: :post).to_return(
+ status: 200, body: 'Success', headers: response_headers
+ )
+ end
+
+ it 'queues LogExecutionWorker with limited amount of headers' do
+ expect(WebHooks::LogExecutionWorker).to receive(:perform_async)
+ .with(
+ project_hook.id,
+ hash_including(default_log_data.merge(response_headers: expected_response_headers)),
+ :ok,
+ nil
+ )
+
+ service_instance.execute
+ end
+ end
+
+ context 'with oversize header' do
+ let(:oversize_header) { 'a' * (described_class::RESPONSE_HEADERS_SIZE_LIMIT + 1) }
+ let(:stripped_header) { 'a' * (described_class::RESPONSE_HEADERS_SIZE_LIMIT - ellipsis.bytesize) + ellipsis }
+ let(:response_headers) { { 'oversized-header' => oversize_header } }
+ let(:expected_response_headers) { { 'Oversized-Header' => stripped_header } }
+
+ before do
+ stub_full_request(project_hook.url, method: :post).to_return(
+ status: 200, body: 'Success', headers: response_headers
+ )
+ end
+
+ it 'queues LogExecutionWorker with stripped header value' do
+ expect(WebHooks::LogExecutionWorker).to receive(:perform_async)
+ .with(
+ project_hook.id,
+ hash_including(default_log_data.merge(response_headers: expected_response_headers)),
:ok,
nil
)
@@ -401,6 +466,51 @@ RSpec.describe WebHookService, :request_store, :clean_gitlab_redis_shared_state
service_instance.execute
end
end
+
+ context 'with log data exceeding Sidekiq limit' do
+ before do
+ stub_full_request(project_hook.url, method: :post).to_return(status: 200, body: 'Success')
+ end
+
+ it 'queues LogExecutionWorker with request_data overridden in the second attempt' do
+ expect(WebHooks::LogExecutionWorker).to receive(:perform_async)
+ .with(
+ project_hook.id,
+ hash_including(default_log_data),
+ :ok,
+ nil
+ )
+ .and_raise(
+ Gitlab::SidekiqMiddleware::SizeLimiter::ExceedLimitError.new(WebHooks::LogExecutionWorker, 100, 50)
+ )
+ .ordered
+ expect(WebHooks::LogExecutionWorker).to receive(:perform_async)
+ .with(
+ project_hook.id,
+ hash_including(default_log_data.merge(request_data: WebHookLog::OVERSIZE_REQUEST_DATA)),
+ :ok,
+ nil
+ )
+ .and_call_original
+ .ordered
+
+ service_instance.execute
+ end
+
+ context 'new log data still exceeds limit' do
+ before do
+ allow(WebHooks::LogExecutionWorker).to receive(:perform_async).and_raise(
+ Gitlab::SidekiqMiddleware::SizeLimiter::ExceedLimitError.new(WebHooks::LogExecutionWorker, 100, 50)
+ )
+ end
+
+ it 'raises an exception' do
+ expect do
+ service_instance.execute
+ end.to raise_error(Gitlab::SidekiqMiddleware::SizeLimiter::ExceedLimitError)
+ end
+ end
+ end
end
end
diff --git a/spec/workers/every_sidekiq_worker_spec.rb b/spec/workers/every_sidekiq_worker_spec.rb
index 330afa6fbd4..fb8ff23f8d8 100644
--- a/spec/workers/every_sidekiq_worker_spec.rb
+++ b/spec/workers/every_sidekiq_worker_spec.rb
@@ -319,6 +319,7 @@ RSpec.describe 'Every Sidekiq worker' do
'JiraConnect::SyncMergeRequestWorker' => 3,
'JiraConnect::SyncProjectWorker' => 3,
'LdapGroupSyncWorker' => 3,
+ 'Licenses::ResetSubmitLicenseUsageDataBannerWorker' => 13,
'MailScheduler::IssueDueWorker' => 3,
'MailScheduler::NotificationServiceWorker' => 3,
'MembersDestroyer::UnassignIssuablesWorker' => 3,