
gitlab.com/gitlab-org/gitlab-foss.git
author    GitLab Bot <gitlab-bot@gitlab.com> 2023-09-25 18:10:16 +0300
committer GitLab Bot <gitlab-bot@gitlab.com> 2023-09-25 18:10:16 +0300
commit    02e4b2d0043b416314ffb76694aff200584352d5 (patch)
tree      a7d027891f3089960950d7ec75ba60d981623ab4
parent    7a3aca2b5b3bfdebbd7bb6353d5bdcdc422670da (diff)

Add latest changes from gitlab-org/gitlab@master
-rw-r--r-- Gemfile | 2
-rw-r--r-- Gemfile.checksum | 2
-rw-r--r-- Gemfile.lock | 4
-rw-r--r-- app/assets/javascripts/boards/components/board_card_inner.vue | 2
-rw-r--r-- app/assets/javascripts/issuable/components/hidden_badge.vue | 36
-rw-r--r-- app/assets/javascripts/issuable/components/locked_badge.vue | 36
-rw-r--r-- app/assets/javascripts/issues/constants.js | 2
-rw-r--r-- app/assets/javascripts/issues/show/components/header_actions.vue | 4
-rw-r--r-- app/assets/javascripts/issues/show/components/new_header_actions_popover.vue | 4
-rw-r--r-- app/assets/javascripts/issues/show/components/sticky_header.vue | 36
-rw-r--r-- app/assets/javascripts/merge_requests/components/header_metadata.vue | 57
-rw-r--r-- app/assets/javascripts/merge_requests/components/merge_request_status_badge.vue | 2
-rw-r--r-- app/assets/javascripts/merge_requests/index.js | 2
-rw-r--r-- app/assets/javascripts/sidebar/components/confidential/sidebar_confidentiality_form.vue | 4
-rw-r--r-- app/assets/javascripts/vue_shared/components/badges/beta_badge.vue | 35
-rw-r--r-- app/assets/javascripts/vue_shared/components/badges/experiment_badge.stories.js | 24
-rw-r--r-- app/assets/javascripts/vue_shared/components/badges/experiment_badge.vue | 43
-rw-r--r-- app/assets/javascripts/vue_shared/components/badges/hover_badge.vue | 52
-rw-r--r-- app/assets/javascripts/vue_shared/issuable/list/components/issuable_item.vue | 2
-rw-r--r-- app/assets/javascripts/vue_shared/issuable/show/components/issuable_header.vue | 32
-rw-r--r-- app/assets/stylesheets/page_bundles/issuable.scss | 13
-rw-r--r-- app/helpers/application_helper.rb | 2
-rw-r--r-- app/helpers/issuables_helper.rb | 16
-rw-r--r-- app/views/projects/merge_requests/_mr_box.html.haml | 3
-rw-r--r-- app/views/projects/merge_requests/_mr_title.html.haml | 6
-rw-r--r-- app/views/shared/issuable/_status_box.html.haml | 8
-rw-r--r-- data/deprecations/14-9-removal_monitor_respond_integrated_error_tracking.yml | 14
-rw-r--r-- data/deprecations/15-7-remove-flowdock-integration.yml | 18
-rw-r--r-- data/deprecations/15-9-omniauth-authentiq.yml | 11
-rw-r--r-- data/deprecations/16-0-source-code-branch-push.yml | 16
-rw-r--r-- data/deprecations/16.0-eol-windows-server-2004-and-20H2.yml | 9
-rw-r--r-- doc/architecture/blueprints/gitlab_observability_backend/index.md | 693
-rw-r--r-- doc/architecture/blueprints/gitlab_observability_backend/supported-deployments.png | bin 74153 -> 0 bytes
-rw-r--r-- doc/architecture/blueprints/observability_metrics/index.md | 286
-rw-r--r-- doc/architecture/blueprints/observability_metrics/metrics-read-path.png | bin 0 -> 69105 bytes
-rw-r--r-- doc/architecture/blueprints/observability_metrics/metrics_indexing_at_ingestion.png | bin 0 -> 132942 bytes
-rw-r--r-- doc/architecture/blueprints/observability_metrics/query-service-internals.png | bin 0 -> 114197 bytes
-rw-r--r-- doc/update/deprecations.md | 72
-rw-r--r-- doc/user/project/integrations/index.md | 15
-rw-r--r-- doc/user/storage_management_automation.md | 113
-rw-r--r-- lib/api/helpers.rb | 13
-rw-r--r-- lib/api/usage_data.rb | 2
-rw-r--r-- lib/gitlab/bitbucket_server_import/importers/pull_request_importer.rb | 10
-rw-r--r-- locale/gitlab.pot | 37
-rw-r--r-- qa/qa/tools/migrate_influx_data_to_gcs.rb | 5
-rw-r--r-- spec/features/merge_request/admin_views_hidden_merge_request_spec.rb | 12
-rw-r--r-- spec/features/merge_requests/admin_views_hidden_merge_requests_spec.rb | 2
-rw-r--r-- spec/frontend/boards/board_card_inner_spec.js | 2
-rw-r--r-- spec/frontend/issuable/components/hidden_badge_spec.js | 45
-rw-r--r-- spec/frontend/issuable/components/locked_badge_spec.js | 45
-rw-r--r-- spec/frontend/issues/show/components/sticky_header_spec.js | 29
-rw-r--r-- spec/frontend/merge_requests/components/header_metadata_spec.js | 52
-rw-r--r-- spec/frontend/vue_shared/components/badges/__snapshots__/beta_badge_spec.js.snap | 21
-rw-r--r-- spec/frontend/vue_shared/components/badges/__snapshots__/experiment_badge_spec.js.snap | 41
-rw-r--r-- spec/frontend/vue_shared/components/badges/beta_badge_spec.js | 4
-rw-r--r-- spec/frontend/vue_shared/components/badges/experiment_badge_spec.js | 32
-rw-r--r-- spec/frontend/vue_shared/components/badges/hover_badge_spec.js | 50
-rw-r--r-- spec/frontend/vue_shared/issuable/list/components/issuable_item_spec.js | 2
-rw-r--r-- spec/frontend/vue_shared/issuable/show/components/issuable_header_spec.js | 36
-rw-r--r-- spec/helpers/application_helper_spec.rb | 4
-rw-r--r-- spec/helpers/issuables_helper_spec.rb | 35
-rw-r--r-- spec/lib/api/helpers_spec.rb | 21
-rw-r--r-- spec/lib/gitlab/bitbucket_server_import/importers/pull_request_importer_spec.rb | 62
-rw-r--r-- spec/models/ci/pipeline_spec.rb | 19
-rw-r--r-- spec/requests/api/usage_data_spec.rb | 10
65 files changed, 1163 insertions(+), 1104 deletions(-)
diff --git a/Gemfile b/Gemfile
index 2d6d42b3da0..9573ba39b06 100644
--- a/Gemfile
+++ b/Gemfile
@@ -38,7 +38,7 @@ gem 'responders', '~> 3.0'
gem 'sprockets', '~> 3.7.0'
-gem 'view_component', '~> 3.5.0'
+gem 'view_component', '~> 3.6.0'
# Supported DBs
gem 'pg', '~> 1.5.4'
diff --git a/Gemfile.checksum b/Gemfile.checksum
index b8a244ef785..82d8344329c 100644
--- a/Gemfile.checksum
+++ b/Gemfile.checksum
@@ -677,7 +677,7 @@
{"name":"validates_hostname","version":"1.0.13","platform":"ruby","checksum":"eac40178cc0b4f727df9cc6a5cb5bc2550718ad8d9bb3728df9aba6354bdda19"},
{"name":"version_gem","version":"1.1.0","platform":"ruby","checksum":"6b009518020db57f51ec7b410213fae2bf692baea9f1b51770db97fbc93d9a80"},
{"name":"version_sorter","version":"2.3.0","platform":"ruby","checksum":"2147f2a1a3804fbb8f60d268b7d7c1ec717e6dd727ffe2c165b4e05e82efe1da"},
-{"name":"view_component","version":"3.5.0","platform":"ruby","checksum":"c3e3cdf5abb2383157684d76cfb153d23bfc9834a7defa82441edab54635e8af"},
+{"name":"view_component","version":"3.6.0","platform":"ruby","checksum":"7aa45c11b4fd51583bd63b10fbc6b1a87f088182e4f026e5f4f6a9211e5a42a3"},
{"name":"virtus","version":"2.0.0","platform":"ruby","checksum":"8841dae4eb7fcc097320ba5ea516bf1839e5d056c61ee27138aa4bddd6e3d1c2"},
{"name":"vite_rails","version":"3.0.15","platform":"ruby","checksum":"b8ec528aedf7e24b54f222b449cd9250810ea2456d5f8dd4ef87f06b475cf860"},
{"name":"vite_ruby","version":"3.3.4","platform":"ruby","checksum":"025e438385a6dc2320c8c148dff453f5bb1d4f056ce69c3386f47d4c388ad80c"},
diff --git a/Gemfile.lock b/Gemfile.lock
index 3823c2528b6..c5883ced594 100644
--- a/Gemfile.lock
+++ b/Gemfile.lock
@@ -1667,7 +1667,7 @@ GEM
activesupport (>= 3.0)
version_gem (1.1.0)
version_sorter (2.3.0)
- view_component (3.5.0)
+ view_component (3.6.0)
activesupport (>= 5.2.0, < 8.0)
concurrent-ruby (~> 1.0)
method_source (~> 1.0)
@@ -2036,7 +2036,7 @@ DEPENDENCIES
valid_email (~> 0.1)
validates_hostname (~> 1.0.13)
version_sorter (~> 2.3)
- view_component (~> 3.5.0)
+ view_component (~> 3.6.0)
vite_rails
vmstat (~> 2.3.0)
warning (~> 1.3.0)
diff --git a/app/assets/javascripts/boards/components/board_card_inner.vue b/app/assets/javascripts/boards/components/board_card_inner.vue
index c441a718dd8..6830c96a6a9 100644
--- a/app/assets/javascripts/boards/components/board_card_inner.vue
+++ b/app/assets/javascripts/boards/components/board_card_inner.vue
@@ -252,7 +252,7 @@ export default {
v-if="item.hidden"
v-gl-tooltip
name="spam"
- :title="__('This issue is hidden because its author has been banned')"
+ :title="__('This issue is hidden because its author has been banned.')"
class="gl-mr-2 hidden-icon gl-text-orange-500 gl-cursor-help"
data-testid="hidden-icon"
/>
diff --git a/app/assets/javascripts/issuable/components/hidden_badge.vue b/app/assets/javascripts/issuable/components/hidden_badge.vue
new file mode 100644
index 00000000000..a80dc2f62d4
--- /dev/null
+++ b/app/assets/javascripts/issuable/components/hidden_badge.vue
@@ -0,0 +1,36 @@
+<script>
+import { GlBadge, GlIcon, GlTooltipDirective } from '@gitlab/ui';
+import { issuableTypeText } from '~/issues/constants';
+import { __, sprintf } from '~/locale';
+
+export default {
+ components: {
+ GlBadge,
+ GlIcon,
+ },
+ directives: {
+ GlTooltip: GlTooltipDirective,
+ },
+ props: {
+ issuableType: {
+ type: String,
+ required: false,
+ default: '',
+ },
+ },
+ computed: {
+ title() {
+ return sprintf(__('This %{issuable} is hidden because its author has been banned.'), {
+ issuable: issuableTypeText[this.issuableType],
+ });
+ },
+ },
+};
+</script>
+
+<template>
+ <gl-badge v-gl-tooltip :title="title" variant="warning">
+ <gl-icon name="spam" />
+ <span class="gl-sr-only">{{ __('Hidden') }}</span>
+ </gl-badge>
+</template>
diff --git a/app/assets/javascripts/issuable/components/locked_badge.vue b/app/assets/javascripts/issuable/components/locked_badge.vue
new file mode 100644
index 00000000000..f97ac888417
--- /dev/null
+++ b/app/assets/javascripts/issuable/components/locked_badge.vue
@@ -0,0 +1,36 @@
+<script>
+import { GlBadge, GlIcon, GlTooltipDirective } from '@gitlab/ui';
+import { issuableTypeText } from '~/issues/constants';
+import { __, sprintf } from '~/locale';
+
+export default {
+ components: {
+ GlBadge,
+ GlIcon,
+ },
+ directives: {
+ GlTooltip: GlTooltipDirective,
+ },
+ props: {
+ issuableType: {
+ type: String,
+ required: false,
+ default: '',
+ },
+ },
+ computed: {
+ title() {
+ return sprintf(__('This %{issuable} is locked. Only project members can comment.'), {
+ issuable: issuableTypeText[this.issuableType],
+ });
+ },
+ },
+};
+</script>
+
+<template>
+ <gl-badge v-gl-tooltip :title="title" variant="warning">
+ <gl-icon name="lock" />
+ <span class="gl-sr-only">{{ __('Locked') }}</span>
+ </gl-badge>
+</template>
diff --git a/app/assets/javascripts/issues/constants.js b/app/assets/javascripts/issues/constants.js
index 80344efc44c..3d8017e6e07 100644
--- a/app/assets/javascripts/issues/constants.js
+++ b/app/assets/javascripts/issues/constants.js
@@ -28,7 +28,7 @@ export const issuableStatusText = {
[STATUS_LOCKED]: __('Open'),
};
-export const IssuableTypeText = {
+export const issuableTypeText = {
[TYPE_ISSUE]: __('issue'),
[TYPE_EPIC]: __('epic'),
[TYPE_MERGE_REQUEST]: __('merge request'),
diff --git a/app/assets/javascripts/issues/show/components/header_actions.vue b/app/assets/javascripts/issues/show/components/header_actions.vue
index 81e5c30a264..ae5b4350512 100644
--- a/app/assets/javascripts/issues/show/components/header_actions.vue
+++ b/app/assets/javascripts/issues/show/components/header_actions.vue
@@ -14,7 +14,7 @@ import * as Sentry from '@sentry/browser';
import { mapActions, mapGetters, mapState } from 'vuex';
import { createAlert, VARIANT_SUCCESS } from '~/alert';
import { EVENT_ISSUABLE_VUE_APP_CHANGE } from '~/issuable/constants';
-import { STATUS_CLOSED, TYPE_ISSUE, IssuableTypeText } from '~/issues/constants';
+import { STATUS_CLOSED, TYPE_ISSUE, issuableTypeText } from '~/issues/constants';
import {
ISSUE_STATE_EVENT_CLOSE,
ISSUE_STATE_EVENT_REOPEN,
@@ -138,7 +138,7 @@ export default {
issueTypeText() {
const { issueType } = this;
- return IssuableTypeText[issueType] ?? issueType;
+ return issuableTypeText[issueType] ?? issueType;
},
buttonText() {
return this.isClosed
diff --git a/app/assets/javascripts/issues/show/components/new_header_actions_popover.vue b/app/assets/javascripts/issues/show/components/new_header_actions_popover.vue
index f7a324d9f3f..1e5eb53e220 100644
--- a/app/assets/javascripts/issues/show/components/new_header_actions_popover.vue
+++ b/app/assets/javascripts/issues/show/components/new_header_actions_popover.vue
@@ -4,7 +4,7 @@ import { s__, sprintf } from '~/locale';
import { getCookie, parseBoolean, setCookie } from '~/lib/utils/common_utils';
import glFeatureFlagMixin from '~/vue_shared/mixins/gl_feature_flags_mixin';
import { NEW_ACTIONS_POPOVER_KEY } from '~/issues/show/constants';
-import { IssuableTypeText } from '~/issues/constants';
+import { issuableTypeText } from '~/issues/constants';
export default {
name: 'NewHeaderActionsPopover',
@@ -34,7 +34,7 @@ export default {
computed: {
popoverText() {
return sprintf(this.$options.i18n.popoverText, {
- issueType: IssuableTypeText[this.issueType],
+ issueType: issuableTypeText[this.issueType],
});
},
showPopover() {
diff --git a/app/assets/javascripts/issues/show/components/sticky_header.vue b/app/assets/javascripts/issues/show/components/sticky_header.vue
index b8e0937d51c..738bb2c2aa0 100644
--- a/app/assets/javascripts/issues/show/components/sticky_header.vue
+++ b/app/assets/javascripts/issues/show/components/sticky_header.vue
@@ -1,5 +1,7 @@
<script>
-import { GlBadge, GlIcon, GlIntersectionObserver, GlTooltipDirective } from '@gitlab/ui';
+import { GlBadge, GlIcon, GlIntersectionObserver, GlLink } from '@gitlab/ui';
+import HiddenBadge from '~/issuable/components/hidden_badge.vue';
+import LockedBadge from '~/issuable/components/locked_badge.vue';
import {
issuableStatusText,
STATUS_CLOSED,
@@ -15,9 +17,9 @@ export default {
GlBadge,
GlIcon,
GlIntersectionObserver,
- },
- directives: {
- GlTooltip: GlTooltipDirective,
+ GlLink,
+ HiddenBadge,
+ LockedBadge,
},
props: {
isConfidential: {
@@ -88,36 +90,20 @@ export default {
<gl-icon :name="statusIcon" />
<span class="gl-display-none gl-sm-display-block gl-ml-2">{{ statusText }}</span>
</gl-badge>
- <span
- v-if="isLocked"
- v-gl-tooltip.bottom
- data-testid="locked"
- class="issuable-warning-icon"
- :title="__('This issue is locked. Only project members can comment.')"
- >
- <gl-icon name="lock" :aria-label="__('Locked')" />
- </span>
<confidentiality-badge
v-if="isConfidential"
:issuable-type="issuableType"
:workspace-type="$options.WORKSPACE_PROJECT"
/>
- <span
- v-if="isHidden"
- v-gl-tooltip.bottom
- :title="__('This issue is hidden because its author has been banned')"
- data-testid="hidden"
- class="issuable-warning-icon"
- >
- <gl-icon name="spam" />
- </span>
- <a
+ <locked-badge v-if="isLocked" :issuable-type="issuableType" />
+ <hidden-badge v-if="isHidden" :issuable-type="issuableType" />
+ <gl-link
+ class="gl-font-weight-bold gl-text-black-normal gl-text-truncate"
href="#top"
- class="gl-font-weight-bold gl-overflow-hidden gl-white-space-nowrap gl-text-overflow-ellipsis gl-my-0 gl-text-black-normal"
:title="title"
>
{{ title }}
- </a>
+ </gl-link>
</div>
</div>
</transition>
diff --git a/app/assets/javascripts/merge_requests/components/header_metadata.vue b/app/assets/javascripts/merge_requests/components/header_metadata.vue
index fce7ba385b4..bce67c54c27 100644
--- a/app/assets/javascripts/merge_requests/components/header_metadata.vue
+++ b/app/assets/javascripts/merge_requests/components/header_metadata.vue
@@ -1,20 +1,19 @@
<script>
-import { GlIcon, GlTooltipDirective } from '@gitlab/ui';
// eslint-disable-next-line no-restricted-imports
import { mapGetters } from 'vuex';
-import { __ } from '~/locale';
-import { TYPE_ISSUE, WORKSPACE_PROJECT } from '~/issues/constants';
+import HiddenBadge from '~/issuable/components/hidden_badge.vue';
+import LockedBadge from '~/issuable/components/locked_badge.vue';
+import { TYPE_ISSUE, TYPE_MERGE_REQUEST, WORKSPACE_PROJECT } from '~/issues/constants';
import ConfidentialityBadge from '~/vue_shared/components/confidentiality_badge.vue';
export default {
TYPE_ISSUE,
+ TYPE_MERGE_REQUEST,
WORKSPACE_PROJECT,
components: {
- GlIcon,
ConfidentialityBadge,
- },
- directives: {
- GlTooltip: GlTooltipDirective,
+ HiddenBadge,
+ LockedBadge,
},
inject: ['hidden'],
computed: {
@@ -25,45 +24,27 @@ export default {
isConfidential() {
return this.getNoteableData.confidential;
},
- warningIconsMeta() {
- return [
- {
- iconName: 'lock',
- visible: this.isLocked,
- dataTestId: 'locked',
- tooltip: __('This merge request is locked. Only project members can comment.'),
- },
- {
- iconName: 'spam',
- visible: this.hidden,
- dataTestId: 'hidden',
- tooltip: __('This merge request is hidden because its author has been banned'),
- },
- ];
- },
},
};
</script>
<template>
- <div class="gl-display-inline-block">
+ <span class="gl-display-contents">
<confidentiality-badge
v-if="isConfidential"
- class="gl-mr-3"
+ class="gl-align-self-center gl-mr-2"
:issuable-type="$options.TYPE_ISSUE"
:workspace-type="$options.WORKSPACE_PROJECT"
/>
- <template v-for="meta in warningIconsMeta">
- <div
- v-if="meta.visible"
- :key="meta.iconName"
- v-gl-tooltip.bottom
- :data-testid="meta.dataTestId"
- :title="meta.tooltip || null"
- class="issuable-warning-icon gl-mr-3 gl-mt-2 gl-display-flex gl-justify-content-center gl-align-items-center"
- >
- <gl-icon :name="meta.iconName" class="icon" />
- </div>
- </template>
- </div>
+ <locked-badge
+ v-if="isLocked"
+ class="gl-align-self-center gl-mr-2"
+ :issuable-type="$options.TYPE_MERGE_REQUEST"
+ />
+ <hidden-badge
+ v-if="hidden"
+ class="gl-align-self-center gl-mr-2"
+ :issuable-type="$options.TYPE_MERGE_REQUEST"
+ />
+ </span>
</template>
diff --git a/app/assets/javascripts/merge_requests/components/merge_request_status_badge.vue b/app/assets/javascripts/merge_requests/components/merge_request_status_badge.vue
index 3d5478757a8..d5900137484 100644
--- a/app/assets/javascripts/merge_requests/components/merge_request_status_badge.vue
+++ b/app/assets/javascripts/merge_requests/components/merge_request_status_badge.vue
@@ -70,5 +70,5 @@ export default {
</script>
<template>
- <status-badge class="gl-align-self-center gl-mr-3" :issuable-type="issuableType" :state="state" />
+ <status-badge class="gl-align-self-center gl-mr-2" :issuable-type="issuableType" :state="state" />
</template>
diff --git a/app/assets/javascripts/merge_requests/index.js b/app/assets/javascripts/merge_requests/index.js
index 29218eb53e0..bd5faf0329c 100644
--- a/app/assets/javascripts/merge_requests/index.js
+++ b/app/assets/javascripts/merge_requests/index.js
@@ -3,7 +3,7 @@ import { parseBoolean } from '~/lib/utils/common_utils';
import HeaderMetadata from './components/header_metadata.vue';
export function mountHeaderMetadata(store) {
- const el = document.querySelector('.js-header-metadata-root');
+ const el = document.querySelector('.js-mr-header-metadata-root');
if (!el) {
return null;
diff --git a/app/assets/javascripts/sidebar/components/confidential/sidebar_confidentiality_form.vue b/app/assets/javascripts/sidebar/components/confidential/sidebar_confidentiality_form.vue
index 7a1853b1b46..90c3fb0039d 100644
--- a/app/assets/javascripts/sidebar/components/confidential/sidebar_confidentiality_form.vue
+++ b/app/assets/javascripts/sidebar/components/confidential/sidebar_confidentiality_form.vue
@@ -1,7 +1,7 @@
<script>
import { GlSprintf, GlButton } from '@gitlab/ui';
import { createAlert } from '~/alert';
-import { TYPE_ISSUE, TYPE_TEST_CASE, IssuableTypeText } from '~/issues/constants';
+import { TYPE_ISSUE, TYPE_TEST_CASE, issuableTypeText } from '~/issues/constants';
import { __, sprintf } from '~/locale';
import { confidentialityQueries } from '../../queries/constants';
@@ -80,7 +80,7 @@ export default {
: __('at least the Reporter role');
},
issuableTypeText() {
- return IssuableTypeText[this.issuableType];
+ return issuableTypeText[this.issuableType];
},
commentText() {
return this.isTestCase ? '' : __(' and leave a comment on');
diff --git a/app/assets/javascripts/vue_shared/components/badges/beta_badge.vue b/app/assets/javascripts/vue_shared/components/badges/beta_badge.vue
index e8d33b5538e..9cac176a06f 100644
--- a/app/assets/javascripts/vue_shared/components/badges/beta_badge.vue
+++ b/app/assets/javascripts/vue_shared/components/badges/beta_badge.vue
@@ -1,10 +1,10 @@
<script>
-import { GlBadge, GlPopover } from '@gitlab/ui';
import { s__ } from '~/locale';
+import HoverBadge from './hover_badge.vue';
export default {
name: 'BetaBadge',
- components: { GlBadge, GlPopover },
+ components: { HoverBadge },
i18n: {
badgeLabel: s__('BetaBadge|Beta'),
popoverTitle: s__("BetaBadge|What's Beta?"),
@@ -41,27 +41,16 @@ export default {
</script>
<template>
- <div>
- <gl-badge ref="badge" href="#" :size="size" variant="neutral" class="gl-cursor-pointer">{{
- $options.i18n.badgeLabel
- }}</gl-badge>
- <gl-popover
- triggers="hover focus click"
- :show-close-button="true"
- :target="target"
- :title="$options.i18n.popoverTitle"
- data-testid="beta-badge"
- >
- <p>{{ $options.i18n.descriptionParagraph }}</p>
+ <hover-badge :label="$options.i18n.badgeLabel" :size="size" :title="$options.i18n.popoverTitle">
+ <p>{{ $options.i18n.descriptionParagraph }}</p>
- <p class="gl-mb-0">{{ $options.i18n.listIntroduction }}</p>
+ <p class="gl-mb-0">{{ $options.i18n.listIntroduction }}</p>
- <ul class="gl-pl-4">
- <li>{{ $options.i18n.listItemStability }}</li>
- <li>{{ $options.i18n.listItemDataLoss }}</li>
- <li>{{ $options.i18n.listItemReasonableEffort }}</li>
- <li>{{ $options.i18n.listItemNearCompletion }}</li>
- </ul>
- </gl-popover>
- </div>
+ <ul class="gl-pl-4">
+ <li>{{ $options.i18n.listItemStability }}</li>
+ <li>{{ $options.i18n.listItemDataLoss }}</li>
+ <li>{{ $options.i18n.listItemReasonableEffort }}</li>
+ <li>{{ $options.i18n.listItemNearCompletion }}</li>
+ </ul>
+ </hover-badge>
</template>
diff --git a/app/assets/javascripts/vue_shared/components/badges/experiment_badge.stories.js b/app/assets/javascripts/vue_shared/components/badges/experiment_badge.stories.js
new file mode 100644
index 00000000000..8e964c9bdf8
--- /dev/null
+++ b/app/assets/javascripts/vue_shared/components/badges/experiment_badge.stories.js
@@ -0,0 +1,24 @@
+import ExperimentBadge from './experiment_badge.vue';
+
+export default {
+ component: ExperimentBadge,
+ title: 'vue_shared/experiment-badge',
+};
+
+const template = `
+ <div style="height:600px;" class="gl-display-flex gl-justify-content-center gl-align-items-center">
+ <experiment-badge :size="size" />
+ </div>
+ `;
+
+const Template = (args, { argTypes }) => ({
+ components: { ExperimentBadge },
+ data() {
+ return { value: args.value };
+ },
+ props: Object.keys(argTypes),
+ template,
+});
+
+export const Default = Template.bind({});
+Default.args = {};
diff --git a/app/assets/javascripts/vue_shared/components/badges/experiment_badge.vue b/app/assets/javascripts/vue_shared/components/badges/experiment_badge.vue
new file mode 100644
index 00000000000..26bae71ddb8
--- /dev/null
+++ b/app/assets/javascripts/vue_shared/components/badges/experiment_badge.vue
@@ -0,0 +1,43 @@
+<script>
+import { s__ } from '~/locale';
+import HoverBadge from './hover_badge.vue';
+
+export default {
+ name: 'ExperimentBadge',
+ components: { HoverBadge },
+ i18n: {
+ badgeLabel: s__('ExperimentBadge|Experiment'),
+ popoverTitle: s__("ExperimentBadge|What's an Experiment?"),
+ descriptionParagraph: s__(
+ "ExperimentBadge|An Experiment is a feature that's in the process of being developed. It's not production-ready. We encourage users to try Experimental features and provide feedback.",
+ ),
+ listIntroduction: s__('ExperimentBadge|An Experiment:'),
+ listItemStability: s__('ExperimentBadge|May be unstable.'),
+ listItemDataLoss: s__('ExperimentBadge|Can cause data loss.'),
+ listItemNoSupport: s__('ExperimentBadge|Has no support and might not be documented.'),
+ listItemCanBeRemoved: s__('ExperimentBadge|Can be removed at any time.'),
+ },
+ props: {
+ size: {
+ type: String,
+ required: false,
+ default: 'md',
+ },
+ },
+};
+</script>
+
+<template>
+ <hover-badge :label="$options.i18n.badgeLabel" :size="size" :title="$options.i18n.popoverTitle">
+ <p>{{ $options.i18n.descriptionParagraph }}</p>
+
+ <p class="gl-mb-0">{{ $options.i18n.listIntroduction }}</p>
+
+ <ul class="gl-pl-4">
+ <li>{{ $options.i18n.listItemStability }}</li>
+ <li>{{ $options.i18n.listItemDataLoss }}</li>
+ <li>{{ $options.i18n.listItemNoSupport }}</li>
+ <li>{{ $options.i18n.listItemCanBeRemoved }}</li>
+ </ul>
+ </hover-badge>
+</template>
diff --git a/app/assets/javascripts/vue_shared/components/badges/hover_badge.vue b/app/assets/javascripts/vue_shared/components/badges/hover_badge.vue
new file mode 100644
index 00000000000..351c7bd9da0
--- /dev/null
+++ b/app/assets/javascripts/vue_shared/components/badges/hover_badge.vue
@@ -0,0 +1,52 @@
+<script>
+import { GlBadge, GlPopover } from '@gitlab/ui';
+
+export default {
+ name: 'HoverBadge',
+ components: { GlBadge, GlPopover },
+ props: {
+ label: {
+ type: String,
+ required: true,
+ },
+ title: {
+ type: String,
+ required: true,
+ },
+ size: {
+ type: String,
+ required: false,
+ default: 'md',
+ },
+ },
+ methods: {
+ target() {
+ /**
+ * BVPopover retrieves the target during the `beforeDestroy` hook to deregister attached
+ * events. Since during `beforeDestroy` refs are `undefined`, it throws a warning in the
+ * console because we're trying to access the `$el` property of `undefined`. Optional
+ * chaining is not working in templates, which is why the method is used.
+ *
+ * See more on https://gitlab.com/gitlab-org/gitlab/-/merge_requests/49628#note_464803276
+ */
+ return this.$refs.badge?.$el;
+ },
+ },
+};
+</script>
+
+<template>
+ <div>
+ <gl-badge ref="badge" href="#" :size="size" variant="neutral" class="gl-cursor-pointer">{{
+ label
+ }}</gl-badge>
+ <gl-popover
+ triggers="hover focus click"
+ :show-close-button="true"
+ :target="target"
+ :title="title"
+ >
+ <slot></slot>
+ </gl-popover>
+ </div>
+</template>
diff --git a/app/assets/javascripts/vue_shared/issuable/list/components/issuable_item.vue b/app/assets/javascripts/vue_shared/issuable/list/components/issuable_item.vue
index d9e750b9c45..4657954c8cc 100644
--- a/app/assets/javascripts/vue_shared/issuable/list/components/issuable_item.vue
+++ b/app/assets/javascripts/vue_shared/issuable/list/components/issuable_item.vue
@@ -261,7 +261,7 @@ export default {
v-if="issuable.hidden"
v-gl-tooltip
name="spam"
- :title="__('This issue is hidden because its author has been banned')"
+ :title="__('This issue is hidden because its author has been banned.')"
:aria-label="__('Hidden')"
/>
<gl-link
diff --git a/app/assets/javascripts/vue_shared/issuable/show/components/issuable_header.vue b/app/assets/javascripts/vue_shared/issuable/show/components/issuable_header.vue
index c4b92454ac0..a9b5e3a66a8 100644
--- a/app/assets/javascripts/vue_shared/issuable/show/components/issuable_header.vue
+++ b/app/assets/javascripts/vue_shared/issuable/show/components/issuable_header.vue
@@ -1,6 +1,8 @@
<script>
import { GlIcon, GlBadge, GlButton, GlLink, GlSprintf, GlTooltipDirective } from '@gitlab/ui';
import { getIdFromGraphQLId } from '~/graphql_shared/utils';
+import HiddenBadge from '~/issuable/components/hidden_badge.vue';
+import LockedBadge from '~/issuable/components/locked_badge.vue';
import { issuableStatusText, STATUS_OPEN, STATUS_REOPENED } from '~/issues/constants';
import { isExternal } from '~/lib/utils/url_utility';
import { __, n__, sprintf } from '~/locale';
@@ -16,6 +18,8 @@ export default {
GlButton,
GlLink,
GlSprintf,
+ HiddenBadge,
+ LockedBadge,
TimeAgoTooltip,
WorkItemTypeIcon,
},
@@ -101,16 +105,6 @@ export default {
? 'success'
: 'info';
},
- blockedTooltip() {
- return sprintf(__('This %{issuable} is locked. Only project members can comment.'), {
- issuable: this.issuableType,
- });
- },
- hiddenTooltip() {
- return sprintf(__('This %{issuable} is hidden because its author has been banned'), {
- issuable: this.issuableType,
- });
- },
shouldShowWorkItemTypeIcon() {
return this.showWorkItemTypeIcon && this.issuableType;
},
@@ -174,22 +168,8 @@ export default {
:issuable-type="issuableType"
:workspace-type="workspaceType"
/>
- <span v-if="blocked" class="issuable-warning-icon">
- <gl-icon
- v-gl-tooltip.bottom
- name="lock"
- :title="blockedTooltip"
- :aria-label="__('Blocked')"
- />
- </span>
- <span v-if="isHidden" class="issuable-warning-icon">
- <gl-icon
- v-gl-tooltip.bottom
- name="spam"
- :title="hiddenTooltip"
- :aria-label="__('Hidden')"
- />
- </span>
+ <locked-badge v-if="blocked" :issuable-type="issuableType" />
+ <hidden-badge v-if="isHidden" :issuable-type="issuableType" />
<work-item-type-icon
v-if="shouldShowWorkItemTypeIcon"
show-text
diff --git a/app/assets/stylesheets/page_bundles/issuable.scss b/app/assets/stylesheets/page_bundles/issuable.scss
index 5397f3d8895..07614c5271a 100644
--- a/app/assets/stylesheets/page_bundles/issuable.scss
+++ b/app/assets/stylesheets/page_bundles/issuable.scss
@@ -1,18 +1,5 @@
@import 'mixins_and_variables_and_functions';
-$issuable-warning-size: 24px;
-
-.issuable-warning-icon {
- background-color: var(--orange-50, $orange-50);
- border-radius: $border-radius-default;
- color: var(--orange-600, $orange-600);
- width: $issuable-warning-size;
- height: $issuable-warning-size;
- text-align: center;
- line-height: $gl-line-height-24;
- flex: 0 0 auto;
-}
-
.limit-container-width {
.flash-container,
.detail-page-header,
diff --git a/app/helpers/application_helper.rb b/app/helpers/application_helper.rb
index fdb04b711f5..175ecc70c9c 100644
--- a/app/helpers/application_helper.rb
+++ b/app/helpers/application_helper.rb
@@ -472,7 +472,7 @@ module ApplicationHelper
end
def hidden_resource_icon(resource, css_class: nil)
- issuable_title = _('This %{issuable} is hidden because its author has been banned')
+ issuable_title = _('This %{issuable} is hidden because its author has been banned.')
case resource
when Issue
diff --git a/app/helpers/issuables_helper.rb b/app/helpers/issuables_helper.rb
index 7f948db2f71..d893ea2a49d 100644
--- a/app/helpers/issuables_helper.rb
+++ b/app/helpers/issuables_helper.rb
@@ -231,22 +231,6 @@ module IssuablesHelper
end
end
- def state_name_with_icon(issuable)
- if issuable.is_a?(MergeRequest)
- if issuable.open?
- [_("Open"), "merge-request-open"]
- elsif issuable.merged?
- [_("Merged"), "merge"]
- else
- [_("Closed"), "merge-request-close"]
- end
- elsif issuable.open?
- [_("Open"), "issues"]
- else
- [_("Closed"), "issue-closed"]
- end
- end
-
def issuable_type_selector_data(issuable)
{
selected_type: issuable.issue_type,
diff --git a/app/views/projects/merge_requests/_mr_box.html.haml b/app/views/projects/merge_requests/_mr_box.html.haml
index 1774401ed78..2c7abb4179c 100644
--- a/app/views/projects/merge_requests/_mr_box.html.haml
+++ b/app/views/projects/merge_requests/_mr_box.html.haml
@@ -1,3 +1,4 @@
.detail-page-description.gl-pt-2.gl-pb-4.gl-display-flex.gl-align-items-baseline.gl-flex-wrap{ class: "#{'is-merge-request' if moved_mr_sidebar_enabled? && !fluid_layout}" }
- = render 'shared/issuable/status_box', issuable: @merge_request
+ .js-mr-status-box{ data: { project_path: @merge_request.project.path_with_namespace, iid: @merge_request.iid, issuable_type: 'merge_request', state: @merge_request.state } }
+ .js-mr-header-metadata-root{ data: { hidden: @merge_request.hidden?.to_s } }
= merge_request_header(@project, @merge_request)
diff --git a/app/views/projects/merge_requests/_mr_title.html.haml b/app/views/projects/merge_requests/_mr_title.html.haml
index 9c20127e102..43bd6c66f74 100644
--- a/app/views/projects/merge_requests/_mr_title.html.haml
+++ b/app/views/projects/merge_requests/_mr_title.html.haml
@@ -14,10 +14,8 @@
.detail-page-header.border-bottom-0.gl-display-block.gl-pt-5{ class: "gl-md-display-flex! #{'is-merge-request' if moved_mr_sidebar_enabled? && !fluid_layout}" }
.detail-page-header-body
- .issuable-meta.gl-display-flex
- .js-header-metadata-root{ data: { hidden: @merge_request.hidden?.to_s } }
- %h1.title.page-title.gl-font-size-h-display.gl-my-0.gl-display-inline-block{ data: { qa_selector: 'title_content' } }
- = markdown_field(@merge_request, :title)
+ %h1.title.page-title.gl-font-size-h-display.gl-my-0.gl-display-inline-block{ data: { qa_selector: 'title_content' } }
+ = markdown_field(@merge_request, :title)
- unless hide_gutter_toggle
%div
diff --git a/app/views/shared/issuable/_status_box.html.haml b/app/views/shared/issuable/_status_box.html.haml
deleted file mode 100644
index f2e4e22788a..00000000000
--- a/app/views/shared/issuable/_status_box.html.haml
+++ /dev/null
@@ -1,8 +0,0 @@
-- badge_text = state_name_with_icon(issuable)[0]
-- badge_icon = state_name_with_icon(issuable)[1]
-- badge_variant = issuable.open? ? :success : issuable.merged? ? :info : :danger
-- badge_classes = "js-mr-status-box gl-mr-3 gl-align-self-center"
-
-= gl_badge_tag({ variant: badge_variant, icon: badge_icon, icon_classes: 'gl-mr-0!' }, { class: badge_classes, data: { project_path: issuable.project.path_with_namespace, iid: issuable.iid, issuable_type: 'merge_request', state: issuable.state } }) do
- %span.gl-display-none.gl-sm-display-block.gl-ml-2
- = badge_text
diff --git a/data/deprecations/14-9-removal_monitor_respond_integrated_error_tracking.yml b/data/deprecations/14-9-removal_monitor_respond_integrated_error_tracking.yml
new file mode 100644
index 00000000000..93824a4e68b
--- /dev/null
+++ b/data/deprecations/14-9-removal_monitor_respond_integrated_error_tracking.yml
@@ -0,0 +1,14 @@
+- title: "Integrated error tracking disabled by default"
+ announcement_milestone: "14.9"
+ announcement_date: "2022-02-23" # This is the date customers were notified about the change in rate limits, making integrated error tracking unusable, see https://gitlab.com/groups/gitlab-org/-/epics/7580#communication-to-rate-limit-impacted-users
+ removal_milestone: "14.9"
+ removal_date: "2022-03-10" # The MR was merged on this date, outside of the normal release cycle, https://gitlab.com/gitlab-org/gitlab/-/merge_requests/81767
+ breaking_change: true
+ reporter: abellucci
+ body: |
+ In GitLab 14.4, GitLab released an integrated error tracking backend that replaces Sentry. This feature caused database performance issues. In GitLab 14.9, integrated error tracking is removed from GitLab.com, and turned off by default in GitLab self-managed. While we explore the future development of this feature, please consider switching to the Sentry backend by [changing your error tracking to Sentry in your project settings](https://docs.gitlab.com/ee/operations/error_tracking.html#sentry-error-tracking).
+
+ For additional background on this removal, please reference [Disable Integrated Error Tracking by Default](https://gitlab.com/groups/gitlab-org/-/epics/7580). If you have feedback please add a comment to [Feedback: Removal of Integrated Error Tracking](https://gitlab.com/gitlab-org/gitlab/-/issues/355493).
+ stage: monitor
+ tiers: [Free, Silver, Gold, Core, Premium, Ultimate]
+ issue_url: https://gitlab.com/gitlab-org/gitlab/-/issues/353639
diff --git a/data/deprecations/15-7-remove-flowdock-integration.yml b/data/deprecations/15-7-remove-flowdock-integration.yml
new file mode 100644
index 00000000000..46f8ed6bdf9
--- /dev/null
+++ b/data/deprecations/15-7-remove-flowdock-integration.yml
@@ -0,0 +1,18 @@
+- title: "Flowdock integration" # (required) Actionable title. e.g., The `confidential` field for a `Note` is deprecated. Use `internal` instead.
+ announcement_milestone: "15.7" # (required) The milestone when this feature was deprecated.
+ announcement_date: "2022-12-22" # (required) The date of the milestone release when this feature was deprecated. This should almost always be the 22nd of a month (YYYY-MM-DD), unless you did an out of band blog post.
+ removal_milestone: "15.7" # (required) The milestone when this feature is being removed.
+ removal_date: "2022-12-22" # (required) This should almost always be the 22nd of a month (YYYY-MM-DD), the date of the milestone release when this feature will be removed.
+ breaking_change: false # (required) Change to true if this removal is a breaking change.
+ reporter: arturoherrero # (required) GitLab username of the person reporting the removal
+ stage: manage # (required) String value of the stage that the feature was created in. e.g., Growth
+ issue_url: https://gitlab.com/gitlab-org/gitlab/-/issues/379197 # (required) Link to the deprecation issue in GitLab
+ body: | # (required) Do not modify this line, instead modify the lines below.
+ As of December 22, 2022, we are removing the Flowdock integration because the service was shut down on August 15, 2022.
+#
+# OPTIONAL FIELDS
+#
+ tiers: # (optional - may be required in the future) An array of tiers that the feature is available in currently. e.g., [Free, Silver, Gold, Core, Premium, Ultimate]
+ documentation_url: # (optional) This is a link to the current documentation page
+ image_url: # (optional) This is a link to a thumbnail image depicting the feature
+ video_url: # (optional) Use the youtube thumbnail URL with the structure of https://img.youtube.com/vi/UNIQUEID/hqdefault.jpg
diff --git a/data/deprecations/15-9-omniauth-authentiq.yml b/data/deprecations/15-9-omniauth-authentiq.yml
new file mode 100644
index 00000000000..2a2e2601704
--- /dev/null
+++ b/data/deprecations/15-9-omniauth-authentiq.yml
@@ -0,0 +1,11 @@
+- title: "`omniauth-authentiq` gem no longer available" # (required) Clearly explain the change. For example, "The `confidential` field for a `Note` is removed" or "CI/CD job names are limited to 250 characters."
+ announcement_milestone: "15.9" # (required) The milestone when this feature was deprecated.
+ announcement_date: "2023-02-22" # (required) The date of the milestone release when this feature was deprecated. This should almost always be the 22nd of a month (YYYY-MM-DD), unless you did an out of band blog post.
+ removal_milestone: "15.9" # (required) The milestone when this feature is being removed.
+ removal_date: "2023-02-22" # (required) This should almost always be the 22nd of a month (YYYY-MM-DD), the date of the milestone release when this feature will be removed.
+ breaking_change: true # (required) Change to false if this is not a breaking change.
+ reporter: adil.farrukh # (required) GitLab username of the person reporting the removal
+ stage: manage # (required) String value of the stage that the feature was created in. e.g., Growth
+ issue_url: https://gitlab.com/gitlab-org/gitlab/-/issues/389452 # (required) Link to the deprecation issue in GitLab
+ body: | # (required) Do not modify this line, instead modify the lines below.
+ `omniauth-authentiq` is an OmniAuth strategy gem that was part of GitLab. The company providing authentication services, Authentiq, has shut down. Therefore the gem is being removed.
diff --git a/data/deprecations/16-0-source-code-branch-push.yml b/data/deprecations/16-0-source-code-branch-push.yml
new file mode 100644
index 00000000000..ee32a486db1
--- /dev/null
+++ b/data/deprecations/16-0-source-code-branch-push.yml
@@ -0,0 +1,16 @@
+- title: "GitLab administrators must have permission to modify protected branches or tags"
+ announcement_milestone: "16.0"
+ removal_milestone: "16.0"
+ breaking_change: true
+ reporter: tlinz
+ stage: Create
+ issue_url: https://gitlab.com/gitlab-org/gitlab/-/issues/12776
+ body: | # (required) Do not modify this line, instead modify the lines below.
+ GitLab administrators can no longer perform actions on protected branches or tags unless they have been explicitly granted that permission. These actions include pushing and merging into a [protected branch](https://docs.gitlab.com/ee/user/project/protected_branches.html), unprotecting a branch, and creating [protected tags](https://docs.gitlab.com/ee/user/project/protected_tags.html).
+#
+# OPTIONAL FIELDS
+#
+ tiers: # (optional - may be required in the future) An array of tiers that the feature is available in currently. e.g., [Free, Silver, Gold, Core, Premium, Ultimate]
+ documentation_url: https://docs.gitlab.com/ee/user/project/protected_branches.html
+ image_url: # (optional) This is a link to a thumbnail image depicting the feature
+ video_url: # (optional) Use the youtube thumbnail URL with the structure of https://img.youtube.com/vi/UNIQUEID/hqdefault.jpg
diff --git a/data/deprecations/16.0-eol-windows-server-2004-and-20H2.yml b/data/deprecations/16.0-eol-windows-server-2004-and-20H2.yml
new file mode 100644
index 00000000000..267304f6a13
--- /dev/null
+++ b/data/deprecations/16.0-eol-windows-server-2004-and-20H2.yml
@@ -0,0 +1,9 @@
+- title: "Stop publishing GitLab Runner images based on Windows Server 2004 and 20H2" # (required) Clearly explain the change. For example, "The `confidential` field for a `Note` is removed" or "CI/CD job names are limited to 250 characters."
+ announcement_milestone: "16.0" # (required) The milestone when this feature was deprecated.
+ removal_milestone: "16.0" # (required) The milestone when this feature is being removed.
+ breaking_change: false # (required) Change to false if this is not a breaking change.
+ reporter: DarrenEastman # (required) GitLab username of the person reporting the removal
+ stage: Verify # (required) String value of the stage that the feature was created in. e.g., Growth
+ issue_url: https://gitlab.com/gitlab-org/gitlab-runner/-/issues/31001 # (required) Link to the deprecation issue in GitLab
+ body: | # (required) Do not modify this line, instead modify the lines below.
+ As of GitLab 16.0, GitLab Runner images based on Windows Server 2004 and 20H2 will not be provided as these operating systems are end-of-life.
diff --git a/doc/architecture/blueprints/gitlab_observability_backend/index.md b/doc/architecture/blueprints/gitlab_observability_backend/index.md
deleted file mode 100644
index 3efdaa0c462..00000000000
--- a/doc/architecture/blueprints/gitlab_observability_backend/index.md
+++ /dev/null
@@ -1,693 +0,0 @@
----
-status: proposed
-creation-date: "2022-11-09"
-authors: [ "@ankitbhatnagar" ]
-coach: "@mappelman"
-approvers: [ "@sebastienpahl", "@nicholasklick" ]
-owning-stage: "~monitor::observability"
-participating-stages: []
----
-
-<!-- vale gitlab.FutureTense = NO -->
-
-# GitLab Observability Backend - Metrics
-
-## Summary
-
-Developing a multi-user system to store and query observability data (typically formatted in widely accepted, industry-standard formats) using Clickhouse as the underlying storage, with support for long-term data retention and aggregation.
-
-## Motivation
-
-Of the six pillars of observability, commonly abbreviated as `TEMPLE` (Traces, Events, Metrics, Profiles, Logs & Errors), metrics are one of the most important sources of observability data for modern-day systems, helping their users gather insights about their operational posture.
-
-Metrics, which are commonly structured as timeseries data, have the following characteristics:
-
-- indexed by their corresponding timestamps;
-- continuously expanding in size;
-- usually aggregated, down-sampled, and queried in ranges; and
-- have very write-intensive requirements.
-
-Within GitLab Observability Backend, we aim to add support for our customers to ingest and query observability data about their systems & applications, helping them improve the operational health of those systems.
-
-### Goals
-
-With the development of the proposed system, we have the following goals:
-
-- A scalable, low-latency & cost-effective monitoring system backed by Clickhouse, whose performance has been proven via repeatable benchmarks.
-
-- Support for long-term storage of Prometheus/OpenTelemetry-formatted metrics, ingested via the Prometheus remote_write API and queried via the Prometheus remote_read API, PromQL, or SQL, with support for metadata and exemplars.
-
-The aforementioned goals can further be broken down into the following four sub-goals:
-
-#### Ingesting data
-
-- For the system to be capable of ingesting large volumes of writes and reads, it must be horizontally scalable & provide durability guarantees, ensuring no writes are dropped once ingested.
-
-#### Persisting data
-
-- We aim to support ingesting telemetry/data sent using the Prometheus `remote_write` protocol. Any persistence we design for our dataset must be multi-tenant by default, ensuring we can store observability data for multiple tenants/groups/projects within the same storage backend.
-
-- We aim to develop a test suite for data correctness, seeking inspiration from how the Prometheus compliance test suite checks the correctness of a given metrics implementation, and to run it as a part of our CI setup.
-
-NOTE:
-Although remote_write_sender exercises the sending side rather than the correctness of a remote-write receiver (which is what we are building), it does bring some inspiration for implementing/developing such a test within the scope of this project.
-
-- We also aim to ensure compatibility with special Prometheus data types, for example, Prometheus histograms and summaries.
-
-#### Reading data
-
-- We aim to support querying data using PromQL, which means translating PromQL queries into Clickhouse SQL. To do this, the [PromQL](https://github.com/prometheus/prometheus/tree/main/promql/parser) or [MetricsQL](https://github.com/VictoriaMetrics/metricsql) parsers are good alternatives.
-
-- We aim to provide additional value by exposing all ingested data via the native Clickhouse SQL interface subject to the following reliability characteristics:
- - query validation, sanitation
- - rate limiting
- - resource limiting - memory, cpu, network bandwidth
-
-- We aim to pass the Prometheus test suites for correctness via the [Prometheus Compliance test suite](https://github.com/prometheus/compliance/tree/main/promql) with a target of a 100% success rate.
-
-#### Deleting data
-
-- We aim to support being able to delete any ingested data should such a need arise, in addition to naturally deleting data when a configured TTL expires and/or respective retention policies are enforced. We must, within our schemas, build a way to delete data by labels or their content, and add the necessary tooling to do so, for example via a ClickHouse mutation as sketched below.
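
A minimal sketch of how such a deletion could work against the schema proposed later in this document, via a ClickHouse mutation (the label key and value are hypothetical placeholders, and a real implementation would also have to clean up the corresponding lookup-table rows):

```sql
-- Hypothetical example: delete all samples belonging to series that match
-- a label selector, resolving series IDs through the lookup table first.
ALTER TABLE samples ON CLUSTER '{cluster}'
DELETE WHERE series_id IN (
    SELECT series_id
    FROM labels_to_series
    WHERE labels['namespace'] = 'team-a'
);
```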
-
-### Non-Goals
-
-With the goals established above, we also want to establish which specific things are non-goals of the current proposal. They are:
-
-- We do not aim to support ingestion using OpenTelemetry/OpenMetrics formats in our first iteration, though our users can still use the OpenTelemetry exporter(s), internally consuming the standard Prometheus `remote_write` protocol. More information [here](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/exporter/prometheusremotewriteexporter).
-
-- We do not aim to support ingesting Prometheus exemplars in our first iteration, though we do aim to account for them in our design from the beginning.
-
-NOTE:
-It is worth noting that we intend to model exemplars the same way we're modeling metric labels, so building on top of the same data structure should help implement support for metadata/exemplars rather easily.
-
-## Proposal
-
-We intend to use GitLab Observability Backend as a framework for the Metrics implementation so that its lifecycle is also managed via already existing Kubernetes controllers, for example, the scheduler and tenant-operator.
-
-![Architecture](supported-deployments.png)
-
-From a development perspective, what's been marked as our "Application Server" above needs to be developed as a part of this proposal, while the remaining peripheral components either already exist or can be provisioned via existing code in `scheduler`/`tenant-operator`.
-
-**On the write path**, we expect to receive incoming data via `HTTP`/`gRPC` `Ingress`, similar to what we do for our existing services, for example, errortracking and tracing.
-
-NOTE:
-Additionally, since we intend to ingest data via the Prometheus `remote_write` API, the received data will be Protobuf-encoded and Snappy-compressed. All received data therefore needs to be decompressed & decoded to turn it into a set of `prompb.TimeSeries` objects, which the rest of our components interact with.
-
-We also need to avoid making a lot of small writes into Clickhouse; it would therefore be prudent to batch data before writing it into Clickhouse (a sketch follows the storage interface below).
-
-We must also make sure ingestion remains decoupled from `Storage` so as to reduce undue dependence on a given storage implementation. While we do intend to use Clickhouse as our backing storage for any foreseeable future, this ensures we do not tie ourselves too tightly to Clickhouse should future business requirements warrant the usage of a different backend/technology. A good way to implement this in Go would be for our implementations to adhere to a standard interface, the following for example:
-
-```go
-type Storage interface {
- Read(
- ctx context.Context,
- request *prompb.ReadRequest
- ) (*prompb.ReadResponse, error)
- Write(
- ctx context.Context,
- request *prompb.WriteRequest
- ) error
-}
-```
-
-NOTE:
-We understand this couples the implementation with Prometheus data format/request types, but adding methods to the interface to support more data formats should be trivial looking forward with minimal changes to code.
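
As a minimal sketch tying the two points above together, a Clickhouse-backed implementation of this interface could batch appended samples and flush them in one insert. The `PrepareBatch`/`Append`/`Send` calls mirror the `clickhouse-go` v2 batch API, but the buffer policy and the `seriesIDFor` helper are illustrative assumptions, not a committed design; `Read`, locking, and a deadline-based background flush are elided:

```go
package storage

import (
	"context"
	"crypto/sha256"
	"fmt"
	"time"

	"github.com/ClickHouse/clickhouse-go/v2/lib/driver"
	"github.com/prometheus/prometheus/prompb"
)

// clickhouseStorage sketches a batching implementation of the Storage
// interface above. Writes to the lookup tables are omitted for brevity.
type clickhouseStorage struct {
	conn      driver.Conn
	buf       []prompb.TimeSeries
	batchSize int
}

func (s *clickhouseStorage) Write(ctx context.Context, req *prompb.WriteRequest) error {
	s.buf = append(s.buf, req.Timeseries...)
	if len(s.buf) < s.batchSize {
		return nil // keep accumulating; a background ticker would flush on a deadline
	}
	return s.flush(ctx)
}

func (s *clickhouseStorage) flush(ctx context.Context) error {
	batch, err := s.conn.PrepareBatch(ctx, "INSERT INTO samples (series_id, timestamp, value)")
	if err != nil {
		return err
	}
	for _, ts := range s.buf {
		id := seriesIDFor(ts.Labels)
		for _, sample := range ts.Samples {
			// remote_write timestamps are milliseconds since the Unix epoch.
			if err := batch.Append(id, time.UnixMilli(sample.Timestamp), sample.Value); err != nil {
				return err
			}
		}
	}
	s.buf = s.buf[:0]
	return batch.Send()
}

// seriesIDFor derives a stable, UUID-shaped identifier from a label set
// (hypothetical helper; assumes labels arrive sorted, as remote_write requires).
func seriesIDFor(lbls []prompb.Label) string {
	h := sha256.New()
	for _, l := range lbls {
		h.Write([]byte(l.Name))
		h.Write([]byte{0})
		h.Write([]byte(l.Value))
		h.Write([]byte{0})
	}
	sum := h.Sum(nil)
	return fmt.Sprintf("%x-%x-%x-%x-%x", sum[0:4], sum[4:6], sum[6:8], sum[8:10], sum[10:16])
}
```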
-
-**On the read path**, we aim to allow our users to use the Prometheus `remote_read` API and to query ingested data via PromQL & SQL. Support for the `remote_read` API should be trivial to implement, while supporting PromQL requires translating it into SQL. We can, however, employ already existing [PromQL](https://github.com/prometheus/prometheus/tree/main/promql/parser) parsing libraries, as in the sketch below.
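
For instance, a minimal, hypothetical sketch of that translation using the Prometheus parser package could look as follows. Only bare vector selectors with equality matchers are handled, and a real translator must walk the full expression tree and bind values as query parameters instead of interpolating strings:

```go
package promqlsql

import (
	"fmt"
	"strings"

	"github.com/prometheus/prometheus/model/labels"
	"github.com/prometheus/prometheus/promql/parser"
)

// SelectorToSQL translates a bare PromQL vector selector such as
// `up{job="api"}` into a lookup against the labels_to_series table
// proposed below. The parser folds the metric name into a __name__ matcher.
func SelectorToSQL(query string) (string, error) {
	expr, err := parser.ParseExpr(query)
	if err != nil {
		return "", err
	}
	sel, ok := expr.(*parser.VectorSelector)
	if !ok {
		return "", fmt.Errorf("only plain vector selectors are supported in this sketch")
	}
	var conds []string
	for _, m := range sel.LabelMatchers {
		if m.Type != labels.MatchEqual {
			return "", fmt.Errorf("matcher %s is not supported in this sketch", m)
		}
		// WARNING: illustrative only; production code must use bound parameters.
		conds = append(conds, fmt.Sprintf("labels['%s'] = '%s'", m.Name, m.Value))
	}
	return "SELECT series_id FROM labels_to_series WHERE " + strings.Join(conds, " AND "), nil
}
```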
-
-We aim to focus on implementing query validation & sanitation, rate-limiting and regulating resource-consumption to ensure underlying systems, especially storage, remain in good operational health at all times.
-
-### Supported deployments
-
-In this first iteration of the metrics backend, we intend to support a generic deployment model that makes sure we can capture as much usage as possible and begin dogfooding the product as soon as possible. This is well illustrated in the [aforementioned architecture diagram](#proposal).
-
-In its most vanilla form, metrics support in GitLab Observability Backend can be used via the Prometheus remote read & write APIs. If a user already uses Prometheus as their monitoring abstraction, their Prometheus can be configured to use this backend directly.
-
-- remote_write: [configuration](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#remote_write)
-- remote_read: [configuration](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#remote_read)
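
For illustration only, a scrape-side Prometheus configuration pointing at such a backend might look like the following; the URLs and credentials are placeholders, not committed API paths:

```yaml
remote_write:
  - url: https://observe.example.com/api/v1/write   # placeholder endpoint
    basic_auth:
      username: <tenant-id>      # placeholder credentials
      password: <access-token>

remote_read:
  - url: https://observe.example.com/api/v1/read    # placeholder endpoint
    read_recent: true
```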
-
-Users of the system who do not run a Prometheus instance to scrape their telemetry data can instead export their metrics via a multitude of collectors/agents, such as the OpenTelemetry Collector or the Prometheus Agent, all of which can be configured to use our remote_write endpoint. For reads, however, we intend to run a Prometheus within GOB (alongside the application server) itself, then hook it up automatically with the GitLab Observability UI (GOUI), preconfigured to consume our remote_read endpoint.
-
-Notably, the GOB-run Prometheus instance is only needed while we can only support the remote_read API for running queries. Looking forward to our next iteration, we should be able to get rid of this additional component altogether once we have full support for executing PromQL and/or SQL queries directly from GOUI.
-
-**Per-group deployments**: From a scalability perspective, we deploy an instance of Ingress, a Prometheus instance & the application server per group to make sure we can scale them subject to traffic volumes of the respective tenant. It also helps isolate resource consumption across tenants in an otherwise multi-tenant system.
-
-### Metric collection and storage
-
-It is important to separate metric collection on the client side from the storage we provision at our end.
-
-### State of the art for storage
-
-Existing long-term, Prometheus-compatible metrics vendors provide APIs that are compatible with Prometheus remote_write.
-
-### State of the art for Prometheus clients
-
-Metric collection clients such as Prometheus itself, Grafana Cloud Agent, Datadog Agent, etc. will scrape metrics endpoints typically from within a firewalled environment, store locally scraped metrics in a [Write Ahead Log (WAL)](https://en.wikipedia.org/wiki/Write-ahead_logging) and then batch send them to an external environment (i.e. the vendor or an internally managed system like Thanos) via the Prometheus `remote_write` protocol.
-
-- A client-side collector is an important part of the overall architecture, though it's owned by the customer/user since it needs to run in their environment. This gives the end user full control over their data because they control how it is collected and to where it is delivered.
-
-- It's **not** feasible to provide an external vendor with credentials to access and scrape endpoints within a user's firewalled environment.
-
-- It's also critically important that our `remote_write` APIs respond correctly with the appropriate rate-limiting status codes so that Prometheus Clients can respect them.
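
A minimal sketch of that behaviour at the HTTP layer (the middleware shape, tenant header, and limiter callback are hypothetical; only the `429` status code and `Retry-After` header semantics come from the HTTP specification):

```go
package server

import "net/http"

// rateLimitedWrite rejects over-limit remote_write requests with 429 so
// that well-behaved Prometheus clients back off and retry later.
func rateLimitedWrite(next http.Handler, allow func(tenant string) bool) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		if !allow(r.Header.Get("X-Tenant-ID")) { // hypothetical tenant header
			w.Header().Set("Retry-After", "30") // seconds until the client may retry
			http.Error(w, "write rate limit exceeded", http.StatusTooManyRequests)
			return
		}
		next.ServeHTTP(w, r)
	})
}
```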
-
-[Here](https://grafana.com/blog/2021/05/26/the-future-of-prometheus-remote-write/) is a good background/history on Prometheus `remote_write` and its importance in Prometheus based observability.
-
-## Design and implementation details
-
-Following are details of how we aim to design & implement the proposed solution. To that end, a reference implementation was also developed to understand the scope of the problem and provide early data to ensure our proposal was drafted around informed decisions and/or results of our experimentation.
-
-## Reference implementation(s)
-
-- [Application server](https://gitlab.com/gitlab-org/opstrace/opstrace/-/merge_requests/1823)
-- [Metrics generator](https://gitlab.com/ankitbhatnagar/metrics-gen/-/blob/main/main.go)
-
-## Target environments
-
-Keeping in line with our current operational structure, we intend to deploy the metrics offering as a part of GitLab Observability Backend, deployed on the following two target environments:
-
-- kind cluster (for local development)
-- GKE cluster (for staging/production environments)
-
-## Schema Design
-
-### **Proposed solution**: Fully normalized tables for decreased redundancy & increased read performance
-
-### primary, denormalized data table
-
-```sql
-CREATE TABLE IF NOT EXISTS samples ON CLUSTER '{cluster}' (
- series_id UUID,
- timestamp DateTime64(3, 'UTC') CODEC(Delta(4), ZSTD),
- value Float64 CODEC(Gorilla, ZSTD)
-) ENGINE = ReplicatedMergeTree()
-PARTITION BY toYYYYMMDD(timestamp)
-ORDER BY (series_id, timestamp)
-```
-
-### metadata table to support timeseries metadata/exemplars
-
-```sql
-CREATE TABLE IF NOT EXISTS samples_metadata ON CLUSTER '{cluster}' (
- series_id UUID,
- timestamp DateTime64(3, 'UTC') CODEC(Delta(4), ZSTD),
- metadata Map(String, String) CODEC(ZSTD),
-) ENGINE = ReplicatedMergeTree()
-PARTITION BY toYYYYMMDD(timestamp)
-ORDER BY (series_id, timestamp)
-```
-
-### lookup table(s)
-
-```sql
-CREATE TABLE IF NOT EXISTS labels_to_series ON CLUSTER '{cluster}' (
- labels Map(String, String) CODEC(ZSTD),
- series_id UUID
-) engine=ReplicatedMergeTree
-PRIMARY KEY (labels, series_id)
-```
-
-```sql
-CREATE TABLE IF NOT EXISTS group_to_series ON CLUSTER '{cluster}' (
- group_id UInt64,
- series_id UUID
-) ENGINE = ReplicatedMergeTree()
-ORDER BY (group_id, series_id)
-```
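
To make the lookup flow concrete, a hypothetical read against these tables resolves a label selector to series IDs first and then range-scans `samples`; the metric name and time bounds below are placeholders:

```sql
SELECT series_id, timestamp, value
FROM samples
WHERE series_id IN (
    SELECT series_id
    FROM labels_to_series
    WHERE labels['__name__'] = 'http_requests_total'
      AND labels['job'] = 'api'
)
  AND timestamp >= toDateTime64('2022-11-01 00:00:00', 3, 'UTC')
  AND timestamp <  toDateTime64('2022-11-02 00:00:00', 3, 'UTC')
ORDER BY series_id, timestamp
```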
-
-### Refinements
-
-- sharding considerations for a given tenant when ingesting/persisting data if we intend to co-locate data specific to multiple tenants within the same database tables. To simplify things, segregating tenant-specific data into its own dedicated set of tables would make a lot of sense.
-
-- structural considerations for "timestamps" when ingesting data across tenants.
-
-- creation_time vs ingestion_time
-
-- The native client has no support for transactions yet, which we would need to effectively manage writes across multiple tables.
-
-NOTE:
-Slightly non-trivial, but we can potentially investigate the possibility of using ClickHouse/ch-go directly; it supposedly promises a better performance profile too.
-
-### Pros - multiple tables
-
-- Normalised data structuring allows for efficient storage of data, removing any redundancy across multiple samples for a given timeseries. Evidently, for the "samples" schema, we expect to store 32 bytes of data per metric point.
-
-- Better search complexity when filtering timeseries by labels/metadata, via the use of better indexed columns.
-
-- All data is identifiable via a unique identifier, which can be used to maintain data consistency across tables.
-
-### Cons - multiple tables
-
-- Writes are more expensive, since each ingested sample fans out into writes across multiple tables.
-
-- Writes across tables also need to be implemented as a transaction to guarantee consistency when ingesting data.
-
-### Operational characteristics - multiple tables
-
-### Storage - multiple tables
-
-A major portion of our writes are made into the `samples` schema, which stores a tuple of three data points per metric point written:
-
-| Column | Data type | Byte size |
-|:------------|:-----------|:----------|
-| `series_id` | UUID | 16 bytes |
-| `timestamp` | DateTime64 | 8 bytes |
-| `value` | Float64 | 8 bytes |
-
-Therefore, we estimate using 32 bytes per ingested sample.
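-
-As an illustrative calculation: at the 1M points/sec we would ideally design for (see the Scalability section below), 32 bytes per sample amounts to roughly 32 MB/s of raw sample data, or about 2.7 TB per day before compression.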
-
-### Compression - multiple tables
-
-Inspecting the compression we achieve with this design on our major schemas suggests a good starting point. The following measurements cover both primary tables:
-
-**Schema**: `labels_to_series` containing close to 12k unique `series_id`, each mapping to a set of 10-12 label string pairs
-
-```sql
-SELECT
- table,
- column,
- formatReadableSize(sum(data_compressed_bytes) AS x) AS compressedsize,
- formatReadableSize(sum(data_uncompressed_bytes)) AS uncompressed
-FROM system.parts_columns
-WHERE table LIKE 'labels_to_series_1'
-GROUP BY
- database,
- table,
- column
-ORDER BY x ASC
-
-Query id: 723b4145-14f7-4e74-9ada-01c17c2f1fd5
-
-┌─table──────────────┬─column────┬─compressedsize─┬─uncompressed─┐
-│ labels_to_series_1 │ labels │ 586.66 KiB │ 2.42 MiB │
-│ labels_to_series_1 │ series_id │ 586.66 KiB │ 2.42 MiB │
-└────────────────────┴───────────┴────────────────┴──────────────┘
-```
-
-**Schema**: `samples` containing about 20k metric samples, each a tuple comprising `series_id` (16 bytes), `timestamp` (8 bytes) and `value` (8 bytes).
-
-```sql
-SELECT
- table,
- column,
- formatReadableSize(sum(data_compressed_bytes) AS x) AS compressedsize,
- formatReadableSize(sum(data_uncompressed_bytes)) AS uncompressed
-FROM system.parts_columns
-WHERE table LIKE 'samples_1'
-GROUP BY
- database,
- table,
- column
-ORDER BY x ASC
-
-Query id: 04219cea-06ea-4c5f-9287-23cb23c023d2
-
-┌─table─────┬─column────┬─compressedsize─┬─uncompressed─┐
-│ samples_1 │ value │ 373.21 KiB │ 709.78 KiB │
-│ samples_1 │ timestamp │ 373.21 KiB │ 709.78 KiB │
-│ samples_1 │ series_id │ 373.21 KiB │ 709.78 KiB │
-└───────────┴───────────┴────────────────┴──────────────┘
-```
-
-### Performance - multiple tables
-
-Profiling our reference implementation shows that most of our time right now is spent in the application writing data to Clickhouse and in related operations. A `top` pprof profile sampled from the implementation looked like:
-
-```shell
-(pprof) top
-Showing nodes accounting for 42253.20kB, 100% of 42253.20kB total
-Showing top 10 nodes out of 58
- flat flat% sum% cum cum%
-13630.30kB 32.26% 32.26% 13630.30kB 32.26% github.com/ClickHouse/clickhouse-go/v2/lib/compress.NewWriter (inline)
-11880.92kB 28.12% 60.38% 11880.92kB 28.12% github.com/ClickHouse/clickhouse-go/v2/lib/compress.NewReader (inline)
- 5921.37kB 14.01% 74.39% 5921.37kB 14.01% bufio.NewReaderSize (inline)
- 5921.37kB 14.01% 88.41% 5921.37kB 14.01% bufio.NewWriterSize (inline)
- 1537.69kB 3.64% 92.04% 1537.69kB 3.64% runtime.allocm
- 1040.73kB 2.46% 94.51% 1040.73kB 2.46% github.com/aws/aws-sdk-go/aws/endpoints.init
- 1024.41kB 2.42% 96.93% 1024.41kB 2.42% runtime.malg
- 768.26kB 1.82% 98.75% 768.26kB 1.82% go.uber.org/zap/zapcore.newCounters
- 528.17kB 1.25% 100% 528.17kB 1.25% regexp.(*bitState).reset
- 0 0% 100% 5927.73kB 14.03% github.com/ClickHouse/clickhouse-go/v2.(*clickhouse).Ping
-```
-
-As our preliminary analysis above makes evident, writing data into Clickhouse can become a bottleneck. On the write path, it would therefore be prudent to batch our writes into Clickhouse, reducing the amount of work the application server does and making the ingestion path more efficient.
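-
-To make this batching concrete, below is a minimal sketch of the batched write path - assuming the `clickhouse-go` v2 client our reference implementation already links against, with an illustrative `Sample` type and flush policy:
-
-```go
-package main
-
-import (
-	"context"
-	"log"
-	"time"
-
-	"github.com/ClickHouse/clickhouse-go/v2"
-	"github.com/ClickHouse/clickhouse-go/v2/lib/driver"
-	"github.com/google/uuid"
-)
-
-// Sample is an illustrative in-memory representation of one metric point.
-type Sample struct {
-	SeriesID  uuid.UUID
-	Timestamp time.Time
-	Value     float64
-}
-
-// flush sends one buffered batch as a single INSERT instead of issuing
-// one small write per sample.
-func flush(ctx context.Context, conn driver.Conn, buf []Sample) error {
-	batch, err := conn.PrepareBatch(ctx, "INSERT INTO samples (series_id, timestamp, value)")
-	if err != nil {
-		return err
-	}
-	for _, s := range buf {
-		if err := batch.Append(s.SeriesID, s.Timestamp, s.Value); err != nil {
-			return err
-		}
-	}
-	return batch.Send()
-}
-
-func main() {
-	conn, err := clickhouse.Open(&clickhouse.Options{Addr: []string{"localhost:9000"}})
-	if err != nil {
-		log.Fatal(err)
-	}
-	// Illustrative only: a real ingester would flush on both a size
-	// threshold and a ticker, and would retry failed batches.
-	buf := []Sample{{SeriesID: uuid.New(), Timestamp: time.Now(), Value: 1.0}}
-	if err := flush(context.Background(), conn, buf); err != nil {
-		log.Fatal(err)
-	}
-}
-```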
-
-On the read path, it's also possible to parallelize reads for the samples table either by `series_id` OR by blocks of time between the queried start and end timestamps.
-
-### Caveats
-
-- When dropping labels from already existing metrics, we treat their new counterparts as completely new series and hence attribute them to a new `series_id`. This avoids having to merge series data and/or values. The old series, if not actively written into, should eventually fall off their retention and get deleted.
-
-- We have not yet accounted for any data aggregation. Our assumption is that the backing store (in Clickhouse) should allow us to keep a "sufficient" amount of data in its raw form and that we should be able to query against it within our query latency SLOs.
-
-### **Rejected alternative**: Single, centralized table
-
-### single, centralized data table
-
-```sql
-CREATE TABLE IF NOT EXISTS metrics ON CLUSTER '{cluster}' (
- group_id UInt64,
- name LowCardinality(String) CODEC(ZSTD),
- labels Map(String, String) CODEC(ZSTD),
- metadata Map(String, String) CODEC(ZSTD),
- value Float64 CODEC (Gorilla, ZSTD),
- timestamp DateTime64(3, 'UTC') CODEC(Delta(4),ZSTD)
-) ENGINE = ReplicatedMergeTree()
-PARTITION BY toYYYYMMDD(timestamp)
-ORDER BY (group_id, name, timestamp);
-```
-
-### Pros - single table
-
-- Single source of truth, so all metrics data lives in one big table.
-
-- Querying data is easier to express in SQL, without having to query across multiple tables.
-
-### Cons - single table
-
-- Huge redundancy built into the data structure since attributes such as name, labels, metadata are stored repeatedly for each sample collected.
-
-- Non-trivial complexity when searching timeseries by label/metadata values, given they are stored in Map/Array-backed columns.
-
-- High query latencies by virtue of having to scan large amounts of data per query.
-
-### Operational Characteristics - single table
-
-### Storage - single table
-
-| Column | Data type | Byte size |
-|:------------|:--------------------|:----------|
-| `group_id`  | UInt64              | 8 bytes   |
-| `name` | String | - |
-| `labels` | Map(String, String) | - |
-| `metadata` | Map(String, String) | - |
-| `value` | Float64 | 8 bytes |
-| `timestamp` | DateTime64 | 8 bytes |
-
-NOTE:
-Strings can be of arbitrary length and may contain an arbitrary set of bytes, including null bytes, so we need to regulate what we write into these columns on the application side.
-
-### Compression - single table
-
-**Schema**: `metrics` containing about 20k metric samples each consisting of a `group_id`, `metric name`, `labels`, `metadata`, `timestamp` & corresponding `value`.
-
-```sql
-SELECT count(*)
-FROM metrics_1
-
-Query id: e580f20b-b422-4d93-bb1f-eb1435761604
-
-┌─count()─┐
-│ 12144 │
-└─────────┘
-
-SELECT
- table,
- column,
- formatReadableSize(sum(data_compressed_bytes) AS x) AS compressedsize,
- formatReadableSize(sum(data_uncompressed_bytes)) AS uncompressed
-FROM system.parts_columns
-WHERE table LIKE 'metrics_1'
-GROUP BY
- database,
- table,
- column
-ORDER BY x ASC
-
-Query id: b2677493-3fbc-46c1-a9a7-4524a7a86cb4
-
-┌─table─────┬─column────┬─compressedsize─┬─uncompressed─┐
-│ metrics_1 │ labels │ 283.02 MiB │ 1.66 GiB │
-│ metrics_1 │ metadata │ 283.02 MiB │ 1.66 GiB │
-│ metrics_1 │ group_id │ 283.02 MiB │ 1.66 GiB │
-│ metrics_1 │ value │ 283.02 MiB │ 1.66 GiB │
-│ metrics_1 │ name │ 283.02 MiB │ 1.66 GiB │
-│ metrics_1 │ timestamp │ 283.02 MiB │ 1.66 GiB │
-└───────────┴───────────┴────────────────┴──────────────┘
-```
-
-Though we see a good compression factor for this schema, storing the corresponding dataset still requires approximately 300 MiB. We expect this footprint to grow linearly given the redundancy baked into the schema design, which is one of the reasons we intend **not** to proceed with this design.
-
-### Performance - single table
-
-```shell
-(pprof) top
-Showing nodes accounting for 12844.95kB, 100% of 12844.95kB total
-Showing top 10 nodes out of 40
- flat flat% sum% cum cum%
- 2562.81kB 19.95% 19.95% 2562.81kB 19.95% runtime.allocm
- 2561.90kB 19.94% 39.90% 2561.90kB 19.94% github.com/aws/aws-sdk-go/aws/endpoints.init
- 2374.91kB 18.49% 58.39% 2374.91kB 18.49% github.com/ClickHouse/clickhouse-go/v2/lib/compress.NewReader (inline)
- 1696.32kB 13.21% 71.59% 1696.32kB 13.21% bufio.NewWriterSize (inline)
- 1184.27kB 9.22% 80.81% 1184.27kB 9.22% bufio.NewReaderSize (inline)
- 1184.27kB 9.22% 90.03% 1184.27kB 9.22% github.com/ClickHouse/clickhouse-go/v2/lib/compress.NewWriter (inline)
- 768.26kB 5.98% 96.01% 768.26kB 5.98% go.uber.org/zap/zapcore.newCounters
- 512.20kB 3.99% 100% 512.20kB 3.99% runtime.malg
- 0 0% 100% 6439.78kB 50.13% github.com/ClickHouse/clickhouse-go/v2.(*clickhouse).Ping
- 0 0% 100% 6439.78kB 50.13% github.com/ClickHouse/clickhouse-go/v2.(*clickhouse).acquire
-```
-
-Writes against this schema perform much better in terms of compute, given they are concentrated on one table and do not require looking up `series_id` from a side table.
-
-### General storage considerations - Clickhouse
-
-The following sections deep-dive into specific characteristics of our schema design and their interaction with Clickhouse, the database system.
-
-- table engines
-
- - [MergeTree](https://clickhouse.com/docs/en/engines/table-engines/mergetree-family/mergetree)
- - [S3 Table Engine](https://clickhouse.com/docs/en/engines/table-engines/integrations/s3)
-
-- efficient partitioning and/or sharding
-
- - Configuring our schemas with the right partitioning keys so as to have the least amount of blocks scanned when reading back the data.
- - Sharding here would refer to how we design our data placement strategy to make sure the cluster remains optimally balanced at all times.
-
-- data compression
-
-As is visible from the aforementioned preliminary results, we see good compression with dictionary and delta encoding for strings and floats respectively. When storing labels as a `Map` of `LowCardinality(String)`s, we were able to pack data efficiently.
-
-- materialized views
-
-These can be updated dynamically as needed and help make read paths more performant.
-
-- async inserts
-
-- batch inserts
-
-- retention/TTLs
-
-We should only store data for a predetermined period of time, after which we either delete it, aggregate it, or ship it to an archival store to reduce the operational cost of storing data for longer periods.
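-
-For illustration (the exact retention window remains to be decided), MergeTree-family table TTLs such as `TTL toDateTime(timestamp) + INTERVAL 30 DAY DELETE` can enforce such a policy directly within Clickhouse.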
-
-- data aggregation/rollups
-
-- index granularity
-
-- skip indexes
-
-- `max_server_memory_usage_to_ram_ratio`
-
-### Data access via SQL
-
-While our corpus of data is PromQL-queryable, it would be prudent to make the SQL interface
-"generally available" as well. This opens up multiple ways to query resident data and
-allows our users to slice and dice their datasets however they prefer or need to.
-
-#### Challenges
-
-- Resource/cost profiling.
-- Query validation and sanitation.
-
-### Illustrative example(s) of data access
-
-### Writes
-
-On the write path, we first register a given set of labels against a unique `series_id`, reusing an existing one if we have already seen the timeseries. For example:
-
-```plaintext
-redis{region="us-east-1",os="Ubuntu15.10",...} <TIMESTAMP> <VALUE>
-```
-
-**Schema**: labels_to_series
-
-```sql
-SELECT *
-FROM labels_to_series_1
-WHERE series_id = '6d926ae8-c3c3-420e-a9e2-d91aff3ac125'
-FORMAT Vertical
-
-Query id: dcbc4bd8-0bdb-4c35-823a-3874096aab6e
-
-Row 1:
-──────
-labels: {'arch':'x64','service':'1','__name__':'redis','region':'us-east-1','os':'Ubuntu15.10','team':'LON','service_environment':'production','rack':'36','service_version':'0','measurement':'pubsub_patterns','hostname':'host_32','datacenter':'us-east-1a'}
-series_id: 6d926ae8-c3c3-420e-a9e2-d91aff3ac125
-
-1 row in set. Elapsed: 0.612 sec.
-```
-
-We then register each metric point in the `samples` table, attributing it to the corresponding `series_id`.
-
-**Schema**: samples
-
-```sql
-SELECT *
-FROM samples_1
-WHERE series_id = '6d926ae8-c3c3-420e-a9e2-d91aff3ac125'
-LIMIT 1
-FORMAT Vertical
-
-Query id: f3b410af-d831-4859-8828-31c89c0385b5
-
-Row 1:
-──────
-series_id: 6d926ae8-c3c3-420e-a9e2-d91aff3ac125
-timestamp: 2022-11-10 12:59:14.939
-value: 0
-```
-
-### Reads
-
-On the read path, we first query all timeseries identifiers by searching for the labels under consideration. Once we have all the `series_id`(s), we then look up all corresponding samples between the query start timestamp and end timestamp.
-
-For example:
-
-```plaintext
-kernel{service_environment=~"prod.*", measurement="boot_time"}
-```
-
-which gets translated into first looking for all related timeseries:
-
-```sql
-SELECT *
-FROM labels_to_series
-WHERE
-((labels['__name__']) = 'kernel') AND
-match(labels['service_environment'], 'prod.*') AND
-((labels['measurement']) = 'boot_time');
-```
-
-yielding a bunch of `series_id`(s) corresponding to the labels just looked up.
-
-**Sidenote**: this mostly-static dataset can also be cached and built up in memory gradually to avoid paying the lookup latency on subsequent queries, which should reduce the number of database lookups considerably.
-
-To account for newer writes when maintaining this cache (a minimal sketch follows this list):
-
-- Have an out-of-band process/goroutine maintain this cache, so even if a few queries miss the most recent data, subsequent ones eventually catch up.
-
-- Have TTLs on the keys, jittered per key so as to rebuild them frequently enough to account for new writes.
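-
-A minimal sketch of such a cache - the TTL/jitter policy is illustrative, and `lookup` stands in for the `labels_to_series` query:
-
-```go
-package seriescache
-
-import (
-	"math/rand"
-	"sync"
-	"time"
-)
-
-type entry struct {
-	seriesIDs []string
-	expiresAt time.Time
-}
-
-// Cache memoizes label-matcher lookups against labels_to_series. TTLs are
-// jittered per key so entries expire at staggered times and gradually pick
-// up newly written series.
-type Cache struct {
-	mu      sync.Mutex
-	entries map[string]entry
-	baseTTL time.Duration
-}
-
-func New(baseTTL time.Duration) *Cache {
-	return &Cache{entries: map[string]entry{}, baseTTL: baseTTL}
-}
-
-// Get returns the cached series IDs for a canonicalized matcher string,
-// falling back to lookup (i.e. a SELECT on labels_to_series) on miss or expiry.
-func (c *Cache) Get(key string, lookup func(string) []string) []string {
-	c.mu.Lock()
-	defer c.mu.Unlock()
-	if e, ok := c.entries[key]; ok && time.Now().Before(e.expiresAt) {
-		return e.seriesIDs
-	}
-	ids := lookup(key)
-	jitter := time.Duration(rand.Int63n(int64(c.baseTTL / 2))) // up to +50% per key
-	c.entries[key] = entry{seriesIDs: ids, expiresAt: time.Now().Add(c.baseTTL + jitter)}
-	return ids
-}
-```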
-
-Once we know which timeseries we're querying for, we can look up all samples via the following query:
-
-```sql
-SELECT *
-FROM samples
-WHERE series_id IN (
- 'a12544be-0a3a-4693-86b0-c61a4553aea3',
- 'abd42fc4-74c7-4d80-9b6c-12f673db375d',
- …
-)
-AND timestamp >= '1667546789'
-AND timestamp <= '1667633189'
-ORDER BY timestamp;
-```
-
-yielding all timeseries samples we were interested in.
-
-We then render these into an array of `prometheus.QueryResult` objects and return them to the caller as a `prometheus.ReadResponse` object.
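-
-A minimal sketch of this rendering step, assuming the `prompb` types from the Prometheus code base and a hypothetical `labelsFor` helper that resolves a `series_id` back to its label set:
-
-```go
-package readpath
-
-import "github.com/prometheus/prometheus/prompb"
-
-// toReadResponse renders rows already grouped by series_id (and ordered by
-// timestamp, as returned by the samples query) as a remote-read response.
-func toReadResponse(rows map[string][]prompb.Sample, labelsFor func(string) []prompb.Label) *prompb.ReadResponse {
-	result := &prompb.QueryResult{}
-	for seriesID, samples := range rows {
-		result.Timeseries = append(result.Timeseries, &prompb.TimeSeries{
-			Labels:  labelsFor(seriesID), // resolved via labels_to_series
-			Samples: samples,
-		})
-	}
-	return &prompb.ReadResponse{Results: []*prompb.QueryResult{result}}
-}
-```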
-
-NOTE:
-The queries were broken into multiple statements only during our early experimentation/iteration; going into production/benchmarking, it would be prudent to use subqueries within the same round trip to the database.
-
-## Production Readiness
-
-### Batching
-
-Because we'll need to batch large volumes of small writes before ingesting them into Clickhouse, the design must account for app-local persistence, allowing incoming data to be batched locally and landed into Clickhouse in batches of a predetermined size. This increases performance and allows the table engine to continue persisting data successfully.
-
-We have considered the following alternatives to implement app-local batching:
-
-- In-memory - non-durable
-- BadgerDB - durable, embedded, performant
-- Redis - trivial, external dependency
-- Kafka - non-trivial, external dependency but it can augment multiple other use-cases and help other problem domains at GitLab.
-
-**Note**: Similar challenges have also surfaced with the Clickhouse interactions the `errortracking` subsystem has in its current implementation. There have been multiple attempts to solve this problem domain in the past - [this MR](https://gitlab.com/gitlab-org/opstrace/opstrace/-/merge_requests/1660) implemented an in-memory alternative while [this one](https://gitlab.com/gitlab-org/opstrace/opstrace/-/merge_requests/1767) attempted an on-disk alternative.
-
-Any work done in this area of concern would also benefit other subsystems such as errortracking, logging, etc.
-
-### Scalability
-
-We intend to start testing the proposed implementation at 10K metric points per second to establish our initial hypothesis, though ideally we must design the underlying backend for 1M points ingested per second.
-
-### Benchmarking
-
-We propose the following three dimensions be tested while benchmarking the proposed implementation:
-
-- Data ingest performance
-- On-disk storage requirements (accounting for replication if applicable)
-- Mean query response times
-
-To understand query performance, we'll first need to compile a list of representative queries given the data we ingest for our tests. Clickhouse query logging is very helpful while doing this.
-
-NOTE:
-Ideally, we aim to benchmark the system to ingest more than 1M metric points per second while consistently serving most queries in under one second.
-
-### Past work & references
-
-- [Benchmark ClickHouse for metrics](https://gitlab.com/gitlab-org/opstrace/opstrace/-/issues/1666)
-- [Incubation:APM ClickHouse evaluation](https://gitlab.com/gitlab-org/incubation-engineering/apm/apm/-/issues/4)
-- [Incubation:APM ClickHouse metrics schema](https://gitlab.com/gitlab-org/incubation-engineering/apm/apm/-/issues/10)
-- [Our research around TimescaleDB](https://gitlab.com/gitlab-com/gl-infra/reliability/-/issues/14137)
-- [Current Workload on our Thanos-based setup](https://gitlab.com/gitlab-com/gl-infra/reliability/-/issues/15420#current-workload)
-- [Scaling-200m-series](https://opstrace.com/blog/scaling-200m-series)
-
-### Cost-estimation
-
-- We aim to make sure the system is not too expensive to operate, especially given our biggest footprint is Clickhouse and the underlying storage.
-
-- We must consider the usage of multiple storage medium(s), especially:
- - Tiered storage
- - Object storage
-
-### Tooling
-
-- We aim to build visibility into high-cardinality metrics to help keep our databases healthy by pruning/dropping unused metrics.
-
-- Similarly, we aim to give end-users the ability to see unused metrics, which can be built into the system easily and dynamically by parsing all read requests and building usage statistics.
-
-- We aim to add monitoring for per-metric scrape frequencies to make sure the end-user is not ingesting data at a volume they do not need and/or find useful.
-
-## Looking forward
-
-### Linkage across telemetry pillars, exemplars
-
-We must build the metrics system in a way that allows cross-referencing ingested data with other telemetry pillars, such as traces, logs and errors, so as to provide a more holistic view of all instrumentation a system sends our way.
-
-### User-defined SQL queries to aggregate data and/or generate materialized views
-
-We should allow users of the system to run ad-hoc, user-defined queries, similar to how Prometheus recording rules help generate custom metrics from existing ones.
-
-### Write Ahead Logs (WALs)
-
-Should we need to start buffering data locally in the ingestion application and/or move away from Clickhouse for persisting data, we believe on-disk WALs would be a good direction to take, given their prevalent usage among other monitoring systems.
-
-### Custom DSLs or query builders
-
-Using PromQL directly presents a steep learning curve for users. It would be valuable to have a query builder (as is common in Grafana) to allow building the typical queries you'd expect to run and to allow exploration of the available metrics. It also serves as a way to learn the DSL, so more complex queries can be created later.
-
-## Roadmap & Next Steps
-
-The following list sets out how we intend to implement the aforementioned proposal for building Metrics support into GitLab Observability Service. Each corresponding document and/or issue contains further details of how each next step is planned to be executed.
-
-- **DONE** [Research & draft design proposal and/or requirements](https://docs.google.com/document/d/1kHyIoWEcs14sh3CGfKGiI8QbCsdfIHeYkzVstenpsdE/edit?usp=sharing)
-- **IN-PROGRESS** [Submit system/schema designs (proposal) & gather feedback](https://docs.google.com/document/d/1kHyIoWEcs14sh3CGfKGiI8QbCsdfIHeYkzVstenpsdE/edit?usp=sharing)
-- **IN-PROGRESS** [Develop table definitions and/or storage interfaces](https://gitlab.com/gitlab-org/opstrace/opstrace/-/issues/1666)
-- **IN-PROGRESS** [Prototype reference implementation, instrument key metrics](https://gitlab.com/gitlab-org/opstrace/opstrace/-/merge_requests/1823)
-- [Benchmark Clickhouse and/or proposed schemas, gather expert advice from Clickhouse Inc.](https://gitlab.com/gitlab-org/opstrace/opstrace/-/issues/1666)
-- Develop write path(s) - `remote_write` API
-- Develop read path(s) - `remote_read` API, `PromQL`-based querier.
-- Setup testbed(s) for repeatable benchmarking/testing
-- Schema design and/or application server improvements if needed
-- Production Readiness v1.0-alpha/beta
-- Implement vanguarded/staged rollouts
-- Run extended alpha/beta testing
-- Release v1.0
diff --git a/doc/architecture/blueprints/gitlab_observability_backend/supported-deployments.png b/doc/architecture/blueprints/gitlab_observability_backend/supported-deployments.png
deleted file mode 100644
index 9dccc515129..00000000000
--- a/doc/architecture/blueprints/gitlab_observability_backend/supported-deployments.png
+++ /dev/null
Binary files differ
diff --git a/doc/architecture/blueprints/observability_metrics/index.md b/doc/architecture/blueprints/observability_metrics/index.md
new file mode 100644
index 00000000000..25a3b72a989
--- /dev/null
+++ b/doc/architecture/blueprints/observability_metrics/index.md
@@ -0,0 +1,286 @@
+---
+status: proposed
+creation-date: "2022-11-09"
+authors: [ "@ankitbhatnagar" ]
+coach: "@mappelman"
+approvers: [ "@sguyon", "@nicholasklick" ]
+owning-stage: "~monitor::observability"
+participating-stages: []
+---
+
+<!-- vale gitlab.FutureTense = NO -->
+
+# GitLab Observability - Metrics
+
+## Summary
+
+We are developing a multi-user system to store & query observability data, typically formatted in widely accepted, industry-standard formats such as OpenTelemetry, using Clickhouse as the underlying storage, with support for long-term data retention and aggregation.
+
+## Motivation
+
+Of the six pillars of Observability, commonly abbreviated as `TEMPLE` - Traces, Events, Metrics, Profiles, Logs & Errors - Metrics are among the most important for modern-day systems, helping their users gather insights about the operational posture of monitored systems.
+
+Metrics, commonly structured as timeseries data, have the following characteristics:
+
+- they are indexed by their corresponding timestamps;
+- they continuously expand in size;
+- they are usually aggregated, down-sampled, and queried in ranges; and
+- they have very write-intensive requirements.
+
+Within GitLab Observability Backend, we aim to add support for our customers to ingest and query observability data about their systems & applications, helping them improve the operational health of those systems.
+
+### Goals
+
+With the development of the proposed system, we have the following goals:
+
+- A scalable, low-latency & cost-effective monitoring system backed by Clickhouse, with performance proven via repeatable benchmarks.
+
+- Support for long-term storage for metrics, ingested via an OpenTelemetry-compliant agent and queried via GitLab-native UI with probable support for metadata and exemplars.
+
+The aforementioned goals can further be broken down into the following four sub-goals:
+
+#### Ingesting data
+
+- For the system to be capable of handling large volumes of writes and reads, it must be horizontally scalable & provide durability guarantees, ensuring no writes are dropped once ingested.
+
+#### Persisting data
+
+- We aim to support ingesting telemetry/data instrumented using OpenTelemetry specifications. For a first iteration, any persistence we design for our dataset will be multi-tenant by default, ensuring we can store observability data for multiple groups/projects within the same storage backend.
+
+#### Reading data
+
+- We aim to support querying data via a GitLab-native UX, which means using a custom DSL/query builder that sends API requests to our backend, which then translates them into Clickhouse SQL. From our internal discussions, the [Product Analytics Visualisation Designer](https://gitlab.com/gitlab-org/gitlab-services/design.gitlab.com/-/analytics/dashboards/visualization-designer) is a good source of inspiration for this.
+
+#### Deleting data
+
+- We aim to support deleting any ingested data should such a need arise, in addition to naturally deleting data when a configured TTL expires and/or the respective retention policies are enforced. Within our schemas, we must build a way to delete data by labels OR their content, and add the necessary tooling to our offering to do so.
+
+### Non-Goals
+
+With the goals established above, we also want to call out what is explicitly a non-goal of the current proposal:
+
+- With our first iteration here, we do not aim to support querying ingested telemetry via [PromQL](https://prometheus.io/docs/prometheus/latest/querying/basics/), deferring that until such a business need arises. However, users will be able to ingest their metrics using the OpenTelemetry Line Protocol (OTLP), e.g. via the [Prometheus Receiver](https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/receiver/prometheusreceiver/README.md) in the case of Prometheus metrics.
+
+## Proposal
+
+We intend to use GitLab Observability Backend (GOB) as a framework for the Metrics implementation so that its lifecycle can be managed via already established components of our backend.
+
+![Architecture](metrics_indexing_at_ingestion.png)
+
+As depicted in the diagram above, the OTEL-collector pipeline, indexer & query service are components that need to be developed as proposed here, while the remaining peripheral components either already exist or can be provisioned via existing code in our centralised `scheduler` within GOB.
+
+**On the write path**:
+
+- We expect to receive incoming data via `HTTP/JSON` similar to what we do for our existing services, e.g. errortracking, tracing.
+
+- We aim to heavily deduplicate incoming timeseries by indexing/caching per-series metadata to reduce our storage footprint.
+
+- We aim to avoid a large number of small writes into Clickhouse by batching data before it is written.
+
+**On the read path**:
+
+![MetricsReadPath](metrics-read-path.png)
+
+- We aim to allow our users to use GitLab itself to read ingested data, which will necessitate building a dedicated `Query Service` on our backend to be able to service API requests originating from GitLab.
+
+- We aim to implement the necessary query validation, sanitization and rate-limiting of resource consumption to ensure the underlying systems remain in good operational health at all times.
+
+### GitLab Observability Tenant
+
+With the recent changes to our backend design, especially around deprecating the use of a Grafana-based UX, we have found opportunities to streamline how we provision tenants within our system. This initiative has led to the development of a custom CR, `GitLabObservabilityTenant`, intended to model a dedicated set of resources **per top-level GitLab namespace**. From a scalability perspective, this means we deploy a dedicated instance of `Ingress` & `Ingester` per top-level GitLab namespace to make sure we can scale each tenant according to the traffic volumes of its respective groups & projects. It also helps isolate resource consumption across tenants in an otherwise multi-tenant system such as ours.
+
+### Indexing per-series metadata
+
+As an internal part of the `ingester`, we aim to index per-series labels and/or metadata so we can deduplicate incoming timeseries data and segregate it into metadata and points-data. This helps reduce our storage footprint by an order of magnitude, keeping the total cost of operation low. This indexed data can also be consumed by the `Query Service` to efficiently compute timeseries for all incoming read requests. This part of our architecture is also described in more detail in [Proposal: Indexing metrics labels for efficiently deduplicating & querying time series data](https://gitlab.com/gitlab-org/opstrace/opstrace/-/issues/2397).
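+
+To illustrate the deduplication step, a stable series identifier can be derived by canonicalizing the label set (sorting by key) and hashing it; the scheme below is an explanatory sketch only, not the agreed-upon implementation:
+
+```go
+package main
+
+import (
+	"crypto/sha256"
+	"fmt"
+	"sort"
+	"strings"
+)
+
+// seriesID canonicalizes a label set and hashes it, so the same labels
+// always map to the same identifier and duplicate incoming timeseries
+// can be collapsed at ingestion time.
+func seriesID(labels map[string]string) string {
+	keys := make([]string, 0, len(labels))
+	for k := range labels {
+		keys = append(keys, k)
+	}
+	sort.Strings(keys)
+	var b strings.Builder
+	for _, k := range keys {
+		fmt.Fprintf(&b, "%s=%q;", k, labels[k])
+	}
+	sum := sha256.Sum256([]byte(b.String()))
+	return fmt.Sprintf("%x", sum[:16]) // truncated to 128 bits, UUID-sized
+}
+
+func main() {
+	fmt.Println(seriesID(map[string]string{"__name__": "kernel", "measurement": "boot_time"}))
+}
+```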
+
+### Query Service
+
+The `Query Service` consists of two primary components: a request parser and a backend-specific querier implementation. On the request path, once a request is received on the designated endpoint(s), it is handled by a handler that is part of the request parser. The parser's responsibility is to unmarshal incoming query payloads, validate their contents and produce a `SearchContext` object, which describes how this query/request must be processed. Within a `SearchContext` object is a `QueryContext` attribute, which further defines one or more `Query` objects - each a completely independent data query against one of our backends.
+
+![QueryServiceInternals](query-service-internals.png)
+
+#### API structure
+
+For the user-facing API, we intend to add support via HTTP/JSON endpoint(s), with user queries marshalled as payloads within the request body. For example, to compute the sum over all values of the label `instance` of the minutely rate of the metric `apiserver_request_total`, you'd send a POST request to `https://observe.gitlab.com/query/$GROUP/$PROJECT/metrics` with the following body:
+
+```json
+{
+ "queries": {
+ "A": {
+ "type": "metrics",
+ "filters": [
+ {
+ "key": "__name__",
+ "value": "apiserver_request_total",
+ "operator": "eq"
+ }
+ ],
+ "aggregation": {
+ "function": "rate",
+ "interval": "1m"
+ },
+ "groupBy": {
+ "attribute": [
+ "instance"
+ ],
+ "function": "sum"
+ },
+ "sortBy": {},
+ "legend": {}
+ }
+ },
+ "expression": "A"
+}
+```
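+
+For orientation, this payload corresponds roughly to the PromQL expression `sum by (instance) (rate(apiserver_request_total[1m]))` - an informal analogy only, since PromQL support itself is a non-goal for this iteration.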
+
+#### Query representation as an AST
+
+```go
+type SearchContext struct {
+ UserContext *UserContext `json:"authContext"`
+ BackendContext *BackendContext `json:"backendContext"`
+
+ StartTimestamp int64 `json:"start"`
+ EndTimestamp int64 `json:"end"`
+ StepIntervalSeconds int64 `json:"step"`
+
+ QueryContext *QueryContext `json:"queryContext"`
+ CorrelationContext *CorrelationContext `json:"correlationContext"`
+ Variables map[string]interface{} `json:"variables,omitempty"`
+}
+```
+
+Generally speaking:
+
+- `SearchContext` defines how a search must be executed.
+ - It internally contains a `QueryContext` which points to one or more `Query`(s) each targeting a given backend.
+ - Each `Query` must be parsed & processed independently, supplemented by other common attributes within a `QueryContext` or `SearchContext`.
+
+- `Query` defines an AST-like object which describes how a query must be performed (see the sketch after this list).
+ - It is intentionally schema-agnostic allowing it to be serialised and passed around our system(s).
+ - It is also an abstraction that hides details of how we model data internal to our databases from the querying entity.
+ - Assuming an incoming query can be parsed & validated into a `Query` object, a `Querier` can execute a search/query against it.
+
+- `UserContext` defines if a request has access to the data being searched for.
+ - It is perhaps a good place to model & enforce request quotas, rate-limiting, etc.
+ - Populating parts of this attribute depends on the parser reading other global state via the API gateway or Gatekeeper.
+
+- `BackendContext` defines which backend a request must be processed against.
+ - It helps route requests to an appropriate backend in a multitenant environment.
+ - For this iteration though, we intend to work with only one backend as is the case with our architecture.
+
+- `CorrelationContext` defines how multiple queries can be correlated to each other to build a cohesive view on the frontend.
+ - For this iteration though, we intend to keep it empty and only work on adding correlation vectors later.
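+
+To make the above concrete, the sketch below shows what these types could look like in Go, mirroring the JSON payload above; the exact field set (here omitting `sortBy` and `legend`) is an assumption for illustration:
+
+```go
+package queryservice
+
+// Filter matches one label key against a value with an operator ("eq", "match", ...).
+type Filter struct {
+	Key      string `json:"key"`
+	Value    string `json:"value"`
+	Operator string `json:"operator"`
+}
+
+// Aggregation describes the function applied over each step interval.
+type Aggregation struct {
+	Function string `json:"function"` // e.g. "rate"
+	Interval string `json:"interval"` // e.g. "1m"
+}
+
+// GroupBy describes how per-series results are combined.
+type GroupBy struct {
+	Attribute []string `json:"attribute"` // e.g. ["instance"]
+	Function  string   `json:"function"`  // e.g. "sum"
+}
+
+// Query is the backend-agnostic, AST-like node: one independent data query.
+type Query struct {
+	Type        string      `json:"type"` // e.g. "metrics"
+	Filters     []Filter    `json:"filters"`
+	Aggregation Aggregation `json:"aggregation"`
+	GroupBy     GroupBy     `json:"groupBy"`
+}
+
+// QueryContext holds the named queries referenced by the expression.
+type QueryContext struct {
+	Queries    map[string]Query `json:"queries"`
+	Expression string           `json:"expression"`
+}
+
+// Querier executes one parsed Query against a concrete backend.
+type Querier interface {
+	Execute(q Query) (any, error)
+}
+```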
+
+## Intended target-environments
+
+In keeping with our current operational structure, we intend to deploy the metrics offering as a part of GitLab Observability Backend, deployed on the following two target environments:
+
+- kind cluster (for local development)
+- GKE cluster (for staging/production environments)
+
+## Production Readiness
+
+### Batching
+
+Because we'll need to batch large volumes of small writes before ingesting them into Clickhouse, the design must account for app-local persistence, allowing incoming data to be batched locally and landed into Clickhouse in batches of a predetermined size. This increases performance and allows the table engine to continue persisting data successfully.
+
+We have considered the following alternatives to implement app-local batching:
+
+- In-memory - non-durable
+- BadgerDB - durable, embedded, performant
+- Redis - trivial, external dependency
+- Kafka - non-trivial, external dependency but it can augment multiple other use-cases and help other problem domains at GitLab.
+
+**Note**: Similar challenges have also surfaced with the Clickhouse interactions the `errortracking` subsystem has in its current implementation. There have been multiple attempts to solve this problem domain in the past - [this MR](https://gitlab.com/gitlab-org/opstrace/opstrace/-/merge_requests/1660) implemented an in-memory alternative while [this one](https://gitlab.com/gitlab-org/opstrace/opstrace/-/merge_requests/1767) attempted an on-disk alternative.
+
+Any work done in this area of concern would also benefit other subsystems such as errortracking, logging, etc.
+
+### Scalability
+
+We intend to start testing the proposed implementation at 10K metric points per second to establish our initial hypothesis, though ideally we must design the underlying backend for 1M points ingested per second.
+
+### Benchmarking
+
+We propose the following three dimensions be tested while benchmarking the proposed implementation:
+
+- Data ingest performance (functional)
+- Mean query response times (functional)
+- Storage requirements (operational)
+
+To understand query performance, we'll first need to compile a list of representative queries given the data we ingest for our tests. Clickhouse query logging is very helpful while doing this.
+
+NOTE:
+Ideally, we aim to benchmark the system to ingest more than 1M metric points per second while consistently serving most queries in under one second.
+
+### Past work & references
+
+- [Benchmark ClickHouse for metrics](https://gitlab.com/gitlab-org/opstrace/opstrace/-/issues/1666)
+- [Incubation:APM ClickHouse evaluation](https://gitlab.com/gitlab-org/incubation-engineering/apm/apm/-/issues/4)
+- [Incubation:APM ClickHouse metrics schema](https://gitlab.com/gitlab-org/incubation-engineering/apm/apm/-/issues/10)
+- [Our research around TimescaleDB](https://gitlab.com/gitlab-com/gl-infra/reliability/-/issues/14137)
+- [Current Workload on our Thanos-based setup](https://gitlab.com/gitlab-com/gl-infra/reliability/-/issues/15420#current-workload)
+- [Scaling-200m-series](https://opstrace.com/blog/scaling-200m-series)
+
+### Cost-estimation
+
+- We aim to make sure the system is cost-effective for our users to ingest & query telemetry data with. One of the more significant factors affecting underlying costs is how we model & store ingested data, which the intended proposal must optimize for via measures such as reducing data redundancy, pruning unused metrics, etc.
+
+- We must consider the usage of multiple storage medium(s), especially:
+ - Tiered storage
+ - Object storage
+
+### Tooling
+
+As an overarching outcome here, we aim to build the necessary tooling and/or telemetry around ingested data to give all user personas visibility into high-cardinality metrics and help prune or drop unused ones. It would be prudent to have usage statistics, e.g. per-metric scrape frequencies, to make sure our end-users are not ingesting data at a volume they do not need and/or find useful.
+
+## Future iterations
+
+### Linkage across telemetry pillars, exemplars
+
+We must build the metrics system in a way that allows cross-referencing ingested data with other telemetry pillars, such as traces, logs and errors, so as to provide a more holistic view of all instrumentation a system sends our way.
+
+### Support for user-defined SQL queries to aggregate data and/or generate materialized views
+
+We should allow users of the system to run ad-hoc, user-defined queries, similar to how Prometheus recording rules help generate custom metrics from existing ones.
+
+### Support for scalable data ingestion
+
+Should we need to start buffering data locally in the ingestion application and/or move away from Clickhouse for persisting data, we believe on-disk WALs would be a good direction to take, given their prevalent usage among other monitoring systems.
+
+### Query Service features
+
+- Adding support for compound queries and/or expressions.
+- Consolidation of querying capabilities for tracing, logs & errortracking via the query engine.
+- Using the query engine to build integrations such as alerting.
+- Adding support for other monitoring/querying standards such as PromQL, MetricsQL, OpenSearch, etc.
+- Adding automated insights around metric cardinality & resource consumption.
+
+## Planned roadmap
+
+The following sections set out how we intend to implement the aforementioned proposal for building Metrics support into GitLab Observability Service. Each corresponding document and/or issue contains further details of how each step is planned to be executed.
+
+### 16.5
+
+- Research & draft design proposal and/or requirements.
+- Produce architectural blueprint, open for feedback.
+
+### 16.6
+
+- Develop support for OpenTelemetry-based ingestion.
+- Develop support for querying data; begin with an API to list all ingested metrics scoped to a given tenant.
+- Develop support for displaying a list of ingested metrics within GitLab UI.
+- Release Experimental version.
+
+### 16.7
+
+- Develop support for querying data, add metrics search endpoints for supported metric-types.
+- Develop our first iteration of the query builder, enable querying backend APIs.
+- Develop a metrics details page with the ability to graph data returned via backend APIs.
+- Setup testing, ensure repeatable benchmarking/testing can be performed.
+- Release Beta version, open for early usage by internal and external customers.
+
+### 16.9 (Gap to allow for user feedback for GA release)
+
+- Develop end-to-end testing, complete necessary production readiness, address feedback from users.
+- Release GA version.
diff --git a/doc/architecture/blueprints/observability_metrics/metrics-read-path.png b/doc/architecture/blueprints/observability_metrics/metrics-read-path.png
new file mode 100644
index 00000000000..f92d601a708
--- /dev/null
+++ b/doc/architecture/blueprints/observability_metrics/metrics-read-path.png
Binary files differ
diff --git a/doc/architecture/blueprints/observability_metrics/metrics_indexing_at_ingestion.png b/doc/architecture/blueprints/observability_metrics/metrics_indexing_at_ingestion.png
new file mode 100644
index 00000000000..6ce37588cff
--- /dev/null
+++ b/doc/architecture/blueprints/observability_metrics/metrics_indexing_at_ingestion.png
Binary files differ
diff --git a/doc/architecture/blueprints/observability_metrics/query-service-internals.png b/doc/architecture/blueprints/observability_metrics/query-service-internals.png
new file mode 100644
index 00000000000..f59d7096194
--- /dev/null
+++ b/doc/architecture/blueprints/observability_metrics/query-service-internals.png
Binary files differ
diff --git a/doc/update/deprecations.md b/doc/update/deprecations.md
index c32bc160feb..073f7a452a2 100644
--- a/doc/update/deprecations.md
+++ b/doc/update/deprecations.md
@@ -1773,6 +1773,20 @@ GitLab 16.0.
<div class="deprecation breaking-change" data-milestone="16.0">
+### GitLab administrators must have permission to modify protected branches or tags
+
+<div class="deprecation-notes">
+- Announced in GitLab <span class="milestone">16.0</span>
+- Removal in GitLab <span class="milestone">16.0</span> ([breaking change](https://docs.gitlab.com/ee/update/terminology.html#breaking-change))
+- To discuss this change or learn more, see the [deprecation issue](https://gitlab.com/gitlab-org/gitlab/-/issues/12776).
+</div>
+
+GitLab administrators can no longer perform actions on protected branches or tags unless they have been explicitly granted that permission. These actions include pushing and merging into a [protected branch](https://docs.gitlab.com/ee/user/project/protected_branches.html), unprotecting a branch, and creating [protected tags](https://docs.gitlab.com/ee/user/project/protected_tags.html).
+
+</div>
+
+<div class="deprecation breaking-change" data-milestone="16.0">
+
### GitLab self-monitoring project
<div class="deprecation-notes">
@@ -2395,6 +2409,20 @@ GitLab's operational container scanning capabilities no longer require starboard
</div>
+<div class="deprecation " data-milestone="16.0">
+
+### Stop publishing GitLab Runner images based on Windows Server 2004 and 20H2
+
+<div class="deprecation-notes">
+- Announced in GitLab <span class="milestone">16.0</span>
+- Removal in GitLab <span class="milestone">16.0</span>
+- To discuss this change or learn more, see the [deprecation issue](https://gitlab.com/gitlab-org/gitlab-runner/-/issues/31001).
+</div>
+
+As of GitLab 16.0, GitLab Runner images based on Windows Server 2004 and 20H2 will not be provided as these operating systems are end-of-life.
+
+</div>
+
<div class="deprecation breaking-change" data-milestone="16.0">
### Support for Praefect custom metrics endpoint configuration
@@ -2781,6 +2809,20 @@ For updates and details about this deprecation, follow [this epic](https://gitla
GitLab self-managed customers can still use the feature [with a feature flag](https://docs.gitlab.com/ee/update/deprecations.html#self-managed-certificate-based-integration-with-kubernetes).
</div>
+
+<div class="deprecation breaking-change" data-milestone="15.9">
+
+### `omniauth-authentiq` gem no longer available
+
+<div class="deprecation-notes">
+- Announced in GitLab <span class="milestone">15.9</span>
+- Removal in GitLab <span class="milestone">15.9</span> ([breaking change](https://docs.gitlab.com/ee/update/terminology.html#breaking-change))
+- To discuss this change or learn more, see the [deprecation issue](https://gitlab.com/gitlab-org/gitlab/-/issues/389452).
+</div>
+
+`omniauth-authentiq` is an OmniAuth strategy gem that was part of GitLab. The company providing authentication services, Authentiq, has shut down. Therefore the gem is being removed.
+
+</div>
</div>
<div class="milestone-wrapper" data-milestone="15.7">
@@ -2802,6 +2844,20 @@ Previously, variables that referenced or applied alias file variables expanded t
This breaking change fixes this issue but could disrupt user workflows that work around the behavior. With this change, job variable expansions that reference or apply alias file variables, expand to the file name or path of the `File` type variable, instead of its value, such as the file contents.
</div>
+
+<div class="deprecation " data-milestone="15.7">
+
+### Flowdock integration
+
+<div class="deprecation-notes">
+- Announced in GitLab <span class="milestone">15.7</span>
+- Removal in GitLab <span class="milestone">15.7</span>
+- To discuss this change or learn more, see the [deprecation issue](https://gitlab.com/gitlab-org/gitlab/-/issues/379197).
+</div>
+
+As of December 22, 2022, we are removing the Flowdock integration because the service was shut down on August 15, 2022.
+
+</div>
</div>
<div class="milestone-wrapper" data-milestone="15.6">
@@ -4336,6 +4392,22 @@ Configuring the `per_repository` Gitaly election strategy is [deprecated](https:
This change is part of regular maintenance to keep our codebase clean.
</div>
+
+<div class="deprecation breaking-change" data-milestone="14.9">
+
+### Integrated error tracking disabled by default
+
+<div class="deprecation-notes">
+- Announced in GitLab <span class="milestone">14.9</span>
+- Removal in GitLab <span class="milestone">14.9</span> ([breaking change](https://docs.gitlab.com/ee/update/terminology.html#breaking-change))
+- To discuss this change or learn more, see the [deprecation issue](https://gitlab.com/gitlab-org/gitlab/-/issues/353639).
+</div>
+
+In GitLab 14.4, GitLab released an integrated error tracking backend that replaces Sentry. This feature caused database performance issues. In GitLab 14.9, integrated error tracking is removed from GitLab.com, and turned off by default in GitLab self-managed. While we explore the future development of this feature, please consider switching to the Sentry backend by [changing your error tracking to Sentry in your project settings](https://docs.gitlab.com/ee/operations/error_tracking.html#sentry-error-tracking).
+
+For additional background on this removal, please reference [Disable Integrated Error Tracking by Default](https://gitlab.com/groups/gitlab-org/-/epics/7580). If you have feedback please add a comment to [Feedback: Removal of Integrated Error Tracking](https://gitlab.com/gitlab-org/gitlab/-/issues/355493).
+
+</div>
</div>
<div class="milestone-wrapper" data-milestone="14.8">
diff --git a/doc/user/project/integrations/index.md b/doc/user/project/integrations/index.md
index 6fb30c43b23..5809f62e933 100644
--- a/doc/user/project/integrations/index.md
+++ b/doc/user/project/integrations/index.md
@@ -115,36 +115,36 @@ To use custom settings for a project or group integration:
|-----------------------------------------------------------------------------|-----------------------------------------------------------------------|------------------------|
| [Asana](asana.md) | Add commit messages as comments to Asana tasks. | **{dotted-circle}** No |
| Assembla | Manage projects. | **{dotted-circle}** No |
-| [Atlassian Bamboo CI](bamboo.md) | Run CI/CD pipelines with Atlassian Bamboo. | **{check-circle}** Yes |
+| [Atlassian Bamboo](bamboo.md) | Run CI/CD pipelines with Atlassian Bamboo. | **{check-circle}** Yes |
| [Bugzilla](bugzilla.md) | Use Bugzilla as the issue tracker. | **{dotted-circle}** No |
| Buildkite | Run CI/CD pipelines with Buildkite. | **{check-circle}** Yes |
| Campfire | Connect to chat. | **{dotted-circle}** No |
-| [ClickUp](clickup.md) | Use ClickUp as the issue tracker. | **{dotted-circle}** No |
+| [ClickUp](clickup.md) | Use ClickUp as the issue tracker. | **{dotted-circle}** No |
| [Confluence Workspace](../../../api/integrations.md#confluence-integration) | Use Confluence Cloud Workspace as an internal wiki. | **{dotted-circle}** No |
| [Custom issue tracker](custom_issue_tracker.md) | Use a custom issue tracker. | **{dotted-circle}** No |
| [Datadog](../../../integration/datadog.md) | Trace your GitLab pipelines with Datadog. | **{check-circle}** Yes |
| [Discord Notifications](discord_notifications.md) | Send notifications about project events to a Discord channel. | **{dotted-circle}** No |
-| Drone CI | Run CI/CD pipelines with Drone. | **{check-circle}** Yes |
+| Drone | Run CI/CD pipelines with Drone. | **{check-circle}** Yes |
| [Emails on push](emails_on_push.md) | Send commits and diff of each push by email. | **{dotted-circle}** No |
-| [EWM](ewm.md) | Use IBM Engineering Workflow Management as the issue tracker. | **{dotted-circle}** No |
+| [Engineering Workflow Management (EWM)](ewm.md) | Use IBM Engineering Workflow Management as the issue tracker. | **{dotted-circle}** No |
| [External wiki](../wiki/index.md#link-an-external-wiki) | Link an external wiki. | **{dotted-circle}** No |
| [GitHub](github.md) | Obtain statuses for commits and pull requests. | **{dotted-circle}** No |
+| [GitLab for Slack app](gitlab_slack_application.md) | Use Slack's official GitLab application. | **{dotted-circle}** No |
| [Google Chat](hangouts_chat.md) | Send notifications from your GitLab project to a room in Google Chat. | **{dotted-circle}** No |
| [Harbor](harbor.md) | Use Harbor as the container registry. | **{dotted-circle}** No |
| [irker (IRC gateway)](irker.md) | Send IRC messages. | **{dotted-circle}** No |
| [Jenkins](../../../integration/jenkins.md) | Run CI/CD pipelines with Jenkins. | **{check-circle}** Yes |
-| JetBrains TeamCity CI | Run CI/CD pipelines with TeamCity. | **{check-circle}** Yes |
+| JetBrains TeamCity | Run CI/CD pipelines with TeamCity. | **{check-circle}** Yes |
| [Jira](../../../integration/jira/index.md) | Use Jira as the issue tracker. | **{dotted-circle}** No |
| [Mattermost notifications](mattermost.md) | Send notifications about project events to Mattermost channels. | **{dotted-circle}** No |
| [Mattermost slash commands](mattermost_slash_commands.md) | Perform common tasks with slash commands. | **{dotted-circle}** No |
| [Microsoft Teams notifications](microsoft_teams.md) | Receive event notifications. | **{dotted-circle}** No |
| Packagist | Keep your PHP dependencies updated on Packagist. | **{check-circle}** Yes |
-| [Pipelines emails](pipeline_status_emails.md) | Send the pipeline status to a list of recipients by email. | **{dotted-circle}** No |
+| [Pipeline status emails](pipeline_status_emails.md) | Send the pipeline status to a list of recipients by email. | **{dotted-circle}** No |
| [Pivotal Tracker](pivotal_tracker.md) | Add commit messages as comments to Pivotal Tracker stories. | **{dotted-circle}** No |
| [Pumble](pumble.md) | Send event notifications to a Pumble channel. | **{dotted-circle}** No |
| Pushover | Get real-time notifications on your device. | **{dotted-circle}** No |
| [Redmine](redmine.md) | Use Redmine as the issue tracker. | **{dotted-circle}** No |
-| [GitLab for Slack app](gitlab_slack_application.md) | Use Slack's official GitLab application. | **{dotted-circle}** No |
| [Slack slash commands](slack_slash_commands.md) | Enable slash commands in a workspace. | **{dotted-circle}** No |
| [Squash TM](squash_tm.md) | Update Squash TM requirements when GitLab issues are modified. | **{check-circle}** Yes |
| [Telegram](telegram.md) | Send notifications about project events to Telegram. | **{dotted-circle}** No |
@@ -181,5 +181,6 @@ You can disable SSL verification when you configure
## Related topics
+- [Integrations API](../../../api/integrations.md)
- [Integration development guidelines](../../../development/integrations/index.md)
- [GitLab Developer Portal](https://developer.gitlab.com)
diff --git a/doc/user/storage_management_automation.md b/doc/user/storage_management_automation.md
index 9a505d23597..c71ec74d9e1 100644
--- a/doc/user/storage_management_automation.md
+++ b/doc/user/storage_management_automation.md
@@ -7,11 +7,12 @@ info: To determine the technical writer assigned to the Stage/Group associated w
# Storage management automation **(FREE ALL)**
-You can manage your storage through the GitLab UI and the API. This page describes how to
-automate storage analysis and cleanup to manage your [usage quota](usage_quotas.md). You can also
-manage your storage usage by making your pipelines more efficient. For more information, see [pipeline efficiency](../ci/pipelines/pipeline_efficiency.md).
+This page describes how to automate storage analysis and cleanup to manage your storage usage
+with the GitLab REST API.
-You can also use the [GitLab community forum and Discord](https://about.gitlab.com/community/) to ask for help with API automation.
+You can also manage your storage usage by improving [pipeline efficiency](../ci/pipelines/pipeline_efficiency.md).
+
+For more help with API automation, you can also use the [GitLab community forum and Discord](https://about.gitlab.com/community/).
## API requirements
@@ -30,15 +31,20 @@ You must use the following scopes to [authenticate](../api/rest/index.md#authent
You can use command-line tools or a programming language to interact with the REST API.
-### Command line
+### Command line tools
+
+To send API requests, install either:
+
+- curl with your preferred package manager.
+- [GitLab CLI](../editor_extensions/gitlab_cli/index.md) and use the `glab api` subcommand.
+
+To format JSON responses, install `jq`. For more information, see [Tips for productive DevOps workflows: JSON formatting with jq and CI/CD linting automation](https://about.gitlab.com/blog/2021/04/21/devops-workflows-json-format-jq-ci-cd-lint/).
-You must install the following tools to send API requests:
+To use these tools with the REST API:
-- Install `curl` with your preferred package manager.
-- Install the [GitLab CLI](../editor_extensions/gitlab_cli/index.md) and use the `api` subcommand.
-- Install `jq` to format JSON responses. For more information, see [Tips for productive DevOps workflows: JSON formatting with jq and CI/CD linting automation](https://about.gitlab.com/blog/2021/04/21/devops-workflows-json-format-jq-ci-cd-lint/).
+::Tabs
-Example with `curl` and `jq`:
+:::TabTitle curl
```shell
export GITLAB_TOKEN=xxx
@@ -46,7 +52,7 @@ export GITLAB_TOKEN=xxx
curl --silent --header "Authorization: Bearer $GITLAB_TOKEN" "https://gitlab.com/api/v4/user" | jq
```
-Example with the [GitLab CLI](../editor_extensions/gitlab_cli/index.md):
+:::TabTitle GitLab CLI
```shell
glab auth login
@@ -54,18 +60,25 @@ glab auth login
glab api groups/YOURGROUPNAME/projects
```
+::EndTabs
+
#### Using the GitLab CLI
Some API endpoints require [pagination](../api/rest/index.md#pagination) and subsequent page fetches to retrieve all results. The [GitLab CLI](../editor_extensions/gitlab_cli/index.md) provides the flag `--paginate`.
-Requests that require sending a POST body formatted as JSON data can be written as `key=value` pairs passed to the `--raw-field` parameter.
+Requests that require a POST body formatted as JSON data can be written as `key=value` pairs passed to the `--raw-field` parameter.
For more information, see the [GitLab CLI endpoint documentation](../editor_extensions/gitlab_cli/index.md#core-commands).
### API client libraries
-The storage management and cleanup automation methods described in this page use the [`python-gitlab`](https://python-gitlab.readthedocs.io/en/stable/) library in programmatic example. The `python-gitlab` library provides
-a feature-rich programming interface. For more information about use cases for the `python-gitlab` library,
+The storage management and cleanup automation methods described in this page use:
+
+- The [`python-gitlab`](https://python-gitlab.readthedocs.io/en/stable/) library, which provides
+a feature-rich programming interface.
+- The `get_all_projects_top_level_namespace_storage_analysis_cleanup_example.py` script in the [GitLab API with Python](https://gitlab.com/gitlab-de/use-cases/gitlab-api/gitlab-api-python/) project.
+
+For more information about use cases for the `python-gitlab` library,
see [Efficient DevSecOps workflows: Hands-on `python-gitlab` API automation](https://about.gitlab.com/blog/2023/02/01/efficient-devsecops-workflows-hands-on-python-gitlab-api-automation/).
For more information about other API client libraries, see [Third-party clients](../api/rest/index.md#third-party-clients).
@@ -92,7 +105,11 @@ This data provides insight into storage consumption of the project by the follow
Additional queries are required for detailed storage statistics for [job artifacts](../api/job_artifacts.md), the [container registry](../api/container_registry.md), the [package registry](../api/packages.md) and [dependency proxy](../api/dependency_proxy.md). It is explained later in this how-to.
-Example that uses `curl` and `jq` on the command line:
+To identify storage types:
+
+::Tabs
+
+:::TabTitle curl
```shell
curl --silent --header "Authorization: Bearer $GITLAB_TOKEN" "https://gitlab.com/api/v4/projects/$GL_PROJECT_ID?statistics=true" | jq --compact-output '.id,.statistics' | jq
@@ -111,7 +128,7 @@ curl --silent --header "Authorization: Bearer $GITLAB_TOKEN" "https://gitlab.com
}
```
-Example that uses the [GitLab CLI](../editor_extensions/gitlab_cli/index.md):
+:::TabTitle GitLab CLI
```shell
export GL_PROJECT_ID=48349590
@@ -131,7 +148,7 @@ glab api --method GET projects/$GL_PROJECT_ID --field 'statistics=true' | jq --c
}
```
-Example using the `python-gitlab` library:
+:::TabTitle Python
```python
project_obj = gl.projects.get(project.id, statistics=True)
@@ -139,7 +156,9 @@ project_obj = gl.projects.get(project.id, statistics=True)
print("Project {n} statistics: {s}".format(n=project_obj.name_with_namespace, s=json.dump(project_obj.statistics, indent=4)))
```
-You can find an example implementation in the script `get_all_projects_top_level_namespace_storage_analysis_cleanup_example.py` which is located in the [GitLab API with Python project](https://gitlab.com/gitlab-de/use-cases/gitlab-api/gitlab-api-python/). Export the `GL_GROUP_ID` environment variable and run the script to see the project statistics printed in the terminal.
+::EndTabs
+
+To see the project statistics printed in the terminal, export the `GL_GROUP_ID` environment variable and run the script.
```shell
export GL_TOKEN=xxx
@@ -175,7 +194,17 @@ Here's an example of an algorithm that analyzes multiple subgroups and projects:
1. Identify the storage type to analyze, and collect the information from project attributes, like project statistics, and job artifacts.
1. Print an overview of all projects, grouped by group, and their storage information.
-Example with the [GitLab CLI](../editor_extensions/gitlab_cli/index.md):
+The shell approach with `glab` might be more suitable for smaller analyses. For larger analyses, consider a script that
+uses the API client libraries. A script improves readability, data handling, flow control, testing, and reusability.
+
+To ensure the script doesn't reach [API rate limits](../api/rest/index.md#rate-limits), the following
+example code is not optimized for parallel API requests.
+
+To implement this algorithm:
+
+::Tabs
+
+:::TabTitle GitLab CLI
```shell
export GROUP_NAME="gitlab-de"
@@ -221,10 +250,7 @@ glab api projects/48349590/jobs | jq --compact-output '.[]' | jq --compact-outpu
[{"file_type":"archive","size":1049089,"filename":"artifacts.zip","file_format":"zip"},{"file_type":"metadata","size":157,"filename":"metadata.gz","file_format":"gzip"},{"file_type":"trace","size":3140,"filename":"job.log","file_format":null}]
```
-While the shell approach with `glab` works for smaller analysis, you should consider a script that
-uses the API client libraries. This improves readability, storing data, flow control, testing, and reusability.
-
-You can also implement this algorithm with a Python script that uses the `python-gitlab` library:
+:::TabTitle Python
```python
#!/usr/bin/env python
@@ -266,6 +292,8 @@ if __name__ == "__main__":
print("DEBUG: ID {i}: {a}".format(i=job.id, a=job.attributes['artifacts']))
```
+::EndTabs
+
The script outputs the project job artifacts in a JSON-formatted list:
```json
@@ -291,8 +319,6 @@ The script outputs the project job artifacts in a JSON formatted list:
]
```
-The full script `get_all_projects_top_level_namespace_storage_analysis_cleanup_example.py` with specific examples for automating storage management and cleanup is located is located in the [GitLab API with Python](https://gitlab.com/gitlab-de/use-cases/gitlab-api/gitlab-api-python/) project. To ensure the script doesn't reach [API rate limits](../api/rest/index.md#rate-limits), the example code is not optimized for parallel API requests.
-
### Helper functions
You may need to convert timestamp seconds into a duration format, or print raw bytes in a more
@@ -430,8 +456,6 @@ $ python3 get_all_projects_top_level_namespace_storage_analysis_cleanup_example.
| [gitlab-de/playground/artifact-gen-group/gen-job-artifacts-4](Gen Job Artifacts 4) | 4828297945 | job.log | trace | 0.0030 |
```
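
Converting raw values (seconds into a duration, bytes into megabytes) is a good fit for small helper functions. A minimal sketch, with illustrative names that are not the script's actual API:

```python
def render_size_mb(value):
    # Render a raw byte count as megabytes with four decimal places.
    return "{:.4f}".format(value / 1024 / 1024)


def render_age_time(value):
    # Render an age in seconds as a human-readable d/h/m/s duration.
    minutes, seconds = divmod(int(value), 60)
    hours, minutes = divmod(minutes, 60)
    days, hours = divmod(hours, 24)
    return "{d}d {h}h {m}m {s}s".format(d=days, h=hours, m=minutes, s=seconds)
```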
-The full example of the script `get_all_projects_top_level_namespace_storage_analysis_cleanup_example.py` is located in the [GitLab API with Python project](https://gitlab.com/gitlab-de/use-cases/gitlab-api/gitlab-api-python/). To ensure the script doesn't hit [API rate limits](../api/rest/index.md#rate-limits), the example code is not optimized for parallel API requests.
-
### Delete job artifacts
You can use a filter to select the types of job artifacts to delete in bulk. A typical request:
@@ -489,8 +513,6 @@ only. When the collection loops remove the object locks, all marked as deleted j
# Print collection summary (removed for readability)
```
-The full example of the script `get_all_projects_top_level_namespace_storage_analysis_cleanup_example.py` is located in the [GitLab API Python project](https://gitlab.com/gitlab-de/use-cases/gitlab-api/gitlab-api-python/).
-
#### Delete all job artifacts for a project
If you do not need the project's [job artifacts](../ci/jobs/job_artifacts.md), you can
@@ -500,7 +522,11 @@ Job artifact deletion happens asynchronously in GitLab and can take a while to c
The [artifacts for the most recent successful jobs](../ci/jobs/job_artifacts.md#keep-artifacts-from-most-recent-successful-jobs) are also kept by default.
-Example with curl:
+To delete all job artifacts for a project:
+
+::Tabs
+
+:::TabTitle curl
```shell
export GL_PROJECT_ID=48349590
@@ -508,7 +534,7 @@ export GL_PROJECT_ID=48349590
curl --silent --header "Authorization: Bearer $GITLAB_TOKEN" --request DELETE "https://gitlab.com/api/v4/projects/$GL_PROJECT_ID/artifacts"
```
-Example with the [GitLab CLI](../editor_extensions/gitlab_cli/index.md):
+:::TabTitle GitLab CLI
```shell
glab api --method GET projects/$GL_PROJECT_ID/jobs | jq --compact-output '.[]' | jq --compact-output '.id, .artifacts'
@@ -516,17 +542,19 @@ glab api --method GET projects/$GL_PROJECT_ID/jobs | jq --compact-output '.[]' |
glab api --method DELETE projects/$GL_PROJECT_ID/artifacts
```
-Example with the [`python-gitlab` library](https://python-gitlab.readthedocs.io/en/stable/gl_objects/pipelines_and_jobs.html#jobs):
+:::TabTitle Python
```python
project.artifacts.delete()
```
+::EndTabs
+
### Delete job logs
When you delete a job log, you also [erase the entire job](../api/jobs.md#erase-a-job).
-Example with the [GitLab CLI](../editor_extensions/gitlab_cli/index.md):
+Example with the GitLab CLI:
```shell
glab api --method GET projects/$GL_PROJECT_ID/jobs | jq --compact-output '.[]' | jq --compact-output '.id'
@@ -555,8 +583,6 @@ that delete the job artifact.
time.sleep(1)
```
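
With the `python-gitlab` library, a comparable loop erases each job, which also deletes its log, pausing between calls. A minimal sketch, assuming the `project` object from the earlier examples:

```python
import time

# Erase every job in the project. Erasing a job deletes its log and
# artifacts. Sleep between calls to stay clear of API rate limits.
for job in project.jobs.list(iterator=True):
    job.erase()
    time.sleep(1)
```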
-The full example of the script `get_all_projects_top_level_namespace_storage_analysis_cleanup_example.py` is located in the [GitLab API with Python project](https://gitlab.com/gitlab-de/use-cases/gitlab-api/gitlab-api-python/).
-
Support for creating a retention policy for job logs is proposed in [issue 374717](https://gitlab.com/gitlab-org/gitlab/-/issues/374717).
### Inventory of job artifacts expiry settings
@@ -748,15 +774,17 @@ the `created_at` attribute to implement a similar algorithm that compares the jo
pipeline_obj.delete()
```
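
A sketch of that `created_at` comparison for pipelines, assuming an illustrative `age_threshold_in_seconds` value and the `project` object from earlier examples:

```python
import datetime

# Illustrative threshold: delete pipelines older than 90 days.
age_threshold_in_seconds = 90 * 24 * 60 * 60

for pipeline in project.pipelines.list(iterator=True):
    created_at = datetime.datetime.strptime(pipeline.created_at, "%Y-%m-%dT%H:%M:%S.%fZ")
    age_in_seconds = (datetime.datetime.utcnow() - created_at).total_seconds()

    if age_in_seconds > age_threshold_in_seconds:
        pipeline_obj = project.pipelines.get(pipeline.id)
        pipeline_obj.delete()
```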
-The full example of the script `get_all_projects_top_level_namespace_storage_analysis_cleanup_example.py` is located in the [GitLab API with Python project](https://gitlab.com/gitlab-de/use-cases/gitlab-api/gitlab-api-python/).
-
Automatically deleting old pipelines in GitLab is tracked in [issue 338480](https://gitlab.com/gitlab-org/gitlab/-/issues/338480).
## Manage storage for Container Registries
Container registries are available [in a project](../api/container_registry.md#within-a-project) or [in a group](../api/container_registry.md#within-a-group). Both locations require analysis and cleanup strategies.
-The following example uses using `curl` and `jq` for a project:
+To analyze and clean up container registries in a project:
+
+::Tabs
+
+:::TabTitle curl
```shell
export GL_PROJECT_ID=48057080
@@ -771,7 +799,7 @@ curl --silent --header "Authorization: Bearer $GITLAB_TOKEN" "https://gitlab.com
3401613
```
-The following example uses the [GitLab CLI](../editor_extensions/gitlab_cli/index.md) for a project:
+:::TabTitle GitLab CLI
```shell
export GL_PROJECT_ID=48057080
@@ -794,6 +822,8 @@ glab api --method GET projects/$GL_PROJECT_ID/registry/repositories/4435617/tags
3401613
```
+::EndTabs
+
A similar automation shell script is shown in the [delete old pipelines](#delete-old-pipelines) section.
The `python-gitlab` API library provides bulk deletion interfaces, which are explained in the next section.
@@ -828,9 +858,6 @@ The following example uses the [`python-gitlab` API library](https://python-gitl
repository.tags.delete_in_bulk(name_regex_delete="v.+", keep_n=2)
```
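
For context, a minimal sketch of how the `repository` object above can be obtained, assuming the `project` object from earlier examples:

```python
for repository in project.repositories.list(iterator=True):
    # Bulk-delete tags matching v.+ but keep the two most recent matches.
    repository.tags.delete_in_bulk(name_regex_delete="v.+", keep_n=2)
```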
-The full example of the script `get_all_projects_top_level_namespace_storage_analysis_cleanup_example.py` is located
-in the [GitLab API with Python](https://gitlab.com/gitlab-de/use-cases/gitlab-api/gitlab-api-python/) project.
-
### Cleanup policy for containers
Use the project REST API endpoint to [create cleanup policies](packages/container_registry/reduce_container_registry_storage.md#use-the-cleanup-policy-api). The following example uses the [GitLab CLI](../editor_extensions/gitlab_cli/index.md) to create a cleanup policy.
@@ -981,8 +1008,6 @@ Package size: 20.0033
Package size 20.0033 > threshold 10.0000, deleting package.
```
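
A hedged sketch of the loop that produces this output, assuming an illustrative `package_size_threshold_mb` value and summing each package's file sizes:

```python
package_size_threshold_mb = 10.0

for package in project.packages.list(iterator=True):
    # A package's size is the sum of its package files' sizes, in bytes.
    size_bytes = sum(f.size for f in package.package_files.list(iterator=True))
    size_mb = size_bytes / 1024 / 1024
    print("Package size: {:.4f}".format(size_mb))

    if size_mb > package_size_threshold_mb:
        print("Package size {:.4f} > threshold {:.4f}, deleting package.".format(
            size_mb, package_size_threshold_mb))
        package.delete()
```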
-The full example of the script `get_all_projects_top_level_namespace_storage_analysis_cleanup_example.py` is located in the [GitLab API with Python](https://gitlab.com/gitlab-de/use-cases/gitlab-api/gitlab-api-python/) project.
-
### Dependency Proxy
Review the [cleanup policy](packages/dependency_proxy/reduce_dependency_proxy_storage.md#cleanup-policies) and how to [purge the cache using the API](packages/dependency_proxy/reduce_dependency_proxy_storage.md#use-the-api-to-clear-the-cache)
diff --git a/lib/api/helpers.rb b/lib/api/helpers.rb
index 9e04cf955df..3bbc1d71852 100644
--- a/lib/api/helpers.rb
+++ b/lib/api/helpers.rb
@@ -700,14 +700,17 @@ module API
Gitlab::AppLogger.warn("Redis tracking event failed for event: #{event_name}, message: #{error.message}")
end
- def track_event(event_name, user_id:, namespace_id: nil, project_id: nil)
- return unless user_id.present?
+ def track_event(event_name, user:, namespace_id: nil, project_id: nil)
+ return unless user.present?
+
+ namespace = Namespace.find(namespace_id) if namespace_id
+ project = Project.find(project_id) if project_id
Gitlab::InternalEvents.track_event(
event_name,
- user_id: user_id,
- namespace_id: namespace_id,
- project_id: project_id
+ user: user,
+ namespace: namespace,
+ project: project
)
rescue StandardError => e
Gitlab::ErrorTracking.track_and_raise_for_dev_exception(e, event_name: event_name)
diff --git a/lib/api/usage_data.rb b/lib/api/usage_data.rb
index 0a343093c33..a228aa1c45a 100644
--- a/lib/api/usage_data.rb
+++ b/lib/api/usage_data.rb
@@ -77,7 +77,7 @@ module API
track_event(
event_name,
- user_id: current_user.id,
+ user: current_user,
namespace_id: namespace_id,
project_id: project_id
)
diff --git a/lib/gitlab/bitbucket_server_import/importers/pull_request_importer.rb b/lib/gitlab/bitbucket_server_import/importers/pull_request_importer.rb
index 34963452192..0d4de385f5e 100644
--- a/lib/gitlab/bitbucket_server_import/importers/pull_request_importer.rb
+++ b/lib/gitlab/bitbucket_server_import/importers/pull_request_importer.rb
@@ -30,7 +30,7 @@ module Gitlab
reviewer_ids: reviewers,
source_project_id: project.id,
source_branch: Gitlab::Git.ref_name(object[:source_branch_name]),
- source_branch_sha: object[:source_branch_sha],
+ source_branch_sha: source_branch_sha,
target_project_id: project.id,
target_branch: Gitlab::Git.ref_name(object[:target_branch_name]),
target_branch_sha: object[:target_branch_sha],
@@ -68,6 +68,14 @@ module Gitlab
end
end
end
+
+ def source_branch_sha
+ source_branch_sha = project.repository.commit(object[:source_branch_sha])&.sha
+
+ return source_branch_sha if source_branch_sha
+
+ project.repository.find_commits_by_message(object[:source_branch_sha])&.first&.sha
+ end
end
end
end
diff --git a/locale/gitlab.pot b/locale/gitlab.pot
index a53c8f8b10a..c42a6d27f22 100644
--- a/locale/gitlab.pot
+++ b/locale/gitlab.pot
@@ -19436,6 +19436,30 @@ msgstr ""
msgid "Experiment features' settings not allowed."
msgstr ""
+msgid "ExperimentBadge|An Experiment is a feature that's in the process of being developed. It's not production-ready. We encourage users to try Experimental features and provide feedback."
+msgstr ""
+
+msgid "ExperimentBadge|An Experiment:"
+msgstr ""
+
+msgid "ExperimentBadge|Can be removed at any time."
+msgstr ""
+
+msgid "ExperimentBadge|Can cause data loss."
+msgstr ""
+
+msgid "ExperimentBadge|Experiment"
+msgstr ""
+
+msgid "ExperimentBadge|Has no support and might not be documented."
+msgstr ""
+
+msgid "ExperimentBadge|May be unstable."
+msgstr ""
+
+msgid "ExperimentBadge|What's an Experiment?"
+msgstr ""
+
msgid "Experiments"
msgstr ""
@@ -48097,7 +48121,7 @@ msgstr ""
msgid "This %{issuableDisplayName} is locked. Only project members can comment."
msgstr ""
-msgid "This %{issuable} is hidden because its author has been banned"
+msgid "This %{issuable} is hidden because its author has been banned."
msgstr ""
msgid "This %{issuable} is locked. Only %{strong_open}project members%{strong_close} can comment."
@@ -48430,15 +48454,12 @@ msgstr ""
msgid "This issue is currently blocked by the following issues:"
msgstr ""
-msgid "This issue is hidden because its author has been banned"
+msgid "This issue is hidden because its author has been banned."
msgstr ""
msgid "This issue is in a child epic of the filtered epic"
msgstr ""
-msgid "This issue is locked. Only project members can comment."
-msgstr ""
-
msgid "This job could not start because it could not retrieve the needed artifacts%{punctuation}%{invalid_dependencies}"
msgstr ""
@@ -48571,15 +48592,9 @@ msgstr ""
msgid "This merge request is from an internal project to a public project."
msgstr ""
-msgid "This merge request is hidden because its author has been banned"
-msgstr ""
-
msgid "This merge request is locked."
msgstr ""
-msgid "This merge request is locked. Only project members can comment."
-msgstr ""
-
msgid "This merge request was merged. To apply this suggestion, edit this file directly."
msgstr ""
diff --git a/qa/qa/tools/migrate_influx_data_to_gcs.rb b/qa/qa/tools/migrate_influx_data_to_gcs.rb
index 4933251511b..fe8ad4d8710 100644
--- a/qa/qa/tools/migrate_influx_data_to_gcs.rb
+++ b/qa/qa/tools/migrate_influx_data_to_gcs.rb
@@ -28,7 +28,7 @@ module QA
# Run Influx Migrator
# @param [Integer] the last x hours for which data is required
# @return [void]
- def self.run(range: 1)
+ def self.run(range: 6)
migrator = new(range)
QA::Runtime::Logger.info("Fetching Influx data for the last #{range} hours")
@@ -76,9 +76,8 @@ module QA
# @return void
def influx_to_csv(influx_bucket, stats_type, data_file_name)
all_runs = query_api.query(query: query(influx_bucket, stats_type))
- CSV.open(data_file_name, "wb") do |csv|
+ CSV.open(data_file_name, "wb", col_sep: '|') do |csv|
stats_array = stats_type == "test-stats" ? TEST_STATS_FIELDS : FABRICATION_STATS_FIELDS
- csv << stats_array.flatten
all_runs.each do |table|
table.records.each do |record|
csv << stats_array.map { |key| record.values[key] }
diff --git a/spec/features/merge_request/admin_views_hidden_merge_request_spec.rb b/spec/features/merge_request/admin_views_hidden_merge_request_spec.rb
index 0dbb42a633b..a20f75be9cf 100644
--- a/spec/features/merge_request/admin_views_hidden_merge_request_spec.rb
+++ b/spec/features/merge_request/admin_views_hidden_merge_request_spec.rb
@@ -16,12 +16,12 @@ RSpec.describe 'Admin views hidden merge request', feature_category: :insider_th
end
it 'shows a hidden merge request icon' do
- page.within('.detail-page-header-body') do
- tooltip = format(_('This %{issuable} is hidden because its author has been banned'),
- issuable: _('merge request'))
- expect(page).to have_css("div[data-testid='hidden'][title='#{tooltip}']")
- expect(page).to have_css('svg[data-testid="spam-icon"]')
- end
+ expect(page).to have_css 'svg[data-testid="spam-icon"]'
+
+ find('svg[data-testid="spam-icon"]').hover
+
+ expect(page).to have_text format(_('This %{issuable} is hidden because its author has been banned.'),
+ issuable: _('merge request'))
end
end
end
diff --git a/spec/features/merge_requests/admin_views_hidden_merge_requests_spec.rb b/spec/features/merge_requests/admin_views_hidden_merge_requests_spec.rb
index e7727fbb9dc..7e33946f713 100644
--- a/spec/features/merge_requests/admin_views_hidden_merge_requests_spec.rb
+++ b/spec/features/merge_requests/admin_views_hidden_merge_requests_spec.rb
@@ -17,7 +17,7 @@ RSpec.describe 'Admin views hidden merge requests', feature_category: :insider_t
it 'shows a hidden merge request icon' do
page.within("#merge_request_#{merge_request.id}") do
- tooltip = format(_('This %{issuable} is hidden because its author has been banned'),
+ tooltip = format(_('This %{issuable} is hidden because its author has been banned.'),
issuable: _('merge request'))
expect(page).to have_css("span[title='#{tooltip}']")
expect(page).to have_css('svg[data-testid="spam-icon"]')
diff --git a/spec/frontend/boards/board_card_inner_spec.js b/spec/frontend/boards/board_card_inner_spec.js
index 95b5712bab0..179614b08b6 100644
--- a/spec/frontend/boards/board_card_inner_spec.js
+++ b/spec/frontend/boards/board_card_inner_spec.js
@@ -235,7 +235,7 @@ describe('Board card component', () => {
expect(tooltip).toBeDefined();
expect(findHiddenIssueIcon().attributes('title')).toBe(
- 'This issue is hidden because its author has been banned',
+ 'This issue is hidden because its author has been banned.',
);
});
});
diff --git a/spec/frontend/issuable/components/hidden_badge_spec.js b/spec/frontend/issuable/components/hidden_badge_spec.js
new file mode 100644
index 00000000000..db2248bb2d2
--- /dev/null
+++ b/spec/frontend/issuable/components/hidden_badge_spec.js
@@ -0,0 +1,45 @@
+import { GlBadge, GlIcon } from '@gitlab/ui';
+import { shallowMount } from '@vue/test-utils';
+import { createMockDirective, getBinding } from 'helpers/vue_mock_directive';
+import HiddenBadge from '~/issuable/components/hidden_badge.vue';
+
+describe('HiddenBadge component', () => {
+ let wrapper;
+
+ const mountComponent = () => {
+ wrapper = shallowMount(HiddenBadge, {
+ directives: {
+ GlTooltip: createMockDirective('gl-tooltip'),
+ },
+ propsData: {
+ issuableType: 'issue',
+ },
+ });
+ };
+
+ const findBadge = () => wrapper.findComponent(GlBadge);
+ const findIcon = () => wrapper.findComponent(GlIcon);
+
+ beforeEach(() => {
+ mountComponent();
+ });
+
+ it('renders warning badge', () => {
+ expect(findBadge().text()).toBe('Hidden');
+ expect(findBadge().props('variant')).toEqual('warning');
+ });
+
+ it('renders spam icon', () => {
+ expect(findIcon().props('name')).toBe('spam');
+ });
+
+ it('has tooltip', () => {
+ expect(getBinding(wrapper.element, 'gl-tooltip')).not.toBeUndefined();
+ });
+
+ it('has title', () => {
+ expect(findBadge().attributes('title')).toBe(
+ 'This issue is hidden because its author has been banned.',
+ );
+ });
+});
diff --git a/spec/frontend/issuable/components/locked_badge_spec.js b/spec/frontend/issuable/components/locked_badge_spec.js
new file mode 100644
index 00000000000..73ab6e36ba1
--- /dev/null
+++ b/spec/frontend/issuable/components/locked_badge_spec.js
@@ -0,0 +1,45 @@
+import { GlBadge, GlIcon } from '@gitlab/ui';
+import { shallowMount } from '@vue/test-utils';
+import { createMockDirective, getBinding } from 'helpers/vue_mock_directive';
+import LockedBadge from '~/issuable/components/locked_badge.vue';
+
+describe('LockedBadge component', () => {
+ let wrapper;
+
+ const mountComponent = () => {
+ wrapper = shallowMount(LockedBadge, {
+ directives: {
+ GlTooltip: createMockDirective('gl-tooltip'),
+ },
+ propsData: {
+ issuableType: 'issue',
+ },
+ });
+ };
+
+ const findBadge = () => wrapper.findComponent(GlBadge);
+ const findIcon = () => wrapper.findComponent(GlIcon);
+
+ beforeEach(() => {
+ mountComponent();
+ });
+
+ it('renders warning badge', () => {
+ expect(findBadge().text()).toBe('Locked');
+ expect(findBadge().props('variant')).toEqual('warning');
+ });
+
+ it('renders lock icon', () => {
+ expect(findIcon().props('name')).toBe('lock');
+ });
+
+ it('has tooltip', () => {
+ expect(getBinding(wrapper.element, 'gl-tooltip')).not.toBeUndefined();
+ });
+
+ it('has title', () => {
+ expect(findBadge().attributes('title')).toBe(
+ 'This issue is locked. Only project members can comment.',
+ );
+ });
+});
diff --git a/spec/frontend/issues/show/components/sticky_header_spec.js b/spec/frontend/issues/show/components/sticky_header_spec.js
index dd41e3034eb..a909084956f 100644
--- a/spec/frontend/issues/show/components/sticky_header_spec.js
+++ b/spec/frontend/issues/show/components/sticky_header_spec.js
@@ -1,6 +1,7 @@
-import { GlIcon } from '@gitlab/ui';
-import { createMockDirective, getBinding } from 'helpers/vue_mock_directive';
+import { GlIcon, GlLink } from '@gitlab/ui';
import { shallowMountExtended } from 'helpers/vue_test_utils_helper';
+import HiddenBadge from '~/issuable/components/hidden_badge.vue';
+import LockedBadge from '~/issuable/components/locked_badge.vue';
import {
issuableStatusText,
STATUS_CLOSED,
@@ -17,14 +18,12 @@ describe('StickyHeader component', () => {
let wrapper;
const findConfidentialBadge = () => wrapper.findComponent(ConfidentialityBadge);
- const findHiddenBadge = () => wrapper.findByTestId('hidden');
- const findLockedBadge = () => wrapper.findByTestId('locked');
+ const findHiddenBadge = () => wrapper.findComponent(HiddenBadge);
+ const findLockedBadge = () => wrapper.findComponent(LockedBadge);
+ const findTitle = () => wrapper.findComponent(GlLink);
const createComponent = (props = {}) => {
wrapper = shallowMountExtended(StickyHeader, {
- directives: {
- GlTooltip: createMockDirective('gl-tooltip'),
- },
propsData: {
issuableStatus: STATUS_OPEN,
issuableType: TYPE_ISSUE,
@@ -90,13 +89,6 @@ describe('StickyHeader component', () => {
const lockedBadge = findLockedBadge();
expect(lockedBadge.exists()).toBe(isLocked);
-
- if (isLocked) {
- expect(lockedBadge.attributes('title')).toBe(
- 'This issue is locked. Only project members can comment.',
- );
- expect(getBinding(lockedBadge.element, 'gl-tooltip')).not.toBeUndefined();
- }
});
it.each`
@@ -108,18 +100,11 @@ describe('StickyHeader component', () => {
const hiddenBadge = findHiddenBadge();
expect(hiddenBadge.exists()).toBe(isHidden);
-
- if (isHidden) {
- expect(hiddenBadge.attributes('title')).toBe(
- 'This issue is hidden because its author has been banned',
- );
- expect(getBinding(hiddenBadge.element, 'gl-tooltip')).not.toBeUndefined();
- }
});
it('shows with title', () => {
createComponent();
- const title = wrapper.find('a');
+ const title = findTitle();
expect(title.text()).toContain('A sticky issue');
expect(title.attributes('href')).toBe('#top');
diff --git a/spec/frontend/merge_requests/components/header_metadata_spec.js b/spec/frontend/merge_requests/components/header_metadata_spec.js
index 2823b4b9d97..64df71e1128 100644
--- a/spec/frontend/merge_requests/components/header_metadata_spec.js
+++ b/spec/frontend/merge_requests/components/header_metadata_spec.js
@@ -1,5 +1,6 @@
-import { createMockDirective, getBinding } from 'helpers/vue_mock_directive';
-import { shallowMountExtended } from 'helpers/vue_test_utils_helper';
+import { shallowMount } from '@vue/test-utils';
+import HiddenBadge from '~/issuable/components/hidden_badge.vue';
+import LockedBadge from '~/issuable/components/locked_badge.vue';
import HeaderMetadata from '~/merge_requests/components/header_metadata.vue';
import mrStore from '~/mr_notes/stores';
import ConfidentialityBadge from '~/vue_shared/components/confidentiality_badge.vue';
@@ -9,21 +10,18 @@ jest.mock('~/mr_notes/stores', () => jest.requireActual('helpers/mocks/mr_notes/
describe('HeaderMetadata component', () => {
let wrapper;
- const findConfidentialIcon = () => wrapper.findComponent(ConfidentialityBadge);
- const findLockedIcon = () => wrapper.findByTestId('locked');
- const findHiddenIcon = () => wrapper.findByTestId('hidden');
+ const findConfidentialBadge = () => wrapper.findComponent(ConfidentialityBadge);
+ const findLockedBadge = () => wrapper.findComponent(LockedBadge);
+ const findHiddenBadge = () => wrapper.findComponent(HiddenBadge);
const renderTestMessage = (renders) => (renders ? 'renders' : 'does not render');
const createComponent = ({ store, provide }) => {
- wrapper = shallowMountExtended(HeaderMetadata, {
+ wrapper = shallowMount(HeaderMetadata, {
mocks: {
$store: store,
},
provide,
- directives: {
- GlTooltip: createMockDirective('gl-tooltip'),
- },
});
};
@@ -51,42 +49,24 @@ describe('HeaderMetadata component', () => {
createComponent({ store, provide: { hidden: hiddenStatus } });
});
- it(`${renderTestMessage(lockStatus)} the locked icon`, () => {
- const lockedIcon = findLockedIcon();
-
- expect(lockedIcon.exists()).toBe(lockStatus);
-
- if (lockStatus) {
- expect(lockedIcon.attributes('title')).toBe(
- `This merge request is locked. Only project members can comment.`,
- );
- expect(getBinding(lockedIcon.element, 'gl-tooltip')).not.toBeUndefined();
- }
- });
-
- it(`${renderTestMessage(confidentialStatus)} the confidential icon`, () => {
- const confidentialIcon = findConfidentialIcon();
- expect(confidentialIcon.exists()).toBe(confidentialStatus);
+ it(`${renderTestMessage(confidentialStatus)} the confidential badge`, () => {
+ const confidentialBadge = findConfidentialBadge();
+ expect(confidentialBadge.exists()).toBe(confidentialStatus);
if (confidentialStatus && !hiddenStatus) {
- expect(confidentialIcon.props()).toMatchObject({
+ expect(confidentialBadge.props()).toMatchObject({
workspaceType: 'project',
issuableType: 'issue',
});
}
});
- it(`${renderTestMessage(confidentialStatus)} the hidden icon`, () => {
- const hiddenIcon = findHiddenIcon();
-
- expect(hiddenIcon.exists()).toBe(hiddenStatus);
+ it(`${renderTestMessage(lockStatus)} the locked badge`, () => {
+ expect(findLockedBadge().exists()).toBe(lockStatus);
+ });
- if (hiddenStatus) {
- expect(hiddenIcon.attributes('title')).toBe(
- `This merge request is hidden because its author has been banned`,
- );
- expect(getBinding(hiddenIcon.element, 'gl-tooltip')).not.toBeUndefined();
- }
+ it(`${renderTestMessage(hiddenStatus)} the hidden badge`, () => {
+ expect(findHiddenBadge().exists()).toBe(hiddenStatus);
});
},
);
diff --git a/spec/frontend/vue_shared/components/badges/__snapshots__/beta_badge_spec.js.snap b/spec/frontend/vue_shared/components/badges/__snapshots__/beta_badge_spec.js.snap
index 359aaacde0b..499a971d791 100644
--- a/spec/frontend/vue_shared/components/badges/__snapshots__/beta_badge_spec.js.snap
+++ b/spec/frontend/vue_shared/components/badges/__snapshots__/beta_badge_spec.js.snap
@@ -2,22 +2,15 @@
exports[`Beta badge component renders the badge 1`] = `
<div>
- <gl-badge-stub
- class="gl-cursor-pointer"
+ <a
+ class="badge badge-neutral badge-pill gl-badge gl-cursor-pointer md"
href="#"
- iconsize="md"
- size="md"
- variant="neutral"
+ target="_self"
>
Beta
- </gl-badge-stub>
- <gl-popover-stub
- cssclasses=""
- data-testid="beta-badge"
- showclosebutton="true"
- target="[Function]"
- title="What's Beta?"
- triggers="hover focus click"
+ </a>
+ <div
+ class="gl-popover"
>
<p>
A Beta feature is not production-ready, but is unlikely to change drastically before it's released. We encourage users to try Beta features and provide feedback.
@@ -43,6 +36,6 @@ exports[`Beta badge component renders the badge 1`] = `
Is complete or near completion.
</li>
</ul>
- </gl-popover-stub>
+ </div>
</div>
`;
diff --git a/spec/frontend/vue_shared/components/badges/__snapshots__/experiment_badge_spec.js.snap b/spec/frontend/vue_shared/components/badges/__snapshots__/experiment_badge_spec.js.snap
new file mode 100644
index 00000000000..4ad70338f3c
--- /dev/null
+++ b/spec/frontend/vue_shared/components/badges/__snapshots__/experiment_badge_spec.js.snap
@@ -0,0 +1,41 @@
+// Jest Snapshot v1, https://goo.gl/fbAQLP
+
+exports[`Experiment badge component renders the badge 1`] = `
+<div>
+ <a
+ class="badge badge-neutral badge-pill gl-badge gl-cursor-pointer md"
+ href="#"
+ target="_self"
+ >
+ Experiment
+ </a>
+ <div
+ class="gl-popover"
+ >
+ <p>
+ An Experiment is a feature that's in the process of being developed. It's not production-ready. We encourage users to try Experimental features and provide feedback.
+ </p>
+ <p
+ class="gl-mb-0"
+ >
+ An Experiment:
+ </p>
+ <ul
+ class="gl-pl-4"
+ >
+ <li>
+ May be unstable.
+ </li>
+ <li>
+ Can cause data loss.
+ </li>
+ <li>
+ Has no support and might not be documented.
+ </li>
+ <li>
+ Can be removed at any time.
+ </li>
+ </ul>
+ </div>
+</div>
+`;
diff --git a/spec/frontend/vue_shared/components/badges/beta_badge_spec.js b/spec/frontend/vue_shared/components/badges/beta_badge_spec.js
index c930c6d5708..d826ca5c7c0 100644
--- a/spec/frontend/vue_shared/components/badges/beta_badge_spec.js
+++ b/spec/frontend/vue_shared/components/badges/beta_badge_spec.js
@@ -1,4 +1,4 @@
-import { shallowMount } from '@vue/test-utils';
+import { mount } from '@vue/test-utils';
import { GlBadge } from '@gitlab/ui';
import BetaBadge from '~/vue_shared/components/badges/beta_badge.vue';
@@ -7,7 +7,7 @@ describe('Beta badge component', () => {
const findBadge = () => wrapper.findComponent(GlBadge);
const createWrapper = (props = {}) => {
- wrapper = shallowMount(BetaBadge, {
+ wrapper = mount(BetaBadge, {
propsData: { ...props },
});
};
diff --git a/spec/frontend/vue_shared/components/badges/experiment_badge_spec.js b/spec/frontend/vue_shared/components/badges/experiment_badge_spec.js
new file mode 100644
index 00000000000..3239578a173
--- /dev/null
+++ b/spec/frontend/vue_shared/components/badges/experiment_badge_spec.js
@@ -0,0 +1,32 @@
+import { mount } from '@vue/test-utils';
+import { GlBadge } from '@gitlab/ui';
+import ExperimentBadge from '~/vue_shared/components/badges/experiment_badge.vue';
+
+describe('Experiment badge component', () => {
+ let wrapper;
+
+ const findBadge = () => wrapper.findComponent(GlBadge);
+ const createWrapper = (props = {}) => {
+ wrapper = mount(ExperimentBadge, {
+ propsData: { ...props },
+ });
+ };
+
+ it('renders the badge', () => {
+ createWrapper();
+
+ expect(wrapper.element).toMatchSnapshot();
+ });
+
+ it('passes default size to badge', () => {
+ createWrapper();
+
+ expect(findBadge().props('size')).toBe('md');
+ });
+
+ it('passes given size to badge', () => {
+ createWrapper({ size: 'sm' });
+
+ expect(findBadge().props('size')).toBe('sm');
+ });
+});
diff --git a/spec/frontend/vue_shared/components/badges/hover_badge_spec.js b/spec/frontend/vue_shared/components/badges/hover_badge_spec.js
new file mode 100644
index 00000000000..68f368215c0
--- /dev/null
+++ b/spec/frontend/vue_shared/components/badges/hover_badge_spec.js
@@ -0,0 +1,50 @@
+import { mount } from '@vue/test-utils';
+import { GlBadge, GlPopover } from '@gitlab/ui';
+import HoverBadge from '~/vue_shared/components/badges/hover_badge.vue';
+
+describe('Hover badge component', () => {
+ let wrapper;
+
+ const findBadge = () => wrapper.findComponent(GlBadge);
+ const findPopover = () => wrapper.findComponent(GlPopover);
+ const createWrapper = ({ props = {}, slots } = {}) => {
+ wrapper = mount(HoverBadge, {
+ propsData: {
+ label: 'Label',
+ title: 'Title',
+ ...props,
+ },
+ slots,
+ });
+ };
+
+ it('passes label to popover', () => {
+ createWrapper();
+
+ expect(findBadge().text()).toBe('Label');
+ });
+
+ it('passes title to popover', () => {
+ createWrapper();
+
+ expect(findPopover().props('title')).toBe('Title');
+ });
+
+ it('renders the default slot', () => {
+ createWrapper({ slots: { default: '<p>This is an awesome content</p>' } });
+
+ expect(findPopover().text()).toContain('This is an awesome content');
+ });
+
+ it('passes default size to badge', () => {
+ createWrapper();
+
+ expect(findBadge().props('size')).toBe('md');
+ });
+
+ it('passes given size to badge', () => {
+ createWrapper({ props: { size: 'sm' } });
+
+ expect(findBadge().props('size')).toBe('sm');
+ });
+});
diff --git a/spec/frontend/vue_shared/issuable/list/components/issuable_item_spec.js b/spec/frontend/vue_shared/issuable/list/components/issuable_item_spec.js
index d353e336977..47da111b604 100644
--- a/spec/frontend/vue_shared/issuable/list/components/issuable_item_spec.js
+++ b/spec/frontend/vue_shared/issuable/list/components/issuable_item_spec.js
@@ -352,7 +352,7 @@ describe('IssuableItem', () => {
expect(hiddenIcon.props('name')).toBe('spam');
expect(hiddenIcon.attributes()).toMatchObject({
- title: 'This issue is hidden because its author has been banned',
+ title: 'This issue is hidden because its author has been banned.',
arialabel: 'Hidden',
});
});
diff --git a/spec/frontend/vue_shared/issuable/show/components/issuable_header_spec.js b/spec/frontend/vue_shared/issuable/show/components/issuable_header_spec.js
index 3b6f06d835b..03395e5dfc0 100644
--- a/spec/frontend/vue_shared/issuable/show/components/issuable_header_spec.js
+++ b/spec/frontend/vue_shared/issuable/show/components/issuable_header_spec.js
@@ -2,6 +2,8 @@ import { GlBadge, GlButton, GlIcon, GlLink, GlSprintf } from '@gitlab/ui';
import { shallowMount } from '@vue/test-utils';
import { resetHTMLFixture, setHTMLFixture } from 'helpers/fixtures';
import { createMockDirective, getBinding } from 'helpers/vue_mock_directive';
+import HiddenBadge from '~/issuable/components/hidden_badge.vue';
+import LockedBadge from '~/issuable/components/locked_badge.vue';
import { STATUS_CLOSED, STATUS_OPEN, STATUS_REOPENED, TYPE_ISSUE } from '~/issues/constants';
import { __ } from '~/locale';
import ConfidentialityBadge from '~/vue_shared/components/confidentiality_badge.vue';
@@ -23,8 +25,8 @@ describe('IssuableHeader component', () => {
wrapper.findAllComponents(GlIcon).filter((component) => component.props('name') === name);
const findIcon = (name) =>
findGlIconWithName(name).exists() ? findGlIconWithName(name).at(0) : undefined;
- const findBlockedIcon = () => findIcon('lock');
- const findHiddenIcon = () => findIcon('spam');
+ const findBlockedBadge = () => wrapper.findComponent(LockedBadge);
+ const findHiddenBadge = () => wrapper.findComponent(HiddenBadge);
const findExternalLinkIcon = () => findIcon('external-link');
const findFirstContributionIcon = () => findIcon('first-contribution');
const findComponentTooltip = (component) => getBinding(component.element, 'gl-tooltip');
@@ -111,49 +113,31 @@ describe('IssuableHeader component', () => {
});
});
- describe('blocked icon', () => {
+ describe('blocked badge', () => {
it('renders when issuable is blocked', () => {
createComponent({ blocked: true });
- expect(findBlockedIcon().props('ariaLabel')).toBe('Blocked');
- });
-
- it('has tooltip', () => {
- createComponent({ blocked: true });
-
- expect(findComponentTooltip(findBlockedIcon())).toBeDefined();
- expect(findBlockedIcon().attributes('title')).toBe(
- 'This issue is locked. Only project members can comment.',
- );
+ expect(findBlockedBadge().props('issuableType')).toBe('issue');
});
it('does not render when issuable is not blocked', () => {
createComponent({ blocked: false });
- expect(findBlockedIcon()).toBeUndefined();
+ expect(findBlockedBadge().exists()).toBe(false);
});
});
- describe('hidden icon', () => {
+ describe('hidden badge', () => {
it('renders when issuable is hidden', () => {
createComponent({ isHidden: true });
- expect(findHiddenIcon().props('ariaLabel')).toBe('Hidden');
- });
-
- it('has tooltip', () => {
- createComponent({ isHidden: true });
-
- expect(findComponentTooltip(findHiddenIcon())).toBeDefined();
- expect(findHiddenIcon().attributes('title')).toBe(
- 'This issue is hidden because its author has been banned',
- );
+ expect(findHiddenBadge().props('issuableType')).toBe('issue');
});
it('does not render when issuable is not hidden', () => {
createComponent({ isHidden: false });
- expect(findHiddenIcon()).toBeUndefined();
+ expect(findHiddenBadge().exists()).toBe(false);
});
});
diff --git a/spec/helpers/application_helper_spec.rb b/spec/helpers/application_helper_spec.rb
index 757f832faa4..7cf64c6e049 100644
--- a/spec/helpers/application_helper_spec.rb
+++ b/spec/helpers/application_helper_spec.rb
@@ -922,14 +922,14 @@ RSpec.describe ApplicationHelper do
context 'when resource is an issue' do
let_it_be(:resource) { build(:issue) }
- let(:expected_title) { 'This issue is hidden because its author has been banned' }
+ let(:expected_title) { 'This issue is hidden because its author has been banned.' }
it_behaves_like 'returns icon with tooltip'
end
context 'when resource is a merge request' do
let_it_be(:resource) { build(:merge_request) }
- let(:expected_title) { 'This merge request is hidden because its author has been banned' }
+ let(:expected_title) { 'This merge request is hidden because its author has been banned.' }
it_behaves_like 'returns icon with tooltip'
end
diff --git a/spec/helpers/issuables_helper_spec.rb b/spec/helpers/issuables_helper_spec.rb
index 9fe820ccae9..f0377b1cb20 100644
--- a/spec/helpers/issuables_helper_spec.rb
+++ b/spec/helpers/issuables_helper_spec.rb
@@ -568,41 +568,6 @@ RSpec.describe IssuablesHelper, feature_category: :team_planning do
end
end
- describe '#state_name_with_icon' do
- let_it_be(:project) { create(:project, :repository) }
-
- context 'for an issue' do
- let_it_be(:issue) { create(:issue, project: project) }
- let_it_be(:issue_closed) { create(:issue, :closed, project: project) }
-
- it 'returns the correct state name and icon when issue is open' do
- expect(helper.state_name_with_icon(issue)).to match_array([_('Open'), 'issues'])
- end
-
- it 'returns the correct state name and icon when issue is closed' do
- expect(helper.state_name_with_icon(issue_closed)).to match_array([_('Closed'), 'issue-closed'])
- end
- end
-
- context 'for a merge request' do
- let_it_be(:merge_request) { create(:merge_request, source_project: project) }
- let_it_be(:merge_request_merged) { create(:merge_request, :merged, source_project: project) }
- let_it_be(:merge_request_closed) { create(:merge_request, :closed, source_project: project) }
-
- it 'returns the correct state name and icon when merge request is open' do
- expect(helper.state_name_with_icon(merge_request)).to match_array([_('Open'), 'merge-request-open'])
- end
-
- it 'returns the correct state name and icon when merge request is merged' do
- expect(helper.state_name_with_icon(merge_request_merged)).to match_array([_('Merged'), 'merge'])
- end
-
- it 'returns the correct state name and icon when merge request is closed' do
- expect(helper.state_name_with_icon(merge_request_closed)).to match_array([_('Closed'), 'merge-request-close'])
- end
- end
- end
-
describe '#issuable_type_selector_data' do
using RSpec::Parameterized::TableSyntax
diff --git a/spec/lib/api/helpers_spec.rb b/spec/lib/api/helpers_spec.rb
index dd62343890e..9e7e174f4f0 100644
--- a/spec/lib/api/helpers_spec.rb
+++ b/spec/lib/api/helpers_spec.rb
@@ -773,21 +773,21 @@ RSpec.describe API::Helpers, feature_category: :shared do
end
describe '#track_event' do
- let(:user_id) { 345 }
- let(:namespace_id) { 12 }
- let(:project_id) { 56 }
+ let_it_be(:user) { create(:user) }
+ let_it_be(:namespace) { create(:namespace) }
+ let_it_be(:project) { create(:project) }
let(:event_name) { 'i_compliance_dashboard' }
let(:unknown_event) { 'unknown' }
it 'tracks internal event' do
expect(Gitlab::InternalEvents).to receive(:track_event).with(
event_name,
- user_id: user_id,
- namespace_id: namespace_id,
- project_id: project_id
+ user: user,
+ namespace: namespace,
+ project: project
)
- helper.track_event(event_name, user_id: user_id, namespace_id: namespace_id, project_id: project_id)
+ helper.track_event(event_name, user: user, namespace_id: namespace.id, project_id: project.id)
end
it 'logs an exception for unknown event' do
@@ -797,13 +797,14 @@ RSpec.describe API::Helpers, feature_category: :shared do
instance_of(Gitlab::InternalEvents::UnknownEventError),
event_name: unknown_event
)
- helper.track_event(unknown_event, user_id: user_id, namespace_id: namespace_id, project_id: project_id)
+
+ helper.track_event(unknown_event, user: user, namespace_id: namespace.id, project_id: project.id)
end
- it 'does not track event for nil user_id' do
+ it 'does not track event for nil user' do
expect(Gitlab::InternalEvents).not_to receive(:track_event)
- helper.track_event(unknown_event, user_id: nil, namespace_id: namespace_id, project_id: project_id)
+ helper.track_event(unknown_event, user: nil, namespace_id: namespace.id, project_id: project.id)
end
end
diff --git a/spec/lib/gitlab/bitbucket_server_import/importers/pull_request_importer_spec.rb b/spec/lib/gitlab/bitbucket_server_import/importers/pull_request_importer_spec.rb
index 3c84d888c92..1ae68f9efb8 100644
--- a/spec/lib/gitlab/bitbucket_server_import/importers/pull_request_importer_spec.rb
+++ b/spec/lib/gitlab/bitbucket_server_import/importers/pull_request_importer_spec.rb
@@ -48,6 +48,68 @@ RSpec.describe Gitlab::BitbucketServerImport::Importers::PullRequestImporter, fe
end
end
+ describe 'merge request diff head_commit_sha' do
+ before do
+ allow(pull_request).to receive(:source_branch_sha).and_return(source_branch_sha)
+ end
+
+ context 'when a commit with the source_branch_sha exists' do
+ let(:source_branch_sha) { project.repository.head_commit.sha }
+
+ it 'is equal to the source_branch_sha' do
+ importer.execute
+
+ merge_request = project.merge_requests.find_by_iid(pull_request.iid)
+
+ expect(merge_request.merge_request_diffs.first.head_commit_sha).to eq(source_branch_sha)
+ end
+ end
+
+ context 'when a commit with the source_branch_sha does not exist' do
+ let(:source_branch_sha) { 'x' * Commit::MIN_SHA_LENGTH }
+
+ it 'is nil' do
+ importer.execute
+
+ merge_request = project.merge_requests.find_by_iid(pull_request.iid)
+
+ expect(merge_request.merge_request_diffs.first.head_commit_sha).to be_nil
+ end
+
+ context 'when a commit containing the sha in the message exists' do
+ let(:source_branch_sha) { project.repository.head_commit.sha }
+
+ it 'is equal to the sha' do
+ message = "
+ Squashed commit of the following:
+
+ commit #{source_branch_sha}
+ Author: John Smith <john@smith.com>
+ Date: Mon Sep 18 15:58:38 2023 +0200
+
+ My commit message
+ "
+
+ Files::CreateService.new(
+ project,
+ project.creator,
+ start_branch: 'master',
+ branch_name: 'master',
+ commit_message: message,
+ file_path: 'files/lfs/ruby.rb',
+ file_content: 'testing'
+ ).execute
+
+ importer.execute
+
+ merge_request = project.merge_requests.find_by_iid(pull_request.iid)
+
+ expect(merge_request.merge_request_diffs.first.head_commit_sha).to eq(source_branch_sha)
+ end
+ end
+ end
+ end
+
it 'logs its progress' do
expect(Gitlab::BitbucketServerImport::Logger)
.to receive(:info).with(include(message: 'starting', iid: pull_request.iid)).and_call_original
diff --git a/spec/models/ci/pipeline_spec.rb b/spec/models/ci/pipeline_spec.rb
index 7e572e2fdc6..76ebd1dbf5e 100644
--- a/spec/models/ci/pipeline_spec.rb
+++ b/spec/models/ci/pipeline_spec.rb
@@ -3250,22 +3250,23 @@ RSpec.describe Ci::Pipeline, :mailer, factory_default: :keep, feature_category:
shared_examples 'a method that returns all merge requests for a given pipeline' do
let(:pipeline) { create(:ci_empty_pipeline, status: 'created', project: pipeline_project, ref: 'master') }
+ let(:merge_request) do
+ create(
+ :merge_request,
+ source_project: pipeline_project,
+ target_project: project,
+ source_branch: pipeline.ref
+ )
+ end
it 'returns all merge requests having the same source branch and the pipeline sha' do
- merge_request = create(:merge_request, source_project: pipeline_project, target_project: project, source_branch: pipeline.ref)
-
- create(:merge_request_diff, merge_request: merge_request).tap do |diff|
- create(:merge_request_diff_commit, merge_request_diff: diff, sha: pipeline.sha)
- end
+ create(:merge_request_diff_commit, merge_request_diff: merge_request.merge_request_diff, sha: pipeline.sha)
expect(pipeline.all_merge_requests).to eq([merge_request])
end
it "doesn't return merge requests having the same source branch without the pipeline sha" do
- merge_request = create(:merge_request, source_project: pipeline_project, target_project: project, source_branch: pipeline.ref)
- create(:merge_request_diff, merge_request: merge_request).tap do |diff|
- create(:merge_request_diff_commit, merge_request_diff: diff, sha: 'unrelated')
- end
+ create(:merge_request_diff_commit, merge_request_diff: merge_request.merge_request_diff, sha: 'unrelated')
expect(pipeline.all_merge_requests).to be_empty
end
diff --git a/spec/requests/api/usage_data_spec.rb b/spec/requests/api/usage_data_spec.rb
index c8f1e8d6973..d296566853e 100644
--- a/spec/requests/api/usage_data_spec.rb
+++ b/spec/requests/api/usage_data_spec.rb
@@ -200,6 +200,9 @@ RSpec.describe API::UsageData, feature_category: :service_ping do
end
context 'with authentication' do
+ let_it_be(:namespace) { create(:namespace) }
+ let_it_be(:project) { create(:project) }
+
before do
stub_application_setting(usage_ping_enabled: true)
allow(Gitlab::RequestForgeryProtection).to receive(:verified?).and_return(true)
@@ -207,11 +210,10 @@ RSpec.describe API::UsageData, feature_category: :service_ping do
context 'with correct params' do
it 'returns status ok' do
- expect(Gitlab::InternalEvents).to receive(:track_event).with(known_event, anything)
- # allow other events to also get triggered
- allow(Gitlab::InternalEvents).to receive(:track_event)
+ expect(Gitlab::InternalEvents).to receive(:track_event)
+ .with(known_event, user: user, namespace: namespace, project: project)
- post api(endpoint, user), params: { event: known_event, namespace_id: namespace_id, project_id: project_id }
+ post api(endpoint, user), params: { event: known_event, namespace_id: namespace.id, project_id: project.id }
expect(response).to have_gitlab_http_status(:ok)
end