-rw-r--r--  app/assets/javascripts/cycle_analytics/components/banner.vue | 56
-rw-r--r--  app/assets/javascripts/jobs/components/table/cells/actions_cell.vue | 18
-rw-r--r--  app/assets/javascripts/jobs/components/table/jobs_table.vue | 2
-rw-r--r--  app/assets/javascripts/pipeline_editor/graphql/queries/latest_commit_sha.query.graphql | 11
-rw-r--r--  app/assets/javascripts/pipeline_editor/pipeline_editor_app.vue | 16
-rw-r--r--  app/assets/stylesheets/page_bundles/boards.scss | 4
-rw-r--r--  app/models/ci/pipeline.rb | 2
-rw-r--r--  config/feature_flags/development/ci_modified_paths_of_external_prs.yml | 8
-rw-r--r--  config/feature_flags/development/preload_repo_cache.yml | 8
-rw-r--r--  db/post_migrate/20210907211557_finalize_ci_builds_bigint_conversion.rb | 217
-rw-r--r--  db/schema_migrations/20210907211557 | 1
-rw-r--r--  db/structure.sql | 7
-rw-r--r--  doc/administration/clusters/kas.md | 2
-rw-r--r--  doc/administration/monitoring/prometheus/gitlab_exporter.md | 7
-rw-r--r--  doc/administration/monitoring/prometheus/index.md | 8
-rw-r--r--  doc/api/resource_label_events.md | 2
-rw-r--r--  doc/update/index.md | 73
-rw-r--r--  doc/update/zero_downtime.md | 942
-rw-r--r--  doc/user/project/import/bitbucket.md | 18
-rw-r--r--  lib/api/projects_relation_builder.rb | 2
-rw-r--r--  locale/gitlab.pot | 9
-rw-r--r--  spec/db/schema_spec.rb | 1
-rw-r--r--  spec/frontend/cycle_analytics/banner_spec.js | 47
-rw-r--r--  spec/frontend/pipeline_editor/mock_data.js | 36
-rw-r--r--  spec/frontend/pipeline_editor/pipeline_editor_app_spec.js | 13
-rw-r--r--  spec/frontend_integration/README.md | 27
-rw-r--r--  spec/models/ci/pipeline_spec.rb | 10
27 files changed, 1026 insertions(+), 521 deletions(-)
diff --git a/app/assets/javascripts/cycle_analytics/components/banner.vue b/app/assets/javascripts/cycle_analytics/components/banner.vue
deleted file mode 100644
index 006bbaec6cb..00000000000
--- a/app/assets/javascripts/cycle_analytics/components/banner.vue
+++ /dev/null
@@ -1,56 +0,0 @@
-<script>
-import { GlIcon } from '@gitlab/ui';
-import iconCycleAnalyticsSplash from 'icons/_icon_cycle_analytics_splash.svg';
-
-export default {
- components: {
- GlIcon,
- },
- props: {
- documentationLink: {
- type: String,
- required: true,
- },
- },
- computed: {
- iconCycleAnalyticsSplash() {
- return iconCycleAnalyticsSplash;
- },
- },
- methods: {
- dismissOverviewDialog() {
- this.$emit('dismiss-overview-dialog');
- },
- },
-};
-</script>
-<template>
- <div class="landing content-block">
- <button
- :aria-label="__('Dismiss Value Stream Analytics introduction box')"
- class="js-ca-dismiss-button dismiss-button"
- type="button"
- @click="dismissOverviewDialog"
- >
- <gl-icon name="close" />
- </button>
- <div
- class="svg-container"
- v-html="iconCycleAnalyticsSplash /* eslint-disable-line vue/no-v-html */"
- ></div>
- <div class="inner-content">
- <h4>{{ __('Introducing Value Stream Analytics') }}</h4>
- <p>
- {{
- __(`Value Stream Analytics gives an overview
-of how much time it takes to go from idea to production in your project.`)
- }}
- </p>
- <p>
- <a :href="documentationLink" target="_blank" rel="nofollow" class="btn">
- {{ __('Read more') }}
- </a>
- </p>
- </div>
- </div>
-</template>
diff --git a/app/assets/javascripts/jobs/components/table/cells/actions_cell.vue b/app/assets/javascripts/jobs/components/table/cells/actions_cell.vue
index cfbd92c0fc0..6b3a4424a5b 100644
--- a/app/assets/javascripts/jobs/components/table/cells/actions_cell.vue
+++ b/app/assets/javascripts/jobs/components/table/cells/actions_cell.vue
@@ -135,15 +135,6 @@ export default {
<template>
<gl-button-group>
- <gl-button
- v-if="shouldDisplayArtifacts"
- icon="download"
- :title="$options.ACTIONS_DOWNLOAD_ARTIFACTS"
- :href="artifactDownloadPath"
- rel="nofollow"
- download
- data-testid="download-artifacts"
- />
<template v-if="canReadJob">
<gl-button v-if="isActive" icon="cancel" :title="$options.CANCEL" @click="cancelJob()" />
<template v-else-if="isScheduled">
@@ -191,5 +182,14 @@ export default {
/>
</template>
</template>
+ <gl-button
+ v-if="shouldDisplayArtifacts"
+ icon="download"
+ :title="$options.ACTIONS_DOWNLOAD_ARTIFACTS"
+ :href="artifactDownloadPath"
+ rel="nofollow"
+ download
+ data-testid="download-artifacts"
+ />
</gl-button-group>
</template>
diff --git a/app/assets/javascripts/jobs/components/table/jobs_table.vue b/app/assets/javascripts/jobs/components/table/jobs_table.vue
index 076c0e78b11..298c99c4162 100644
--- a/app/assets/javascripts/jobs/components/table/jobs_table.vue
+++ b/app/assets/javascripts/jobs/components/table/jobs_table.vue
@@ -141,7 +141,7 @@ export default {
</template>
<template #cell(actions)="{ item }">
- <actions-cell :job="item" />
+ <actions-cell class="gl-float-right" :job="item" />
</template>
</gl-table>
</template>
diff --git a/app/assets/javascripts/pipeline_editor/graphql/queries/latest_commit_sha.query.graphql b/app/assets/javascripts/pipeline_editor/graphql/queries/latest_commit_sha.query.graphql
index 219c23bb22b..02d49507947 100644
--- a/app/assets/javascripts/pipeline_editor/graphql/queries/latest_commit_sha.query.graphql
+++ b/app/assets/javascripts/pipeline_editor/graphql/queries/latest_commit_sha.query.graphql
@@ -1,11 +1,10 @@
query getLatestCommitSha($projectPath: ID!, $ref: String) {
project(fullPath: $projectPath) {
- pipelines(ref: $ref) {
- nodes {
- id
- sha
- path
- commitPath
+ repository {
+ tree(ref: $ref) {
+ lastCommit {
+ sha
+ }
}
}
}
diff --git a/app/assets/javascripts/pipeline_editor/pipeline_editor_app.vue b/app/assets/javascripts/pipeline_editor/pipeline_editor_app.vue
index dcbfa431126..e70417145ab 100644
--- a/app/assets/javascripts/pipeline_editor/pipeline_editor_app.vue
+++ b/app/assets/javascripts/pipeline_editor/pipeline_editor_app.vue
@@ -164,22 +164,8 @@ export default {
};
},
update(data) {
- const pipelineNodes = data.project?.pipelines?.nodes ?? [];
+ const latestCommitSha = data.project?.repository?.tree?.lastCommit?.sha;
- // it's possible to query for the commit sha too early after an update
- // (e.g. after committing a new branch, we might query for the commit sha
- // but the pipeline nodes are still empty).
- // in this case, we start polling until we get a commit sha.
- if (pipelineNodes.length === 0) {
- if (![EDITOR_APP_STATUS_LOADING, EDITOR_APP_STATUS_EMPTY].includes(this.appStatus)) {
- this.$apollo.queries.commitSha.startPolling(COMMIT_SHA_POLL_INTERVAL);
- return this.commitSha;
- }
-
- return '';
- }
-
- const latestCommitSha = pipelineNodes[0].sha;
if (this.isFetchingCommitSha && latestCommitSha === this.commitSha) {
this.$apollo.queries.commitSha.startPolling(COMMIT_SHA_POLL_INTERVAL);
return this.commitSha;
diff --git a/app/assets/stylesheets/page_bundles/boards.scss b/app/assets/stylesheets/page_bundles/boards.scss
index 00c28d2f3f1..4806f4b054b 100644
--- a/app/assets/stylesheets/page_bundles/boards.scss
+++ b/app/assets/stylesheets/page_bundles/boards.scss
@@ -430,6 +430,10 @@
height: $input-height;
}
+.issue-boards-content {
+ isolation: isolate;
+}
+
.issue-boards-content.is-focused {
position: fixed;
width: 100%;
diff --git a/app/models/ci/pipeline.rb b/app/models/ci/pipeline.rb
index 4f718814354..1a0cec3c935 100644
--- a/app/models/ci/pipeline.rb
+++ b/app/models/ci/pipeline.rb
@@ -1107,7 +1107,7 @@ module Ci
merge_request.modified_paths
elsif branch_updated?
push_details.modified_paths
- elsif external_pull_request? && ::Feature.enabled?(:ci_modified_paths_of_external_prs, project, default_enabled: :yaml)
+ elsif external_pull_request?
external_pull_request.modified_paths
end
end
diff --git a/config/feature_flags/development/ci_modified_paths_of_external_prs.yml b/config/feature_flags/development/ci_modified_paths_of_external_prs.yml
deleted file mode 100644
index 62f7eb4663f..00000000000
--- a/config/feature_flags/development/ci_modified_paths_of_external_prs.yml
+++ /dev/null
@@ -1,8 +0,0 @@
----
-name: ci_modified_paths_of_external_prs
-introduced_by_url: https://gitlab.com/gitlab-org/gitlab/-/merge_requests/60736
-rollout_issue_url: https://gitlab.com/gitlab-org/gitlab/-/issues/330605
-milestone: '13.12'
-type: development
-group: group::pipeline authoring
-default_enabled: true
diff --git a/config/feature_flags/development/preload_repo_cache.yml b/config/feature_flags/development/preload_repo_cache.yml
deleted file mode 100644
index 42f0ac7dacd..00000000000
--- a/config/feature_flags/development/preload_repo_cache.yml
+++ /dev/null
@@ -1,8 +0,0 @@
----
-name: preload_repo_cache
-introduced_by_url: https://gitlab.com/gitlab-org/gitlab/-/merge_requests/69627
-rollout_issue_url:
-milestone: '14.3'
-type: development
-group: group::project management
-default_enabled: false
diff --git a/db/post_migrate/20210907211557_finalize_ci_builds_bigint_conversion.rb b/db/post_migrate/20210907211557_finalize_ci_builds_bigint_conversion.rb
deleted file mode 100644
index b805364d570..00000000000
--- a/db/post_migrate/20210907211557_finalize_ci_builds_bigint_conversion.rb
+++ /dev/null
@@ -1,217 +0,0 @@
-# frozen_string_literal: true
-
-class FinalizeCiBuildsBigintConversion < Gitlab::Database::Migration[1.0]
- disable_ddl_transaction!
-
- TABLE_NAME = 'ci_builds'
- PK_INDEX_NAME = 'index_ci_builds_on_converted_id'
-
- SECONDARY_INDEXES = [
- {
- original_name: :index_ci_builds_on_commit_id_artifacts_expired_at_and_id,
- temporary_name: :index_ci_builds_on_commit_id_expire_at_and_converted_id,
- columns: [:commit_id, :artifacts_expire_at, :id_convert_to_bigint],
- options: {
- where: "type::text = 'Ci::Build'::text
- AND (retried = false OR retried IS NULL)
- AND (name::text = ANY (ARRAY['sast'::character varying::text,
- 'secret_detection'::character varying::text,
- 'dependency_scanning'::character varying::text,
- 'container_scanning'::character varying::text,
- 'dast'::character varying::text]))"
- }
- },
- {
- original_name: :index_ci_builds_on_project_id_and_id,
- temporary_name: :index_ci_builds_on_project_and_converted_id,
- columns: [:project_id, :id_convert_to_bigint],
- options: {}
- },
- {
- original_name: :index_ci_builds_on_runner_id_and_id_desc,
- temporary_name: :index_ci_builds_on_runner_id_and_converted_id_desc,
- columns: [:runner_id, :id_convert_to_bigint],
- options: { order: { id_convert_to_bigint: :desc } }
- },
- {
- original_name: :index_for_resource_group,
- temporary_name: :index_ci_builds_on_resource_group_and_converted_id,
- columns: [:resource_group_id, :id_convert_to_bigint],
- options: { where: 'resource_group_id IS NOT NULL' }
- },
- {
- original_name: :index_security_ci_builds_on_name_and_id_parser_features,
- temporary_name: :index_security_ci_builds_on_name_and_converted_id_parser,
- columns: [:name, :id_convert_to_bigint],
- options: {
- where: "(name::text = ANY (ARRAY['container_scanning'::character varying::text,
- 'dast'::character varying::text,
- 'dependency_scanning'::character varying::text,
- 'license_management'::character varying::text,
- 'sast'::character varying::text,
- 'secret_detection'::character varying::text,
- 'coverage_fuzzing'::character varying::text,
- 'license_scanning'::character varying::text])
- ) AND type::text = 'Ci::Build'::text"
- }
- }
- ].freeze
-
- MANUAL_INDEX_NAMES = {
- original_name: :index_ci_builds_runner_id_pending_covering,
- temporary_name: :index_ci_builds_runner_id_and_converted_id_pending_covering
- }.freeze
-
- REFERENCING_FOREIGN_KEYS = [
- [:ci_build_needs, :build_id, :cascade, 'fk_rails_'],
- [:ci_build_pending_states, :build_id, :cascade, 'fk_rails_'],
- [:ci_build_report_results, :build_id, :cascade, 'fk_rails_'],
- [:ci_build_trace_chunks, :build_id, :cascade, 'fk_rails_'],
- [:ci_build_trace_metadata, :build_id, :cascade, 'fk_rails_'],
- [:ci_builds_runner_session, :build_id, :cascade, 'fk_rails_'],
- [:ci_builds_metadata, :build_id, :cascade, 'fk_'],
- [:ci_job_artifacts, :job_id, :cascade, 'fk_rails_'],
- [:ci_job_variables, :job_id, :cascade, 'fk_rails_'],
- [:ci_pending_builds, :build_id, :cascade, 'fk_rails_'],
- [:ci_resources, :build_id, :nullify, 'fk_'],
- [:ci_running_builds, :build_id, :cascade, 'fk_rails_'],
- [:ci_sources_pipelines, :source_job_id, :cascade, 'fk_'],
- [:ci_unit_test_failures, :build_id, :cascade, 'fk_'],
- [:dast_scanner_profiles_builds, :ci_build_id, :cascade, 'fk_'],
- [:dast_site_profiles_builds, :ci_build_id, :cascade, 'fk_'],
- [:pages_deployments, :ci_build_id, :nullify, 'fk_rails_'],
- [:requirements_management_test_reports, :build_id, :nullify, 'fk_rails_'],
- [:security_scans, :build_id, :cascade, 'fk_rails_'],
- [:terraform_state_versions, :ci_build_id, :nullify, 'fk_']
- ].freeze
-
- def up
- ensure_batched_background_migration_is_finished(
- job_class_name: 'CopyColumnUsingBackgroundMigrationJob',
- table_name: TABLE_NAME,
- column_name: 'id',
- job_arguments: [%w[id stage_id], %w[id_convert_to_bigint stage_id_convert_to_bigint]]
- )
-
- # Remove this upfront since this table is being dropped, and doesn't need to be migrated
- if foreign_key_exists?(:dep_ci_build_trace_sections, TABLE_NAME, column: :build_id)
- remove_foreign_key(:dep_ci_build_trace_sections, TABLE_NAME, column: :build_id)
- end
-
- swap_columns
- end
-
- def down
- swap_columns
- end
-
- private
-
- def swap_columns
- # Copy existing indexes from the original column to the new column
- create_indexes
- # Copy existing FKs from the original column to the new column
- create_referencing_foreign_keys
-
- # Remove existing FKs from the referencing tables, so we don't have to lock on them when we drop the existing PK
- replace_referencing_foreign_keys
-
- with_lock_retries(raise_on_exhaustion: true) do
- quoted_table_name = quote_table_name(TABLE_NAME)
-
- # Swap the original and new column names
- temporary_name = 'id_tmp'
- execute "ALTER TABLE #{quoted_table_name} RENAME COLUMN #{quote_column_name(:id)} TO #{quote_column_name(temporary_name)}"
- execute "ALTER TABLE #{quoted_table_name} RENAME COLUMN #{quote_column_name(:id_convert_to_bigint)} TO #{quote_column_name(:id)}"
- execute "ALTER TABLE #{quoted_table_name} RENAME COLUMN #{quote_column_name(temporary_name)} TO #{quote_column_name(:id_convert_to_bigint)}"
-
- # Reset the function so PG drops the plan cache for the incorrect integer type
- function_name = Gitlab::Database::UnidirectionalCopyTrigger.on_table(TABLE_NAME)
- .name([:id, :stage_id], [:id_convert_to_bigint, :stage_id_convert_to_bigint])
- execute "ALTER FUNCTION #{quote_table_name(function_name)} RESET ALL"
-
- # Swap defaults of the two columns, and change ownership of the sequence to the new id
- execute "ALTER SEQUENCE ci_builds_id_seq OWNED BY #{TABLE_NAME}.id"
- change_column_default TABLE_NAME, :id, -> { "nextval('ci_builds_id_seq'::regclass)" }
- change_column_default TABLE_NAME, :id_convert_to_bigint, 0
-
- # Swap the PK constraint from the original column to the new column
- # We deliberately don't CASCADE here because the old FKs should be removed already
- execute "ALTER TABLE #{quoted_table_name} DROP CONSTRAINT ci_builds_pkey"
- rename_index TABLE_NAME, PK_INDEX_NAME, 'ci_builds_pkey'
- execute "ALTER TABLE #{quoted_table_name} ADD CONSTRAINT ci_builds_pkey PRIMARY KEY USING INDEX ci_builds_pkey"
-
- # Remove old column indexes and change new column indexes to have the original names
- rename_secondary_indexes # rubocop:disable Migration/WithLockRetriesDisallowedMethod
- end
- end
-
- def create_indexes
- add_concurrent_index TABLE_NAME, :id_convert_to_bigint, unique: true, name: PK_INDEX_NAME
-
- SECONDARY_INDEXES.each do |index_definition|
- options = index_definition[:options]
- options[:name] = index_definition[:temporary_name]
-
- add_concurrent_index(TABLE_NAME, index_definition[:columns], options)
- end
-
- unless index_name_exists?(TABLE_NAME, MANUAL_INDEX_NAMES[:temporary_name])
- execute(<<~SQL)
- CREATE INDEX CONCURRENTLY #{MANUAL_INDEX_NAMES[:temporary_name]}
- ON ci_builds (runner_id, id_convert_to_bigint) INCLUDE (project_id)
- WHERE status::text = 'pending'::text AND type::text = 'Ci::Build'::text
- SQL
- end
- end
-
- def rename_secondary_indexes
- (SECONDARY_INDEXES + [MANUAL_INDEX_NAMES]).each do |index_definition|
- remove_index(TABLE_NAME, name: index_definition[:original_name]) # rubocop:disable Migration/RemoveIndex
- rename_index(TABLE_NAME, index_definition[:temporary_name], index_definition[:original_name])
- end
- end
-
- def create_referencing_foreign_keys
- REFERENCING_FOREIGN_KEYS.each do |(from_table, column, on_delete, prefix)|
- # Don't attempt to create the FK if one already exists from the table to the new column
- # The check in `add_concurrent_foreign_key` already checks for this, but it looks for the foreign key
- # with the new name only (containing the `_tmp` suffix).
- #
- # Since we might partially rename FKs and re-run the migration, we also have to check and see if a FK exists
- # on those columns that might not match the `_tmp` name.
- next if foreign_key_exists?(from_table, TABLE_NAME, column: column, primary_key: :id_convert_to_bigint)
-
- temporary_name = "#{concurrent_foreign_key_name(from_table, column, prefix: prefix)}_tmp"
-
- add_concurrent_foreign_key(
- from_table,
- TABLE_NAME,
- column: column,
- target_column: :id_convert_to_bigint,
- name: temporary_name,
- on_delete: on_delete,
- reverse_lock_order: true)
- end
- end
-
- def replace_referencing_foreign_keys
- REFERENCING_FOREIGN_KEYS.each do |(from_table, column, _, prefix)|
- existing_name = concurrent_foreign_key_name(from_table, column, prefix: prefix)
-
- # Don't attempt to replace the FK unless it exists and points at the original column.
- # This could happen if the migration is re-run due to failing midway.
- next unless foreign_key_exists?(from_table, TABLE_NAME, column: column, primary_key: :id, name: existing_name)
-
- with_lock_retries do
- # Explicitly lock table in order of parent, child to attempt to avoid deadlocks
- execute "LOCK TABLE #{TABLE_NAME}, #{from_table} IN ACCESS EXCLUSIVE MODE"
-
- temporary_name = "#{existing_name}_tmp"
-
- remove_foreign_key(from_table, TABLE_NAME, column: column, primary_key: :id, name: existing_name)
- rename_constraint(from_table, temporary_name, existing_name)
- end
- end
- end
-end
diff --git a/db/schema_migrations/20210907211557 b/db/schema_migrations/20210907211557
deleted file mode 100644
index e89552729ba..00000000000
--- a/db/schema_migrations/20210907211557
+++ /dev/null
@@ -1 +0,0 @@
-387dcbda7c3b32050298d8a679361a17916a66d0ab686211f0d1a0dc708c4a74 \ No newline at end of file
diff --git a/db/structure.sql b/db/structure.sql
index 4e327471c3a..16a0b30c9f8 100644
--- a/db/structure.sql
+++ b/db/structure.sql
@@ -11306,7 +11306,7 @@ CREATE TABLE ci_build_trace_metadata (
);
CREATE TABLE ci_builds (
- id_convert_to_bigint integer DEFAULT 0 NOT NULL,
+ id integer NOT NULL,
status character varying,
finished_at timestamp without time zone,
trace text,
@@ -11351,7 +11351,7 @@ CREATE TABLE ci_builds (
waiting_for_resource_at timestamp with time zone,
processed boolean,
scheduling_type smallint,
- id bigint NOT NULL,
+ id_convert_to_bigint bigint DEFAULT 0 NOT NULL,
stage_id bigint,
CONSTRAINT check_1e2fbd1b39 CHECK ((lock_version IS NOT NULL))
);
@@ -27534,6 +27534,9 @@ ALTER TABLE ONLY releases
ALTER TABLE ONLY geo_event_log
ADD CONSTRAINT fk_4a99ebfd60 FOREIGN KEY (repositories_changed_event_id) REFERENCES geo_repositories_changed_events(id) ON DELETE CASCADE;
+ALTER TABLE ONLY dep_ci_build_trace_sections
+ ADD CONSTRAINT fk_4ebe41f502 FOREIGN KEY (build_id) REFERENCES ci_builds(id) ON DELETE CASCADE;
+
ALTER TABLE ONLY alert_management_alerts
ADD CONSTRAINT fk_51ab4b6089 FOREIGN KEY (prometheus_alert_id) REFERENCES prometheus_alerts(id) ON DELETE CASCADE;
diff --git a/doc/administration/clusters/kas.md b/doc/administration/clusters/kas.md
index 7c541e5ccc4..6afaff73396 100644
--- a/doc/administration/clusters/kas.md
+++ b/doc/administration/clusters/kas.md
@@ -130,7 +130,7 @@ or the path to `config.yaml` inside the project is not valid.
To fix this, ensure that the paths to the configuration repository and to the `config.yaml` file
are correct.
-### KAS logs - dial tcp <GITLAB_INTERNAL_IP>:443: connect: connection refused
+### KAS logs - `dial tcp <GITLAB_INTERNAL_IP>:443: connect: connection refused`
If you are running a self-managed GitLab instance and:
diff --git a/doc/administration/monitoring/prometheus/gitlab_exporter.md b/doc/administration/monitoring/prometheus/gitlab_exporter.md
index 4ba4cad9143..d9852524aec 100644
--- a/doc/administration/monitoring/prometheus/gitlab_exporter.md
+++ b/doc/administration/monitoring/prometheus/gitlab_exporter.md
@@ -6,8 +6,7 @@ info: To determine the technical writer assigned to the Stage/Group associated w
# GitLab exporter **(FREE SELF)**
->- Available since [Omnibus GitLab 8.17](https://gitlab.com/gitlab-org/omnibus-gitlab/-/merge_requests/1132).
->- Renamed from `GitLab monitor exporter` to `GitLab exporter` in [GitLab 12.3](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/16511).
+> Renamed from `GitLab monitor exporter` to `GitLab exporter` in [GitLab 12.3](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/16511).
The [GitLab exporter](https://gitlab.com/gitlab-org/gitlab-exporter) enables you to
measure various GitLab metrics pulled from Redis and the database in Omnibus GitLab
@@ -33,8 +32,8 @@ the GitLab exporter exposed at `localhost:9168`.
## Use a different Rack server
->- Introduced in [Omnibus GitLab 13.8](https://gitlab.com/gitlab-org/omnibus-gitlab/-/merge_requests/4896).
->- WEBrick is now the default Rack server instead of Puma.
+> - Introduced in [Omnibus GitLab 13.8](https://gitlab.com/gitlab-org/omnibus-gitlab/-/merge_requests/4896).
+> - WEBrick is now the default Rack server instead of Puma.
By default, the GitLab exporter runs on [WEBrick](https://github.com/ruby/webrick), a single-threaded Ruby web server.
You can choose a different Rack server that better matches your performance needs.
diff --git a/doc/administration/monitoring/prometheus/index.md b/doc/administration/monitoring/prometheus/index.md
index dd81f71d418..e04aad9c6b8 100644
--- a/doc/administration/monitoring/prometheus/index.md
+++ b/doc/administration/monitoring/prometheus/index.md
@@ -55,8 +55,7 @@ To disable Prometheus and all of its exporters, as well as any added in the futu
### Changing the port and address Prometheus listens on
WARNING:
-The following change was added in [Omnibus GitLab 8.17](https://gitlab.com/gitlab-org/omnibus-gitlab/-/merge_requests/1261). Although possible,
-it's not recommended to change the port Prometheus listens
+Although possible, it's not recommended to change the port Prometheus listens
on, as this might affect or conflict with other services running on the GitLab
server. Proceed at your own risk.
@@ -330,8 +329,6 @@ To add a Prometheus dashboard for a single server GitLab setup:
## GitLab metrics
-> Introduced in GitLab 9.3.
-
GitLab monitors its own internal service metrics, and makes them available at the `/-/metrics` endpoint. Unlike other exporters, this endpoint requires authentication as it's available on the same URL and port as user traffic.
Read more about the [GitLab Metrics](gitlab_metrics.md).
@@ -380,9 +377,6 @@ The GitLab exporter allows you to measure various GitLab metrics, pulled from Re
## Configuring Prometheus to monitor Kubernetes
-> - Introduced in GitLab 9.0.
-> - Pod monitoring introduced in GitLab 9.4.
-
If your GitLab server is running within Kubernetes, Prometheus collects metrics from the Nodes and [annotated Pods](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#kubernetes_sd_config) in the cluster, including performance data on each container. This is particularly helpful if your CI/CD environments run in the same cluster, as you can use the [Prometheus project integration](../../../user/project/integrations/prometheus.md) to monitor them.
To disable the monitoring of Kubernetes:
diff --git a/doc/api/resource_label_events.md b/doc/api/resource_label_events.md
index c5292059c0f..9c05d32c992 100644
--- a/doc/api/resource_label_events.md
+++ b/doc/api/resource_label_events.md
@@ -95,7 +95,7 @@ Parameters:
curl --header "PRIVATE-TOKEN: <your_access_token>" "https://gitlab.example.com/api/v4/projects/5/issues/11/resource_label_events/1"
```
-## Epics **(ULTIMATE)**
+## Epics **(PREMIUM)**
### List group epic label events
diff --git a/doc/update/index.md b/doc/update/index.md
index 0009b5d1ec6..2d685af0875 100644
--- a/doc/update/index.md
+++ b/doc/update/index.md
@@ -180,7 +180,7 @@ migration](../integration/elasticsearch.md#retry-a-halted-migration).
Upgrading across multiple GitLab versions in one go is *only possible with downtime*.
The following examples assume a downtime upgrade.
-See the section below for [zero downtime upgrades](#upgrading-without-downtime).
+If you don't want any downtime, read how to [upgrade with zero downtime](zero_downtime.md).
Find where your version sits in the upgrade path below, and upgrade GitLab
accordingly, while also consulting the
@@ -233,76 +233,7 @@ upgraded to. This is to ensure [compatibility with GitLab versions](https://docs
## Upgrading without downtime
-Starting with GitLab 9.1.0 it's possible to upgrade to a newer major, minor, or
-patch version of GitLab without having to take your GitLab instance offline.
-However, for this to work there are the following requirements:
-
-- You can only upgrade 1 minor release at a time. So from 9.1 to 9.2, not to
- 9.3. If you skip releases, database modifications may be run in the wrong
- sequence [and leave the database schema in a broken state](https://gitlab.com/gitlab-org/gitlab/-/issues/321542).
-- You have to use [post-deployment
- migrations](../development/post_deployment_migrations.md) (included in
- [zero downtime update steps below](#steps)).
-- You are using PostgreSQL. Starting from GitLab 12.1, MySQL is not supported.
-- Multi-node GitLab instance. Single-node instances may experience brief interruptions
- [as services restart (Puma in particular)](https://docs.gitlab.com/omnibus/update/README.html#single-node-deployment).
-
-Most of the time you can safely upgrade from a patch release to the next minor
-release if the patch release is not the latest. For example, upgrading from
-9.1.1 to 9.2.0 should be safe even if 9.1.2 has been released. We do recommend
-you check the release posts of any releases between your current and target
-version just in case they include any migrations that may require you to upgrade
-1 release at a time.
-
-Some releases may also include so called "background migrations". These
-migrations are performed in the background by Sidekiq and are often used for
-migrating data. Background migrations are only added in the monthly releases.
-
-Certain major/minor releases may require a set of background migrations to be
-finished. To guarantee this, such a release processes any remaining jobs
-before continuing the upgrading procedure. While this doesn't require downtime
-(if the above conditions are met) we require that you [wait for background
-migrations to complete](#checking-for-background-migrations-before-upgrading)
-between each major/minor release upgrade.
-The time necessary to complete these migrations can be reduced by
-increasing the number of Sidekiq workers that can process jobs in the
-`background_migration` queue. To see the size of this queue,
-[Check for background migrations before upgrading](#checking-for-background-migrations-before-upgrading).
-
-As a rule of thumb, any database smaller than 10 GB doesn't take too much time to
-upgrade; perhaps an hour at most per minor release. Larger databases however may
-require more time, but this is highly dependent on the size of the database and
-the migrations that are being performed.
-
-### Examples
-
-To help explain this, let's look at some examples.
-
-**Example 1:** You are running a large GitLab installation using version 9.4.2,
-which is the latest patch release of 9.4. When GitLab 9.5.0 is released this
-installation can be safely upgraded to 9.5.0 without requiring downtime if the
-requirements mentioned above are met. You can also skip 9.5.0 and upgrade to
-9.5.1 after it's released, but you **can not** upgrade straight to 9.6.0; you
-_have_ to first upgrade to a 9.5.Z release.
-
-**Example 2:** You are running a large GitLab installation using version 9.4.2,
-which is the latest patch release of 9.4. GitLab 9.5 includes some background
-migrations, and 10.0 requires these to be completed (processing any
-remaining jobs for you). Skipping 9.5 is not possible without downtime, and due
-to the background migrations would require potentially hours of downtime
-depending on how long it takes for the background migrations to complete. To
-work around this you have to upgrade to 9.5.Z first, then wait at least a
-week before upgrading to 10.0.
-
-**Example 3:** You use MySQL as the database for GitLab. Any upgrade to a new
-major/minor release requires downtime. If a release includes any background
-migrations this could potentially lead to hours of downtime, depending on the
-size of your database. To work around this you must use PostgreSQL and
-meet the other online upgrade requirements mentioned above.
-
-### Steps
-
-Steps to [upgrade without downtime](https://docs.gitlab.com/omnibus/update/README.html#zero-downtime-updates).
+Read how to [upgrade without downtime](zero_downtime.md).
## Upgrading between editions
diff --git a/doc/update/zero_downtime.md b/doc/update/zero_downtime.md
new file mode 100644
index 00000000000..f0e6377f355
--- /dev/null
+++ b/doc/update/zero_downtime.md
@@ -0,0 +1,942 @@
+---
+stage: Enablement
+group: Distribution
+info: To determine the technical writer assigned to the Stage/Group associated with this page, see https://about.gitlab.com/handbook/engineering/ux/technical-writing/#designated-technical-writers
+---
+
+# Zero downtime upgrades
+
+Starting with GitLab 9.1.0 it's possible to upgrade to a newer major, minor, or
+patch version of GitLab without having to take your GitLab instance offline.
+However, for this to work there are the following requirements:
+
+- You can only upgrade 1 minor release at a time. So from 9.1 to 9.2, not to
+ 9.3. If you skip releases, database modifications may be run in the wrong
+ sequence [and leave the database schema in a broken state](https://gitlab.com/gitlab-org/gitlab/-/issues/321542).
+- You have to use [post-deployment migrations](../development/post_deployment_migrations.md).
+- You are using PostgreSQL. Starting from GitLab 12.1, MySQL is not supported.
+- Multi-node GitLab instance. Single-node instances may experience brief interruptions
+ [as services restart (Puma in particular)](#single-node-deployment).
+
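+To confirm the version you are upgrading from (and therefore the next minor
+release you can move to), one option is the standard environment info Rake
+task:
+
+```shell
+sudo gitlab-rake gitlab:env:info
+```
+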
+If you meet all the requirements above, follow these instructions in order. There are several sets of steps, depending on your deployment type:
+
+| Deployment type | Description |
+| --------------------------------------------------------------- | ------------------------------------------------ |
+| [Single-node](#single-node-deployment) | GitLab CE/EE on a single node |
+| [Gitaly Cluster](#gitaly-cluster) | GitLab CE/EE using HA architecture for Gitaly Cluster |
+| [Multi-node / PostgreSQL HA](#use-postgresql-ha) | GitLab CE/EE using HA architecture for PostgreSQL |
+| [Multi-node / Redis HA](#use-redis-ha-using-sentinel) | GitLab CE/EE using HA architecture for Redis |
+| [Geo](#geo-deployment) | GitLab EE with Geo enabled |
+| [Multi-node / HA with Geo](#multi-node--ha-deployment-with-geo) | GitLab EE on multiple nodes with Geo enabled |
+
+Every deployment type requires that you hot reload the `puma` and `sidekiq` processes on all nodes running these
+services after you've upgraded, because each of those processes loads the GitLab Rails application, which reads the
+database schema into memory at startup. Each process must be reloaded (or restarted, in the case of `sidekiq`)
+to pick up any database changes made by the post-deployment migrations, as shown below.
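+
+On Omnibus packages, the hot reload uses the same commands that appear in each
+procedure on this page:
+
+```shell
+# Reload Puma without dropping in-flight requests
+sudo gitlab-ctl hup puma
+
+# Sidekiq cannot be hot reloaded; a restart is required
+sudo gitlab-ctl restart sidekiq
+```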
+
+Most of the time you can safely upgrade from a patch release to the next minor
+release if the patch release is not the latest. For example, upgrading from
+9.1.1 to 9.2.0 should be safe even if 9.1.2 has been released. We do recommend
+you check the release posts of any releases between your current and target
+version just in case they include any migrations that may require you to upgrade
+1 release at a time.
+
+Some releases may also include so-called "background migrations". These
+migrations are performed in the background by Sidekiq and are often used for
+migrating data. Background migrations are only added in the monthly releases.
+
+Certain major/minor releases may require a set of background migrations to be
+finished. To guarantee this, such a release processes any remaining jobs
+before continuing the upgrading procedure. While this doesn't require downtime
+(if the above conditions are met) we require that you [wait for background
+migrations to complete](index.md#checking-for-background-migrations-before-upgrading)
+between each major/minor release upgrade.
+The time necessary to complete these migrations can be reduced by
+increasing the number of Sidekiq workers that can process jobs in the
+`background_migration` queue. To see the size of this queue,
+[check for background migrations before upgrading](index.md#checking-for-background-migrations-before-upgrading).
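+
+As a sketch (the linked section is the authoritative reference), the number of
+remaining background migration jobs can be printed from the command line:
+
+```shell
+sudo gitlab-rails runner -e production 'puts Gitlab::BackgroundMigration.remaining'
+```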
+
+As a rule of thumb, any database smaller than 10 GB doesn't take too much time to
+upgrade; perhaps an hour at most per minor release. Larger databases however may
+require more time, but this is highly dependent on the size of the database and
+the migrations that are being performed.
+
+To help explain this, let's look at some examples:
+
+**Example 1:** You are running a large GitLab installation using version 9.4.2,
+which is the latest patch release of 9.4. When GitLab 9.5.0 is released this
+installation can be safely upgraded to 9.5.0 without requiring downtime if the
+requirements mentioned above are met. You can also skip 9.5.0 and upgrade to
+9.5.1 after it's released, but you **cannot** upgrade straight to 9.6.0; you
+_have_ to first upgrade to a 9.5.Z release.
+
+**Example 2:** You are running a large GitLab installation using version 9.4.2,
+which is the latest patch release of 9.4. GitLab 9.5 includes some background
+migrations, and 10.0 requires these to be completed (processing any
+remaining jobs for you). Skipping 9.5 is not possible without downtime, and due
+to the background migrations would require potentially hours of downtime
+depending on how long it takes for the background migrations to complete. To
+work around this you have to upgrade to 9.5.Z first, then wait at least a
+week before upgrading to 10.0.
+
+**Example 3:** You use MySQL as the database for GitLab. Any upgrade to a new
+major/minor release requires downtime. If a release includes any background
+migrations this could potentially lead to hours of downtime, depending on the
+size of your database. To work around this you must use PostgreSQL and
+meet the other online upgrade requirements mentioned above.
+
+## Single-node deployment
+
+Before following these instructions, note the following **important** information:
+
+- You can only upgrade 1 minor release at a time. So from 13.6 to 13.7, not to 13.8.
+ If you attempt more than one minor release, the upgrade may fail.
+- On single-node Omnibus deployments, updates with no downtime are not possible when
+ using Puma because Puma always requires a complete restart. This is because the
+ [phased restart](https://github.com/puma/puma/blob/master/README.md#clustered-mode)
+ feature of Puma does not work with the way it is configured in GitLab all-in-one
+ packages (cluster-mode with app preloading).
+- While it is possible to minimize downtime on a single-node instance by following
+ these instructions, **it is not possible to always achieve true zero downtime
+ updates**. Users may see some connections timeout or be refused for a few minutes,
+ depending on which services need to restart.
+
+1. Create an empty file at `/etc/gitlab/skip-auto-reconfigure`. This prevents upgrades from running `gitlab-ctl reconfigure`, which by default automatically stops GitLab, runs all database migrations, and restarts GitLab.
+
+ ```shell
+ sudo touch /etc/gitlab/skip-auto-reconfigure
+ ```
+
+1. Update the GitLab package:
+
+ - For GitLab Community Edition:
+
+ ```shell
+ # Debian/Ubuntu
+ sudo apt-get update
+ sudo apt-get install gitlab-ce
+
+ # Centos/RHEL
+ sudo yum install gitlab-ce
+ ```
+
+ - For GitLab [Enterprise Edition](https://about.gitlab.com/pricing/):
+
+ ```shell
+ # Debian/Ubuntu
+ sudo apt-get update
+ sudo apt-get install gitlab-ee
+
+ # Centos/RHEL
+ sudo yum install gitlab-ee
+ ```
+
+1. To get the regular migrations and latest code in place, run
+
+ ```shell
+ sudo SKIP_POST_DEPLOYMENT_MIGRATIONS=true gitlab-ctl reconfigure
+ ```
+
+1. Once the node is updated and `reconfigure` finished successfully, run post-deployment migrations with
+
+ ```shell
+ sudo gitlab-rake db:migrate
+ ```
+
+1. Hot reload `puma` and `sidekiq` services
+
+ ```shell
+ sudo gitlab-ctl hup puma
+ sudo gitlab-ctl restart sidekiq
+ ```
+
+If you do not want to run zero downtime upgrades in the future, make
+sure you remove `/etc/gitlab/skip-auto-reconfigure` after
+you've completed these steps.
+
+## Multi-node / HA deployment
+
+You can only upgrade 1 minor release at a time. So from 13.6 to 13.7, not to 13.8.
+If you attempt more than one minor release, the upgrade may fail.
+
+### Use a load balancer in front of web (Puma) nodes
+
+With Puma, single-node zero-downtime updates are no longer possible. To achieve
+HA with zero-downtime updates, at least two nodes are required, fronted by a
+load balancer that distributes connections across them.
+
+The load balancer in front of the application nodes must be configured to poll
+health check endpoints to determine whether each service is accepting traffic.
+For Puma, use the `/-/readiness` endpoint; the `/readiness` endpoint can be
+used for Sidekiq and other services.
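+
+For illustration only (HAProxy is an assumption here; any load balancer with
+HTTP health checks works), a backend definition polling Puma's readiness
+endpoint might look like:
+
+```plaintext
+backend gitlab_web
+  option httpchk GET /-/readiness
+  http-check expect status 200
+  server web1 10.0.0.11:80 check
+  server web2 10.0.0.12:80 check
+```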
+
+Upgrades on web (Puma) nodes must be done in a rolling manner, one after
+another, ensuring at least one node is always up to serve traffic. This is
+required to ensure zero downtime.
+
+Puma nodes enter a blackout period as part of the upgrade, during which they
+continue to accept connections but mark their respective health check
+endpoints as unhealthy. On seeing this, the load balancer should disconnect
+them gracefully.
+
+Puma restarts only after completing all in-flight requests.
+This ensures data and service integrity. Once the nodes have restarted, their
+health check endpoints are marked healthy again.
+
+To upgrade an HA instance behind a load balancer to the latest GitLab version,
+update the nodes in the following order.
+
+1. Select one application node as a deploy node and complete the following steps
+ on it:
+
+ 1. Create an empty file at `/etc/gitlab/skip-auto-reconfigure`. This prevents upgrades from running `gitlab-ctl reconfigure`, which by default automatically stops GitLab, runs all database migrations, and restarts GitLab:
+
+ ```shell
+ sudo touch /etc/gitlab/skip-auto-reconfigure
+ ```
+
+ 1. Update the GitLab package:
+
+ ```shell
+ # Debian/Ubuntu
+ sudo apt-get update && sudo apt-get install gitlab-ce
+
+ # Centos/RHEL
+ sudo yum install gitlab-ce
+ ```
+
+ If you are an Enterprise Edition user, replace `gitlab-ce` with
+ `gitlab-ee` in the above command.
+
+ 1. Get the regular migrations and latest code in place:
+
+ ```shell
+ sudo SKIP_POST_DEPLOYMENT_MIGRATIONS=true gitlab-ctl reconfigure
+ ```
+
+ 1. Ensure services use the latest code:
+
+ ```shell
+ sudo gitlab-ctl hup puma
+ sudo gitlab-ctl restart sidekiq
+ ```
+
+1. Complete the following steps on the other Puma/Sidekiq nodes, one
+   after another. Always ensure at least one such node is up and running and
+   connected to the load balancer before proceeding to the next node.
+
+ 1. Update the GitLab package and ensure a `reconfigure` is run as part of
+ it. If not (due to `/etc/gitlab/skip-auto-reconfigure` file being
+ present), run `sudo gitlab-ctl reconfigure` manually.
+
+ 1. Ensure services use latest code:
+
+ ```shell
+ sudo gitlab-ctl hup puma
+ sudo gitlab-ctl restart sidekiq
+ ```
+
+1. On the deploy node, run the post-deployment migrations:
+
+ ```shell
+ sudo gitlab-rake db:migrate
+ ```
+
+### Gitaly Cluster
+
+[Gitaly Cluster](../administration/gitaly/praefect.md) is built using
+Gitaly and the Praefect component. It has its own PostgreSQL database, independent of the rest of
+the application.
+
+Before you update the main application you need to update Praefect.
+Out of your Praefect nodes, pick one to be your Praefect deploy node.
+This is where you will install the new Omnibus package first and run
+database migrations.
+
+**Praefect deploy node**
+
+- Create an empty file at `/etc/gitlab/skip-auto-reconfigure`. This prevents upgrades from running `gitlab-ctl reconfigure`, which by default automatically stops GitLab, runs all database migrations, and restarts GitLab:
+
+ ```shell
+ sudo touch /etc/gitlab/skip-auto-reconfigure
+ ```
+
+- Ensure that `praefect['auto_migrate'] = true` is set in `/etc/gitlab/gitlab.rb`
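+
+  For clarity, the relevant line in `/etc/gitlab/gitlab.rb` is:
+
+  ```ruby
+  # Allows `gitlab-ctl reconfigure` on this node to apply the Praefect database migrations
+  praefect['auto_migrate'] = true
+  ```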
+
+**All Praefect nodes _excluding_ the Praefect deploy node**
+
+- To prevent `reconfigure` from automatically running database migrations, ensure that `praefect['auto_migrate'] = false` is set in `/etc/gitlab/gitlab.rb`.
+
+**Praefect deploy node**
+
+- Update the GitLab package:
+
+ ```shell
+ # Debian/Ubuntu
+ sudo apt-get update && sudo apt-get install gitlab-ce
+
+ # Centos/RHEL
+ sudo yum install gitlab-ce
+ ```
+
+ If you are an Enterprise Edition user, replace `gitlab-ce` with `gitlab-ee` in the above command.
+
+- To apply the Praefect database migrations and restart Praefect, run:
+
+ ```shell
+ sudo gitlab-ctl reconfigure
+ ```
+
+**All Praefect nodes _excluding_ the Praefect deploy node**
+
+- Update the GitLab package:
+
+ ```shell
+ sudo apt-get update && sudo apt-get install gitlab-ce
+ ```
+
+ If you are an Enterprise Edition user, replace `gitlab-ce` with `gitlab-ee` in the above command.
+
+- Ensure nodes are running the latest code:
+
+ ```shell
+ sudo gitlab-ctl reconfigure
+ ```
+
+### Use PostgreSQL HA
+
+Pick a node to be the `Deploy Node`. It can be any application node, but it must be the same
+node throughout the process.
+
+**Deploy node**
+
+- Create an empty file at `/etc/gitlab/skip-auto-reconfigure`. This prevents upgrades from running `gitlab-ctl reconfigure`, which by default automatically stops GitLab, runs all database migrations, and restarts GitLab.
+
+ ```shell
+ sudo touch /etc/gitlab/skip-auto-reconfigure
+ ```
+
+**All nodes _including_ the Deploy node**
+
+- To prevent `reconfigure` from automatically running database migrations, ensure that `gitlab_rails['auto_migrate'] = false` is set in `/etc/gitlab/gitlab.rb`.
+
+**Gitaly only nodes**
+
+- Update the GitLab package
+
+ ```shell
+ # Debian/Ubuntu
+ sudo apt-get update && sudo apt-get install gitlab-ce
+
+ # Centos/RHEL
+ sudo yum install gitlab-ce
+ ```
+
+ If you are an Enterprise Edition user, replace `gitlab-ce` with `gitlab-ee` in the above command.
+
+- Ensure nodes are running the latest code
+
+ ```shell
+ sudo gitlab-ctl reconfigure
+ ```
+
+**Deploy node**
+
+- Update the GitLab package
+
+ ```shell
+ # Debian/Ubuntu
+ sudo apt-get update && sudo apt-get install gitlab-ce
+
+ # Centos/RHEL
+ sudo yum install gitlab-ce
+ ```
+
+ If you are an Enterprise Edition user, replace `gitlab-ce` with `gitlab-ee` in the above command.
+
+- If you're using PgBouncer:
+
+ You'll need to bypass PgBouncer and connect directly to the database master
+ before running migrations.
+
+ Rails uses an advisory lock when attempting to run a migration to prevent
+ concurrent migrations from running on the same database. These locks are
+ not shared across transactions, resulting in `ActiveRecord::ConcurrentMigrationError`
+ and other issues when running database migrations using PgBouncer in transaction
+ pooling mode.
+
+ To find the master node, run the following on a database node:
+
+ ```shell
+ sudo gitlab-ctl patroni members
+ ```
+
+ Then, in your `gitlab.rb` file on the deploy node, update
+ `gitlab_rails['db_host']` and `gitlab_rails['db_port']` with the database
+ master's host and port.
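+
+  For example (hypothetical address and port; substitute the values reported
+  for the leader by `gitlab-ctl patroni members`):
+
+  ```ruby
+  # Point Rails directly at the PostgreSQL leader, bypassing PgBouncer, while migrations run
+  gitlab_rails['db_host'] = '10.0.0.2'
+  gitlab_rails['db_port'] = 5432
+  ```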
+
+- To get the regular database migrations and latest code in place, run
+
+ ```shell
+ sudo gitlab-ctl reconfigure
+ sudo SKIP_POST_DEPLOYMENT_MIGRATIONS=true gitlab-rake db:migrate
+ ```
+
+**All nodes _excluding_ the Deploy node**
+
+- Update the GitLab package
+
+ ```shell
+ sudo apt-get update && sudo apt-get install gitlab-ce
+ ```
+
+ If you are an Enterprise Edition user, replace `gitlab-ce` with `gitlab-ee` in the above command.
+
+- Ensure nodes are running the latest code
+
+ ```shell
+ sudo gitlab-ctl reconfigure
+ ```
+
+**Deploy node**
+
+- Run post-deployment database migrations on deploy node to complete the migrations with
+
+ ```shell
+ sudo gitlab-rake db:migrate
+ ```
+
+**For nodes that run Puma or Sidekiq**
+
+- Hot reload `puma` and `sidekiq` services
+
+ ```shell
+ sudo gitlab-ctl hup puma
+ sudo gitlab-ctl restart sidekiq
+ ```
+
+- If you're using PgBouncer:
+
+ Change your `gitlab.rb` to point back to PgBouncer and run:
+
+ ```shell
+ sudo gitlab-ctl reconfigure
+ ```
+
+If you do not want to run zero downtime upgrades in the future, make
+sure you remove `/etc/gitlab/skip-auto-reconfigure` and revert
+setting `gitlab_rails['auto_migrate'] = false` in
+`/etc/gitlab/gitlab.rb` after you've completed these steps.
+
+### Use Redis HA (using Sentinel) **(PREMIUM SELF)**
+
+Package upgrades may involve version updates to the bundled Redis service. On
+instances using [Redis for scaling](../administration/redis/index.md),
+upgrades must follow the order specified below to ensure minimum downtime.
+This doc assumes the official guides have been followed to set up Redis
+HA.
+
+#### In the application node
+
+According to [official Redis docs](https://redis.io/topics/admin#upgrading-or-restarting-a-redis-instance-without-downtime),
+the easiest way to update an HA instance using Sentinel is to upgrade the
+secondaries one after the other, perform a manual failover from current
+primary (running old version) to a recently upgraded secondary (running a new
+version), and then upgrade the original primary. For this, we need to know
+the address of the current Redis primary.
+
+- If your application node is running GitLab 12.7.0 or later, you can use the
+  following command to get the address of the current Redis primary:
+
+ ```shell
+ sudo gitlab-ctl get-redis-master
+ ```
+
+- If your application node is running a version older than GitLab 12.7.0, you
+  will have to run the underlying `redis-cli` command (which the
+  `get-redis-master` command uses) to fetch information about the primary.
+
+ 1. Get the address of one of the sentinel nodes specified as
+ `gitlab_rails['redis_sentinels']` in `/etc/gitlab/gitlab.rb`
+
+ 1. Get the Redis master name specified as `redis['master_name']` in
+ `/etc/gitlab/gitlab.rb`
+
+ 1. Run the following command
+
+ ```shell
+ sudo /opt/gitlab/embedded/bin/redis-cli -h <sentinel host> -p <sentinel port> SENTINEL get-master-addr-by-name <redis master name>
+ ```
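+
+      The command prints the current primary's address and port, similar to the
+      following (hypothetical values):
+
+      ```plaintext
+      10.0.0.10
+      6379
+      ```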
+
+#### In the Redis secondary nodes
+
+1. Install the package for the new version.
+
+1. Run `sudo gitlab-ctl reconfigure`, if a reconfigure was not run as part of
+   the installation (due to the `/etc/gitlab/skip-auto-reconfigure` file being present).
+
+1. If reconfigure warns about a pending Redis/Sentinel restart, restart the
+ corresponding service
+
+ ```shell
+ sudo gitlab-ctl restart redis
+ sudo gitlab-ctl restart sentinel
+ ```
+
+#### In the Redis primary node
+
+Before upgrading the Redis primary node, we need to perform a failover so that
+one of the recently upgraded secondary nodes becomes the new primary. Once the
+failover is complete, we can upgrade the original primary node.
+
+1. Stop the Redis service on the Redis primary node so that it fails over to a
+   secondary node
+
+ ```shell
+ sudo gitlab-ctl stop redis
+ ```
+
+1. Wait for failover to be complete. You can verify it by periodically checking
+ details of the current Redis primary node (as mentioned above). If it starts
+ reporting a new IP, failover is complete.
+
+1. Start Redis again on that node, so that it starts following the current
+ primary node.
+
+ ```shell
+ sudo gitlab-ctl start redis
+ ```
+
+1. Install the package for the new version.
+
+1. Run `sudo gitlab-ctl reconfigure`, if a reconfigure was not run as part of
+   the installation (due to the `/etc/gitlab/skip-auto-reconfigure` file being present).
+
+1. If reconfigure warns about a pending Redis/Sentinel restart, restart the
+ corresponding service
+
+ ```shell
+ sudo gitlab-ctl restart redis
+ sudo gitlab-ctl restart sentinel
+ ```
+
+#### Update the application node
+
+Install the package for the new version and follow the regular package upgrade
+procedure.
+
+## Geo deployment **(PREMIUM SELF)**
+
+The order of steps is important. Make sure you perform each step on the
+correct node, in the order given.
+
+Log in to your **primary** node and execute the following:
+
+1. Create an empty file at `/etc/gitlab/skip-auto-reconfigure`. This prevents upgrades from running `gitlab-ctl reconfigure`, which by default automatically stops GitLab, runs all database migrations, and restarts GitLab.
+
+ ```shell
+ sudo touch /etc/gitlab/skip-auto-reconfigure
+ ```
+
+1. Edit `/etc/gitlab/gitlab.rb` and ensure the following is present:
+
+ ```ruby
+ gitlab_rails['auto_migrate'] = false
+ ```
+
+1. Reconfigure GitLab:
+
+ ```shell
+ sudo gitlab-ctl reconfigure
+ ```
+
+1. Update the GitLab package
+
+ ```shell
+ # Debian/Ubuntu
+ sudo apt-get update && sudo apt-get install gitlab-ee
+
+ # Centos/RHEL
+ sudo yum install gitlab-ee
+ ```
+
+1. To get the database migrations and latest code in place, run
+
+ ```shell
+ sudo SKIP_POST_DEPLOYMENT_MIGRATIONS=true gitlab-ctl reconfigure
+ ```
+
+1. Hot reload `puma` and `sidekiq` services
+
+ ```shell
+ sudo gitlab-ctl hup puma
+ sudo gitlab-ctl restart sidekiq
+ ```
+
+On each **secondary** node, execute the following:
+
+1. Create an empty file at `/etc/gitlab/skip-auto-reconfigure`. This prevents upgrades from running `gitlab-ctl reconfigure`, which by default automatically stops GitLab, runs all database migrations, and restarts GitLab.
+
+ ```shell
+ sudo touch /etc/gitlab/skip-auto-reconfigure
+ ```
+
+1. Edit `/etc/gitlab/gitlab.rb` and ensure the following is present:
+
+ ```ruby
+ gitlab_rails['auto_migrate'] = false
+ ```
+
+1. Reconfigure GitLab:
+
+ ```shell
+ sudo gitlab-ctl reconfigure
+ ```
+
+1. Update the GitLab package
+
+ ```shell
+ # Debian/Ubuntu
+ sudo apt-get update && sudo apt-get install gitlab-ee
+
+ # Centos/RHEL
+ sudo yum install gitlab-ee
+ ```
+
+1. To get the database migrations and latest code in place, run
+
+ ```shell
+ sudo SKIP_POST_DEPLOYMENT_MIGRATIONS=true gitlab-ctl reconfigure
+ ```
+
+1. Hot reload `puma` and `sidekiq` services, and restart the `geo-logcursor` service
+
+ ```shell
+ sudo gitlab-ctl hup puma
+ sudo gitlab-ctl restart sidekiq
+ sudo gitlab-ctl restart geo-logcursor
+ ```
+
+1. Run post-deployment database migrations, specific to the Geo database
+
+ ```shell
+ sudo gitlab-rake geo:db:migrate
+ ```
+
+After all **secondary** nodes are updated, finalize
+the update on the **primary** node:
+
+- Run post-deployment database migrations
+
+ ```shell
+ sudo gitlab-rake db:migrate
+ ```
+
+After updating all nodes (both **primary** and all **secondaries**), check their status:
+
+- Verify Geo configuration and dependencies
+
+ ```shell
+ sudo gitlab-rake gitlab:geo:check
+ ```
+
+If you do not want to run zero downtime upgrades in the future, make
+sure you remove `/etc/gitlab/skip-auto-reconfigure` and revert
+setting `gitlab_rails['auto_migrate'] = false` in
+`/etc/gitlab/gitlab.rb` after you've completed these steps.
+
+## Multi-node / HA deployment with Geo **(PREMIUM SELF)**
+
+This section describes the steps required to upgrade a multi-node / HA
+deployment with Geo. Some steps must be performed on a particular node. This
+node is referred to as the "deploy node" and is noted throughout the following
+instructions.
+
+Updates must be performed in the following order:
+
+1. Update Geo **primary** multi-node deployment.
+1. Update Geo **secondary** multi-node deployments.
+1. Post-deployment migrations and checks.
+
+### Step 1: Choose a "deploy node" for each deployment
+
+You now need to choose:
+
+- One instance for use as the **primary** "deploy node" on the Geo **primary** multi-node deployment.
+- One instance for use as the **secondary** "deploy node" on each Geo **secondary** multi-node deployment.
+
+Deploy nodes must be running Puma, Sidekiq, or the `geo-logcursor` daemon. To
+avoid any downtime, they must not be in use during the update:
+
+- If running Puma, remove the deploy node from the load balancer.
+- If running Sidekiq, ensure the deploy node is not processing jobs:
+
+ ```shell
+ sudo gitlab-ctl stop sidekiq
+ ```
+
+- If running `geo-logcursor` daemon, ensure the deploy node is not processing events:
+
+ ```shell
+ sudo gitlab-ctl stop geo-logcursor
+ ```
+
+For zero-downtime, Puma, Sidekiq, and `geo-logcursor` must be running on other nodes during the update.
+
+### Step 2: Update the Geo primary multi-node deployment
+
+**On all primary nodes _including_ the primary "deploy node"**
+
+1. Create an empty file at `/etc/gitlab/skip-auto-reconfigure`. This prevents upgrades from running `gitlab-ctl reconfigure`, which by default automatically stops GitLab, runs all database migrations, and restarts GitLab.
+
+   ```shell
+   sudo touch /etc/gitlab/skip-auto-reconfigure
+   ```
+
+1. To prevent `reconfigure` from automatically running database migrations, ensure that `gitlab_rails['auto_migrate'] = false` is set in `/etc/gitlab/gitlab.rb`.
+
+1. Ensure nodes are running the latest code
+
+ ```shell
+ sudo gitlab-ctl reconfigure
+ ```
+
+**On primary Gitaly only nodes**
+
+1. Update the GitLab package
+
+ ```shell
+ # Debian/Ubuntu
+ sudo apt-get update && sudo apt-get install gitlab-ee
+
+ # Centos/RHEL
+ sudo yum install gitlab-ee
+ ```
+
+1. Ensure nodes are running the latest code
+
+ ```shell
+ sudo gitlab-ctl reconfigure
+ ```
+
+**On the primary "deploy node"**
+
+1. Update the GitLab package
+
+ ```shell
+ # Debian/Ubuntu
+ sudo apt-get update && sudo apt-get install gitlab-ee
+
+ # Centos/RHEL
+ sudo yum install gitlab-ee
+ ```
+
+1. If you're using PgBouncer:
+
+ You'll need to bypass PgBouncer and connect directly to the database master
+ before running migrations.
+
+ Rails uses an advisory lock when attempting to run a migration to prevent
+ concurrent migrations from running on the same database. These locks are
+ not shared across transactions, resulting in `ActiveRecord::ConcurrentMigrationError`
+ and other issues when running database migrations using PgBouncer in transaction
+ pooling mode.
+
+ To find the master node, run the following on a database node:
+
+ ```shell
+ sudo gitlab-ctl patroni members
+ ```
+
+ Then, in your `gitlab.rb` file on the deploy node, update
+ `gitlab_rails['db_host']` and `gitlab_rails['db_port']` with the database
+ master's host and port.
+
+1. To get the regular database migrations and latest code in place, run
+
+ ```shell
+ sudo gitlab-ctl reconfigure
+ sudo SKIP_POST_DEPLOYMENT_MIGRATIONS=true gitlab-rake db:migrate
+ ```
+
+1. If this deploy node is normally used to serve requests or process jobs,
+ then you may return it to service at this point.
+
+ - To serve requests, add the deploy node to the load balancer.
+ - To process Sidekiq jobs again, start Sidekiq:
+
+ ```shell
+ sudo gitlab-ctl start sidekiq
+ ```
+
+**On all primary nodes _excluding_ the primary "deploy node"**
+
+1. Update the GitLab package
+
+ ```shell
+ # Debian/Ubuntu
+ sudo apt-get update && sudo apt-get install gitlab-ee
+
+ # Centos/RHEL
+ sudo yum install gitlab-ee
+ ```
+
+1. Ensure nodes are running the latest code
+
+ ```shell
+ sudo gitlab-ctl reconfigure
+ ```
+
+**For all primary nodes that run Puma or Sidekiq _including_ the primary "deploy node"**
+
+Hot reload `puma` and `sidekiq` services:
+
+```shell
+sudo gitlab-ctl hup puma
+sudo gitlab-ctl restart sidekiq
+```
+
+### Step 3: Update each Geo secondary multi-node deployment
+
+Only proceed if you have successfully completed all steps on the Geo **primary** multi-node deployment.
+
+**On all secondary nodes _including_ the secondary "deploy node"**
+
+1. Create an empty file at `/etc/gitlab/skip-auto-reconfigure`. This prevents upgrades from running `gitlab-ctl reconfigure`, which by default automatically stops GitLab, runs all database migrations, and restarts GitLab.
+
+   ```shell
+   sudo touch /etc/gitlab/skip-auto-reconfigure
+   ```
+
+1. To prevent `reconfigure` from automatically running database migrations, ensure that `geo_secondary['auto_migrate'] = false` is set in `/etc/gitlab/gitlab.rb`.
+
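+   As on the primary, a minimal excerpt of `/etc/gitlab/gitlab.rb`, showing only this setting:
+
+   ```ruby
+   # /etc/gitlab/gitlab.rb (leave your other settings as they are)
+   geo_secondary['auto_migrate'] = false
+   ```
+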
+1. Ensure nodes are running the latest code
+
+ ```shell
+ sudo gitlab-ctl reconfigure
+ ```
+
+**On secondary Gitaly-only nodes**
+
+1. Update the GitLab package
+
+ ```shell
+ # Debian/Ubuntu
+ sudo apt-get update && sudo apt-get install gitlab-ee
+
+   # CentOS/RHEL
+ sudo yum install gitlab-ee
+ ```
+
+1. Ensure nodes are running the latest code
+
+ ```shell
+ sudo gitlab-ctl reconfigure
+ ```
+
+**On the secondary "deploy node"**
+
+1. Update the GitLab package
+
+ ```shell
+ # Debian/Ubuntu
+ sudo apt-get update && sudo apt-get install gitlab-ee
+
+   # CentOS/RHEL
+ sudo yum install gitlab-ee
+ ```
+
+1. To get the regular database migrations and latest code in place, run:
+
+ ```shell
+ sudo gitlab-ctl reconfigure
+ sudo SKIP_POST_DEPLOYMENT_MIGRATIONS=true gitlab-rake geo:db:migrate
+ ```
+
+1. If this deploy node is normally used to serve requests or perform
+ background processing, then you may return it to service at this point.
+
+ - To serve requests, add the deploy node to the load balancer.
+ - To process Sidekiq jobs again, start Sidekiq:
+
+ ```shell
+ sudo gitlab-ctl start sidekiq
+ ```
+
+ - To process Geo events again, start the `geo-logcursor` daemon:
+
+ ```shell
+ sudo gitlab-ctl start geo-logcursor
+ ```
+
+**On all secondary nodes _excluding_ the secondary "deploy node"**
+
+1. Update the GitLab package
+
+ ```shell
+ # Debian/Ubuntu
+ sudo apt-get update && sudo apt-get install gitlab-ee
+
+   # CentOS/RHEL
+ sudo yum install gitlab-ee
+ ```
+
+1. Ensure nodes are running the latest code
+
+ ```shell
+ sudo gitlab-ctl reconfigure
+ ```
+
+**For all secondary nodes that run Puma, Sidekiq, or the `geo-logcursor` daemon _including_ the secondary "deploy node"**
+
+Hot reload `puma`, `sidekiq`, and `geo-logcursor` services:
+
+```shell
+sudo gitlab-ctl hup puma
+sudo gitlab-ctl restart sidekiq
+sudo gitlab-ctl restart geo-logcursor
+```
+
+### Step 4: Run post-deployment migrations and checks
+
+**On the primary "deploy node"**
+
+1. Run post-deployment database migrations:
+
+ ```shell
+ sudo gitlab-rake db:migrate
+ ```
+
+1. Verify Geo configuration and dependencies
+
+ ```shell
+ sudo gitlab-rake gitlab:geo:check
+ ```
+
+1. If you're using PgBouncer:
+
+   Change your `gitlab.rb` to point back to PgBouncer. For example, with a
+   hypothetical PgBouncer endpoint (use your own host and port):
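+
+   ```ruby
+   # /etc/gitlab/gitlab.rb (hypothetical; point these at your PgBouncer endpoint)
+   gitlab_rails['db_host'] = '10.0.0.10'
+   gitlab_rails['db_port'] = 6432 # PgBouncer's default listen port
+   ```
+
+   Then run: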
+
+ ```shell
+ sudo gitlab-ctl reconfigure
+ ```
+
+**On all secondary "deploy nodes"**
+
+1. Run post-deployment database migrations, specific to the Geo database:
+
+ ```shell
+ sudo gitlab-rake geo:db:migrate
+ ```
+
+1. Verify Geo configuration and dependencies
+
+ ```shell
+ sudo gitlab-rake gitlab:geo:check
+ ```
+
+1. Verify Geo status
+
+ ```shell
+ sudo gitlab-rake geo:status
+ ```
diff --git a/doc/user/project/import/bitbucket.md b/doc/user/project/import/bitbucket.md
index 802eb3efc51..cda018a0c37 100644
--- a/doc/user/project/import/bitbucket.md
+++ b/doc/user/project/import/bitbucket.md
@@ -16,18 +16,18 @@ Import your projects from Bitbucket Cloud to GitLab with minimal effort.
The Bitbucket importer can import:
-- Repository description (GitLab 7.7+)
-- Git repository data (GitLab 7.7+)
-- Issues (GitLab 7.7+)
-- Issue comments (GitLab 8.15+)
-- Pull requests (GitLab 8.4+)
-- Pull request comments (GitLab 8.15+)
-- Milestones (GitLab 8.15+)
-- Wiki (GitLab 8.15+)
+- Repository description
+- Git repository data
+- Issues
+- Issue comments
+- Pull requests
+- Pull request comments
+- Milestones
+- Wiki
When importing:
-- References to pull requests and issues are preserved (GitLab 8.7+).
+- References to pull requests and issues are preserved.
- Repository public access is retained. If a repository is private in Bitbucket, it's created as
private in GitLab as well.
diff --git a/lib/api/projects_relation_builder.rb b/lib/api/projects_relation_builder.rb
index d7c7a2d59b1..db46602cd90 100644
--- a/lib/api/projects_relation_builder.rb
+++ b/lib/api/projects_relation_builder.rb
@@ -27,8 +27,6 @@ module API
end
def preload_repository_cache(projects_relation)
- return unless Feature.enabled?(:preload_repo_cache, default_enabled: :yaml)
-
repositories = repositories_for_preload(projects_relation)
Gitlab::RepositoryCache::Preloader.new(repositories).preload( # rubocop:disable CodeReuse/ActiveRecord
diff --git a/locale/gitlab.pot b/locale/gitlab.pot
index daa0f98ccbf..e0a6bbcc2e4 100644
--- a/locale/gitlab.pot
+++ b/locale/gitlab.pot
@@ -11899,9 +11899,6 @@ msgid_plural "Dismiss %d selected vulnerabilities as"
msgstr[0] ""
msgstr[1] ""
-msgid "Dismiss Value Stream Analytics introduction box"
-msgstr ""
-
msgid "Dismiss merge request promotion"
msgstr ""
@@ -18247,9 +18244,6 @@ msgstr ""
msgid "Interval Pattern"
msgstr ""
-msgid "Introducing Value Stream Analytics"
-msgstr ""
-
msgid "Introducing Your DevOps Report"
msgstr ""
@@ -36989,9 +36983,6 @@ msgstr ""
msgid "Value Stream Analytics can help you determine your team’s velocity"
msgstr ""
-msgid "Value Stream Analytics gives an overview of how much time it takes to go from idea to production in your project."
-msgstr ""
-
msgid "Value might contain a variable reference"
msgstr ""
diff --git a/spec/db/schema_spec.rb b/spec/db/schema_spec.rb
index c7739e2ff5f..ee1f4fda11c 100644
--- a/spec/db/schema_spec.rb
+++ b/spec/db/schema_spec.rb
@@ -35,7 +35,6 @@ RSpec.describe 'Database schema' do
cluster_providers_gcp: %w[gcp_project_id operation_id],
compliance_management_frameworks: %w[group_id],
commit_user_mentions: %w[commit_id],
- dep_ci_build_trace_sections: %w[build_id],
deploy_keys_projects: %w[deploy_key_id],
deployments: %w[deployable_id user_id],
draft_notes: %w[discussion_id commit_id],
diff --git a/spec/frontend/cycle_analytics/banner_spec.js b/spec/frontend/cycle_analytics/banner_spec.js
deleted file mode 100644
index ef7998c5ff5..00000000000
--- a/spec/frontend/cycle_analytics/banner_spec.js
+++ /dev/null
@@ -1,47 +0,0 @@
-import { shallowMount } from '@vue/test-utils';
-import Banner from '~/cycle_analytics/components/banner.vue';
-
-describe('Value Stream Analytics banner', () => {
- let wrapper;
-
- const createComponent = () => {
- wrapper = shallowMount(Banner, {
- propsData: {
- documentationLink: 'path',
- },
- });
- };
-
- beforeEach(() => {
- createComponent();
- });
-
- afterEach(() => {
- wrapper.destroy();
- });
-
- it('should render value stream analytics information', () => {
- expect(wrapper.find('h4').text().trim()).toBe('Introducing Value Stream Analytics');
-
- expect(
- wrapper
- .find('p')
- .text()
- .trim()
- .replace(/[\r\n]+/g, ' '),
- ).toContain(
- 'Value Stream Analytics gives an overview of how much time it takes to go from idea to production in your project.',
- );
-
- expect(wrapper.find('a').text().trim()).toBe('Read more');
- expect(wrapper.find('a').attributes('href')).toBe('path');
- });
-
- it('should emit an event when close button is clicked', async () => {
- jest.spyOn(wrapper.vm, '$emit').mockImplementation(() => {});
-
- await wrapper.find('.js-ca-dismiss-button').trigger('click');
-
- expect(wrapper.vm.$emit).toHaveBeenCalled();
- });
-});
diff --git a/spec/frontend/pipeline_editor/mock_data.js b/spec/frontend/pipeline_editor/mock_data.js
index 310727212a9..f2104f25324 100644
--- a/spec/frontend/pipeline_editor/mock_data.js
+++ b/spec/frontend/pipeline_editor/mock_data.js
@@ -159,15 +159,12 @@ export const mergeUnwrappedCiConfig = (mergedConfig) => {
export const mockCommitShaResults = {
data: {
project: {
- pipelines: {
- nodes: [
- {
- id: 'gid://gitlab/Ci::Pipeline/1',
+ repository: {
+ tree: {
+ lastCommit: {
sha: mockCommitSha,
- path: `/${mockProjectFullPath}/-/pipelines/488`,
- commitPath: `/${mockProjectFullPath}/-/commit/d0d56d363d8a3f67a8ab9fc00207d468f30032ca`,
},
- ],
+ },
},
},
},
@@ -176,21 +173,12 @@ export const mockCommitShaResults = {
export const mockNewCommitShaResults = {
data: {
project: {
- pipelines: {
- nodes: [
- {
- id: 'gid://gitlab/Ci::Pipeline/2',
+ repository: {
+ tree: {
+ lastCommit: {
sha: 'eeff1122',
- path: `/${mockProjectFullPath}/-/pipelines/489`,
- commitPath: `/${mockProjectFullPath}/-/commit/bb1abcfe3d8a3f67a8ab9fc00207d468f3022bee`,
},
- {
- id: 'gid://gitlab/Ci::Pipeline/1',
- sha: mockCommitSha,
- path: `/${mockProjectFullPath}/-/pipelines/488`,
- commitPath: `/${mockProjectFullPath}/-/commit/d0d56d363d8a3f67a8ab9fc00207d468f30032ca`,
- },
- ],
+ },
},
},
},
@@ -199,8 +187,12 @@ export const mockNewCommitShaResults = {
export const mockEmptyCommitShaResults = {
data: {
project: {
- pipelines: {
- nodes: [],
+ repository: {
+ tree: {
+ lastCommit: {
+ sha: '',
+ },
+ },
},
},
},
diff --git a/spec/frontend/pipeline_editor/pipeline_editor_app_spec.js b/spec/frontend/pipeline_editor/pipeline_editor_app_spec.js
index 2c9bedc6e76..393cad0546b 100644
--- a/spec/frontend/pipeline_editor/pipeline_editor_app_spec.js
+++ b/spec/frontend/pipeline_editor/pipeline_editor_app_spec.js
@@ -283,19 +283,6 @@ describe('Pipeline editor app component', () => {
expect(window.scrollTo).toHaveBeenCalledWith({ top: 0, behavior: 'smooth' });
});
- it('polls for commit sha while pipeline data is not yet available for newly committed branch', async () => {
- jest
- .spyOn(wrapper.vm.$apollo.queries.commitSha, 'startPolling')
- .mockImplementation(jest.fn());
-
- // simulate updating current branch (which triggers commitSha refetch)
- // while pipeline data is not yet available
- mockLatestCommitShaQuery.mockResolvedValue(mockEmptyCommitShaResults);
- await wrapper.vm.$apollo.queries.commitSha.refetch();
-
- expect(wrapper.vm.$apollo.queries.commitSha.startPolling).toHaveBeenCalledTimes(1);
- });
-
it('polls for commit sha while pipeline data is not yet available for current branch', async () => {
jest
.spyOn(wrapper.vm.$apollo.queries.commitSha, 'startPolling')
diff --git a/spec/frontend_integration/README.md b/spec/frontend_integration/README.md
index 573a385d81e..377294fb19f 100644
--- a/spec/frontend_integration/README.md
+++ b/spec/frontend_integration/README.md
@@ -11,6 +11,33 @@ Frontend integration specs:
As a result, they deserve their own special place.
+## Run frontend integration tests locally
+
+The frontend integration specs test frontend bundles against a mock backend,
+which is built using the fixtures and the GraphQL schema.
+
+We can generate the necessary fixtures and GraphQL schema by running:
+
+```shell
+bundle exec rake frontend:fixtures gitlab:graphql:schema:dump
+```
+
+Then we can use [Jest](https://jestjs.io/) to run the frontend integration tests:
+
+```shell
+yarn jest:integration <path-to-integration-test>
+```
+
+If you'd like to run the frontend integration specs **without** setting up the fixtures first, then you
+can set `GL_IGNORE_WARNINGS=1`:
+
+```shell
+GL_IGNORE_WARNINGS=1 yarn jest:integration <path-to-integration-test>
+```
+
+The `jest-integration` job executes the frontend integration tests in our
+CI/CD pipelines.
+
## References
- https://docs.gitlab.com/ee/development/testing_guide/testing_levels.html#frontend-integration-tests
diff --git a/spec/models/ci/pipeline_spec.rb b/spec/models/ci/pipeline_spec.rb
index b3d0b6af926..1007d64438f 100644
--- a/spec/models/ci/pipeline_spec.rb
+++ b/spec/models/ci/pipeline_spec.rb
@@ -2037,16 +2037,6 @@ RSpec.describe Ci::Pipeline, :mailer, factory_default: :keep do
it 'returns external pull request modified paths' do
expect(pipeline.modified_paths).to match(external_pull_request.modified_paths)
end
-
- context 'when the FF ci_modified_paths_of_external_prs is disabled' do
- before do
- stub_feature_flags(ci_modified_paths_of_external_prs: false)
- end
-
- it 'returns nil' do
- expect(pipeline.modified_paths).to be_nil
- end
- end
end
end