Welcome to the mirror list, hosted at ThFree Co, Russian Federation.

gitlab.com/gitlab-org/gitlab-foss.git - Unnamed repository; edit this file 'description' to name the repository.
summary | refs | log | tree | commit | diff
diff options
context:
space:
mode:
-rw-r--r--  .gitlab/ci/review-apps/main.gitlab-ci.yml  5
-rw-r--r--  .gitlab/ci/rules.gitlab-ci.yml  38
-rw-r--r--  app/helpers/todos_helper.rb  7
-rw-r--r--  app/services/packages/debian/process_package_file_service.rb  101
-rw-r--r--  app/views/groups/_home_panel.html.haml  50
-rw-r--r--  app/views/projects/_home_panel.html.haml  31
-rw-r--r--  app/views/projects/buttons/_fork.html.haml  6
-rw-r--r--  app/views/projects/buttons/_star.html.haml  12
-rw-r--r--  app/views/shared/projects/_project.html.haml  2
-rw-r--r--  config/feature_flags/ops/search_curation_dry_run.yml  8
-rw-r--r--  db/docs/pm_licenses.yml  9
-rw-r--r--  db/docs/pm_package_version_licenses.yml  9
-rw-r--r--  db/docs/pm_package_versions.yml  9
-rw-r--r--  db/docs/pm_packages.yml  9
-rw-r--r--  db/migrate/20221101174816_create_package_metadata.rb  11
-rw-r--r--  db/migrate/20221101194416_create_package_metadata_versions.rb  11
-rw-r--r--  db/migrate/20221101195309_create_package_metadata_licenses.rb  10
-rw-r--r--  db/migrate/20221101195543_create_package_metadata_package_version_licenses.rb  12
-rw-r--r--  db/schema_migrations/20221101174816  1
-rw-r--r--  db/schema_migrations/20221101194416  1
-rw-r--r--  db/schema_migrations/20221101195309  1
-rw-r--r--  db/schema_migrations/20221101195543  1
-rw-r--r--  db/structure.sql  91
-rw-r--r--  doc/administration/geo/replication/tuning.md  2
-rw-r--r--  doc/administration/geo/setup/database.md  3
-rw-r--r--  doc/administration/operations/index.md  1
-rw-r--r--  doc/administration/sidekiq/extra_sidekiq_processes.md  286
-rw-r--r--  doc/administration/sidekiq/extra_sidekiq_routing.md  166
-rw-r--r--  doc/administration/sidekiq/index.md  2
-rw-r--r--  doc/administration/sidekiq/processing_specific_job_classes.md  240
-rw-r--r--  doc/administration/sidekiq/sidekiq_job_migration.md  17
-rw-r--r--  doc/architecture/blueprints/ci_data_decay/pipeline_partitioning.md  7
-rw-r--r--  doc/ci/environments/index.md  13
-rw-r--r--  doc/development/sidekiq/index.md  4
-rw-r--r--  doc/index.md  23
-rw-r--r--  doc/integration/advanced_search/elasticsearch.md  8
-rw-r--r--  doc/integration/advanced_search/elasticsearch_troubleshooting.md  2
-rw-r--r--  doc/update/index.md  6
-rw-r--r--  lib/api/concerns/packages/npm_endpoints.rb  34
-rw-r--r--  lib/api/helpers/packages/dependency_proxy_helpers.rb  6
-rw-r--r--  lib/api/nuget_project_packages.rb  16
-rw-r--r--  lib/gitlab/database.rb  3
-rw-r--r--  lib/gitlab/database/gitlab_schemas.yml  4
-rw-r--r--  lib/gitlab/memory/jemalloc.rb  4
-rw-r--r--  lib/gitlab/memory/reports/jemalloc_stats.rb  12
-rw-r--r--  lib/gitlab/memory/reports_daemon.rb  19
-rw-r--r--  lib/gitlab/middleware/compressed_json.rb  27
-rw-r--r--  locale/gitlab.pot  3
-rw-r--r--  spec/features/projects/user_sees_sidebar_spec.rb  2
-rw-r--r--  spec/helpers/todos_helper_spec.rb  28
-rw-r--r--  spec/lib/gitlab/memory/jemalloc_spec.rb  8
-rw-r--r--  spec/lib/gitlab/memory/reports/jemalloc_stats_spec.rb  7
-rw-r--r--  spec/lib/gitlab/memory/reports_daemon_spec.rb  1
-rw-r--r--  spec/lib/gitlab/middleware/compressed_json_spec.rb  113
-rw-r--r--  spec/requests/api/npm_instance_packages_spec.rb  12
-rw-r--r--  spec/requests/api/npm_project_packages_spec.rb  12
-rw-r--r--  spec/requests/api/nuget_project_packages_spec.rb  46
-rw-r--r--  spec/services/packages/debian/process_changes_service_spec.rb  4
-rw-r--r--  spec/services/packages/debian/process_package_file_service_spec.rb  160
-rw-r--r--  spec/support/shared_examples/requests/api/npm_packages_shared_examples.rb  165
60 files changed, 1347 insertions, 554 deletions
diff --git a/.gitlab/ci/review-apps/main.gitlab-ci.yml b/.gitlab/ci/review-apps/main.gitlab-ci.yml
index 85c5c7d1b1d..d8012c963c1 100644
--- a/.gitlab/ci/review-apps/main.gitlab-ci.yml
+++ b/.gitlab/ci/review-apps/main.gitlab-ci.yml
@@ -96,7 +96,7 @@ review-build-cng:
name: review/${CI_COMMIT_REF_SLUG}${SCHEDULE_TYPE} # No separator for SCHEDULE_TYPE so it's compatible as before and looks nice without it
url: https://gitlab-${CI_ENVIRONMENT_SLUG}.${REVIEW_APPS_DOMAIN}
on_stop: review-stop
- auto_stop_in: 48 hours
+ auto_stop_in: 6 hours
review-deploy:
extends:
@@ -160,7 +160,8 @@ review-deploy-sample-projects:
# See https://gitlab.com/gitlab-org/gitlab/issues/191273
GIT_DEPTH: 1
before_script:
- - *base-before_script
+ - source ./scripts/utils.sh
+ - source ./scripts/review_apps/review-apps.sh
review-delete-deployment:
extends:
diff --git a/.gitlab/ci/rules.gitlab-ci.yml b/.gitlab/ci/rules.gitlab-ci.yml
index db7b6473c06..707980c3513 100644
--- a/.gitlab/ci/rules.gitlab-ci.yml
+++ b/.gitlab/ci/rules.gitlab-ci.yml
@@ -95,26 +95,23 @@
.if-fork-merge-request: &if-fork-merge-request
if: '$CI_PROJECT_NAMESPACE !~ /^gitlab(-org)?($|\/)/ && $CI_MERGE_REQUEST_IID && $CI_MERGE_REQUEST_LABELS !~ /pipeline:run-all-rspec/'
-.if-default-branch-schedule-maintenance: &if-default-branch-schedule-maintenance
- if: '$CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH && $CI_PIPELINE_SOURCE == "schedule" && $SCHEDULE_TYPE == "maintenance"'
+.if-schedule-maintenance: &if-schedule-maintenance
+ if: '$CI_PIPELINE_SOURCE == "schedule" && $SCHEDULE_TYPE == "maintenance"'
.if-default-branch-schedule-nightly: &if-default-branch-schedule-nightly
if: '$CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH && $CI_PIPELINE_SOURCE == "schedule" && $SCHEDULE_TYPE == "nightly"'
+.if-ruby3-branch-schedule-nightly: &if-ruby3-branch-schedule-nightly
+ if: '$CI_COMMIT_BRANCH == "ruby3" && $CI_PIPELINE_SOURCE == "schedule" && $SCHEDULE_TYPE == "nightly"'
+
.if-security-schedule: &if-security-schedule
if: '$CI_PROJECT_NAMESPACE == "gitlab-org/security" && $CI_PIPELINE_SOURCE == "schedule"'
.if-dot-com-gitlab-org-schedule: &if-dot-com-gitlab-org-schedule
if: '$CI_SERVER_HOST == "gitlab.com" && $CI_PROJECT_NAMESPACE == "gitlab-org" && $CI_PIPELINE_SOURCE == "schedule"'
-.if-dot-com-ee-schedule: &if-dot-com-ee-schedule
- if: '$CI_SERVER_HOST == "gitlab.com" && $CI_PROJECT_PATH == "gitlab-org/gitlab" && $CI_PIPELINE_SOURCE == "schedule"'
-
-.if-dot-com-ee-schedule-maintenance: &if-dot-com-ee-schedule-maintenance
- if: '$CI_SERVER_HOST == "gitlab.com" && $CI_PROJECT_PATH == "gitlab-org/gitlab" && $CI_PIPELINE_SOURCE == "schedule" && $SCHEDULE_TYPE == "maintenance"'
-
-.if-dot-com-ee-schedule-nightly: &if-dot-com-ee-schedule-nightly
- if: '$CI_SERVER_HOST == "gitlab.com" && $CI_PROJECT_PATH == "gitlab-org/gitlab" && $CI_PIPELINE_SOURCE == "schedule" && $SCHEDULE_TYPE == "nightly"'
+.if-dot-com-ee-schedule-default-branch-maintenance: &if-dot-com-ee-schedule-default-branch-maintenance
+ if: '$CI_SERVER_HOST == "gitlab.com" && $CI_PROJECT_PATH == "gitlab-org/gitlab" && $CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH && $CI_PIPELINE_SOURCE == "schedule" && $SCHEDULE_TYPE == "maintenance"'
.if-dot-com-ee-schedule-nightly-child-pipeline: &if-dot-com-ee-schedule-nightly-child-pipeline
if: '$CI_SERVER_HOST == "gitlab.com" && $CI_PROJECT_PATH == "gitlab-org/gitlab" && $CI_PIPELINE_SOURCE == "parent_pipeline" && $SCHEDULE_TYPE == "nightly"'
@@ -660,7 +657,7 @@
################
.shared:rules:update-cache:
rules:
- - <<: *if-default-branch-schedule-maintenance
+ - <<: *if-schedule-maintenance
- <<: *if-security-schedule
- <<: *if-merge-request-labels-update-caches
@@ -709,7 +706,7 @@
rules:
# That would run for any project that has a "maintenance" pipeline schedule
# but in fact, the cache package is only uploaded for gitlab.com/gitlab-org/gitlab and jihulab.com/gitlab-cn/gitlab
- - <<: *if-default-branch-schedule-maintenance
+ - <<: *if-schedule-maintenance
- <<: *if-dot-com-gitlab-org-default-branch
changes: ["workhorse/**/*"]
- <<: *if-dot-com-gitlab-org-merge-request
@@ -726,7 +723,7 @@
when: never
# That would run for any project that has a "maintenance" pipeline schedule
# but in fact, the cache package is only uploaded for gitlab.com/gitlab-org/gitlab and jihulab.com/gitlab-cn/gitlab
- - <<: *if-default-branch-schedule-maintenance
+ - <<: *if-schedule-maintenance
- <<: *if-dot-com-gitlab-org-default-branch
changes: *assets-compilation-patterns
- <<: *if-dot-com-gitlab-org-merge-request
@@ -744,7 +741,7 @@
when: never
# That would run for any project that has a "maintenance" pipeline schedule
# but in fact, the cache package is only uploaded for gitlab.com/gitlab-org/gitlab and jihulab.com/gitlab-cn/gitlab
- - <<: *if-default-branch-schedule-maintenance
+ - <<: *if-schedule-maintenance
- <<: *if-dot-com-gitlab-org-merge-request
changes:
- ".gitlab/ci/caching.gitlab-ci.yml"
@@ -1062,7 +1059,7 @@
###############
.pages:rules:
rules:
- - <<: *if-dot-com-ee-schedule-maintenance
+ - <<: *if-dot-com-ee-schedule-default-branch-maintenance
############
# QA rules #
@@ -1545,6 +1542,7 @@
- <<: *if-not-ee
when: never
- <<: *if-default-branch-schedule-nightly
+ - <<: *if-ruby3-branch-schedule-nightly
- <<: *if-merge-request-labels-run-all-rspec
.rails:rules:rspec-coverage:
@@ -1555,7 +1553,7 @@
when: never
- <<: *if-merge-request
changes: *code-backstage-patterns
- - <<: *if-default-branch-schedule-maintenance
+ - <<: *if-schedule-maintenance
- <<: *if-merge-request-labels-run-all-rspec
.rails:rules:rspec-undercoverage:
@@ -1845,7 +1843,7 @@
rules:
- if: "$PACKAGE_HUNTER_USER == null || $PACKAGE_HUNTER_USER == ''"
when: never
- - <<: *if-default-branch-schedule-maintenance
+ - <<: *if-schedule-maintenance
- <<: *if-merge-request
changes: ["yarn.lock"]
@@ -1853,7 +1851,7 @@
rules:
- if: "$PACKAGE_HUNTER_USER == null || $PACKAGE_HUNTER_USER == ''"
when: never
- - <<: *if-default-branch-schedule-maintenance
+ - <<: *if-schedule-maintenance
- <<: *if-merge-request
changes: ["Gemfile.lock"]
@@ -2057,7 +2055,7 @@
rules:
- <<: *if-not-ee
when: never
- - <<: *if-dot-com-ee-schedule-maintenance
+ - <<: *if-dot-com-ee-schedule-default-branch-maintenance
- <<: *if-default-refs
changes:
- ".gitlab/ci/setup.gitlab-ci.yml"
@@ -2079,7 +2077,7 @@
rules:
- <<: *if-not-ee
when: never
- - <<: *if-dot-com-ee-schedule-maintenance
+ - <<: *if-dot-com-ee-schedule-default-branch-maintenance
- <<: *if-default-refs
changes:
- ".gitlab/ci/test-metadata.gitlab-ci.yml"
diff --git a/app/helpers/todos_helper.rb b/app/helpers/todos_helper.rb
index be63d28600f..27067bd223f 100644
--- a/app/helpers/todos_helper.rb
+++ b/app/helpers/todos_helper.rb
@@ -222,7 +222,12 @@ module TodosHelper
end
content = content_tag(:span, class: css_class) do
- "Due #{is_due_today ? "today" : todo.target.due_date.to_s(:medium)}"
+ format(s_("Todos|Due %{due_date}"), due_date: if is_due_today
+ _("today")
+ else
+ l(todo.target.due_date,
+ format: Date::DATE_FORMATS[:medium])
+ end)
end
"&middot; #{content}".html_safe
diff --git a/app/services/packages/debian/process_package_file_service.rb b/app/services/packages/debian/process_package_file_service.rb
new file mode 100644
index 00000000000..59e8ac3425b
--- /dev/null
+++ b/app/services/packages/debian/process_package_file_service.rb
@@ -0,0 +1,101 @@
+# frozen_string_literal: true
+
+module Packages
+ module Debian
+ class ProcessPackageFileService
+ include ExclusiveLeaseGuard
+ include Gitlab::Utils::StrongMemoize
+
+ SOURCE_FIELD_SPLIT_REGEX = /[ ()]/.freeze
+ # used by ExclusiveLeaseGuard
+ DEFAULT_LEASE_TIMEOUT = 1.hour.to_i.freeze
+
+ def initialize(package_file, creator, distribution_name, component_name)
+ @package_file = package_file
+ @creator = creator
+ @distribution_name = distribution_name
+ @component_name = component_name
+ end
+
+ def execute
+ try_obtain_lease do
+ validate!
+
+ @package_file.transaction do
+ update_file_metadata
+ end
+
+ ::Packages::Debian::GenerateDistributionWorker.perform_async(:project, package.debian_distribution.id)
+ end
+ end
+
+ private
+
+ def validate!
+ raise ArgumentError, 'package file without Debian metadata' unless @package_file.debian_file_metadatum
+ raise ArgumentError, 'already processed package file' unless @package_file.debian_file_metadatum.unknown?
+
+ return if file_metadata[:file_type] == :deb || file_metadata[:file_type] == :udeb
+
+ raise ArgumentError, "invalid package file type: #{file_metadata[:file_type]}"
+ end
+
+ def update_file_metadata
+ ::Packages::UpdatePackageFileService.new(@package_file, package_id: package.id)
+ .execute
+
+ # Force reload from database, as package has changed
+ @package_file.reload_package
+
+ @package_file.debian_file_metadatum.update!(
+ file_type: file_metadata[:file_type],
+ component: @component_name,
+ architecture: file_metadata[:architecture],
+ fields: file_metadata[:fields]
+ )
+ end
+
+ def package
+ strong_memoize(:package) do
+ package_name = file_metadata[:fields]['Package']
+ package_version = file_metadata[:fields]['Version']
+
+ if file_metadata[:fields]['Source']
+ # "sample" or "sample (1.2.3~alpha2)"
+ source_field_parts = file_metadata[:fields]['Source'].split(SOURCE_FIELD_SPLIT_REGEX)
+ package_name = source_field_parts[0]
+ package_version = source_field_parts[2] || package_version
+ end
+
+ params = {
+ 'name': package_name,
+ 'version': package_version,
+ 'distribution_name': @distribution_name
+ }
+ response = Packages::Debian::FindOrCreatePackageService.new(project, @creator, params).execute
+ response.payload[:package]
+ end
+ end
+
+ def file_metadata
+ strong_memoize(:metadata) do
+ ::Packages::Debian::ExtractMetadataService.new(@package_file).execute
+ end
+ end
+
+ def project
+ @package_file.package.project
+ end
+
+ # used by ExclusiveLeaseGuard
+ def lease_key
+ "packages:debian:process_package_file_service:package_file:#{@package_file.id}"
+ end
+
+ # used by ExclusiveLeaseGuard
+ def lease_timeout
+ DEFAULT_LEASE_TIMEOUT
+ end
+ end
+ end
+end
diff --git a/app/views/groups/_home_panel.html.haml b/app/views/groups/_home_panel.html.haml
index a82a2e41508..1494990e427 100644
--- a/app/views/groups/_home_panel.html.haml
+++ b/app/views/groups/_home_panel.html.haml
@@ -3,16 +3,15 @@
- emails_disabled = @group.emails_disabled?
.group-home-panel
- .row.my-3
- .home-panel-title-row.col-md-12.col-lg-6.d-flex
+ .gl-display-flex.gl-justify-content-space-between.gl-flex-wrap.gl-sm-flex-direction-column.gl-gap-3.gl-my-5
+ .home-panel-title-row.gl-display-flex.gl-align-items-center
.avatar-container.rect-avatar.s64.home-panel-avatar.gl-flex-shrink-0.float-none{ class: 'gl-mr-3!' }
= group_icon(@group, class: 'avatar avatar-tile s64', width: 64, height: 64, itemprop: 'logo')
- .d-flex.flex-column.flex-wrap.align-items-baseline
- .d-inline-flex.align-items-baseline
- %h1.home-panel-title.gl-font-size-h1.gl-mt-3.gl-mb-2{ itemprop: 'name' }
- = @group.name
- %span.visibility-icon.gl-text-secondary.gl-ml-2.has-tooltip{ data: { container: 'body' }, title: visibility_icon_description(@group) }
- = visibility_level_icon(@group.visibility_level, options: {class: 'icon'})
+ %div
+ %h1.home-panel-title.gl-font-size-h1.gl-mt-3.gl-mb-2.gl-display-flex{ itemprop: 'name' }
+ = @group.name
+ %span.visibility-icon.gl-text-secondary.has-tooltip.gl-ml-2{ data: { container: 'body' }, title: visibility_icon_description(@group) }
+ = visibility_level_icon(@group.visibility_level, options: {class: 'icon'})
.home-panel-metadata.gl-text-secondary.gl-font-base.gl-font-weight-normal.gl-line-height-normal{ data: { qa_selector: 'group_id_content' }, itemprop: 'identifier' }
- if can?(current_user, :read_group, @group)
%span.gl-display-inline-block.gl-vertical-align-middle
@@ -22,24 +21,23 @@
%span.gl-ml-3.gl-mb-3
= render 'shared/members/access_request_links', source: @group
- .home-panel-buttons.col-md-12.col-lg-6
- - if current_user
- .gl-display-flex.gl-flex-wrap.gl-lg-justify-content-end.gl-mx-n2{ data: { testid: 'group-buttons' } }
- - if current_user.admin?
- = link_to [:admin, @group], class: 'btn btn-default gl-button btn-icon gl-mt-3 gl-mr-2', title: _('View group in admin area'),
- data: {toggle: 'tooltip', placement: 'bottom', container: 'body'} do
- = sprite_icon('admin')
- - if @notification_setting
- .js-vue-notification-dropdown{ data: { disabled: emails_disabled.to_s, dropdown_items: notification_dropdown_items(@notification_setting).to_json, notification_level: @notification_setting.level, help_page_path: help_page_path('user/profile/notifications'), group_id: @group.id, container_class: 'gl-mx-2 gl-mt-3 gl-vertical-align-top', no_flip: 'true' } }
- - if can_create_subgroups
- .gl-px-2.gl-sm-w-auto.gl-w-full
- = link_to _("New subgroup"),
- new_group_path(parent_id: @group.id, anchor: 'create-group-pane'),
- class: "btn btn-default gl-button gl-mt-3 gl-sm-w-auto gl-w-full",
- data: { qa_selector: 'new_subgroup_button' }
- - if can_create_projects
- .gl-px-2.gl-sm-w-auto.gl-w-full
- = link_to _("New project"), new_project_path(namespace_id: @group.id), class: "btn btn-confirm gl-button gl-mt-3 gl-sm-w-auto gl-w-full", data: { qa_selector: 'new_project_button' }
+ - if current_user
+ .home-panel-buttons.gl-display-flex.gl-justify-content-md-end.gl-align-items-center.gl-flex-wrap.gl-gap-3{ data: { testid: 'group-buttons' } }
+ - if current_user.admin?
+ = link_to [:admin, @group], class: 'btn btn-default gl-button btn-icon', title: _('View group in admin area'),
+ data: {toggle: 'tooltip', placement: 'bottom', container: 'body'} do
+ = sprite_icon('admin')
+ - if @notification_setting
+ .js-vue-notification-dropdown{ data: { disabled: emails_disabled.to_s, dropdown_items: notification_dropdown_items(@notification_setting).to_json, notification_level: @notification_setting.level, help_page_path: help_page_path('user/profile/notifications'), group_id: @group.id, container_class: 'gl-vertical-align-top', no_flip: 'true' } }
+ - if can_create_subgroups
+ .gl-sm-w-auto.gl-w-full
+ = link_to _("New subgroup"),
+ new_group_path(parent_id: @group.id, anchor: 'create-group-pane'),
+ class: "btn btn-default gl-button gl-sm-w-auto gl-w-full",
+ data: { qa_selector: 'new_subgroup_button' }
+ - if can_create_projects
+ .gl-sm-w-auto.gl-w-full
+ = link_to _("New project"), new_project_path(namespace_id: @group.id), class: "btn btn-confirm gl-button gl-sm-w-auto gl-w-full", data: { qa_selector: 'new_project_button' }
- if @group.description.present?
.group-home-desc.mt-1
diff --git a/app/views/projects/_home_panel.html.haml b/app/views/projects/_home_panel.html.haml
index a862b841008..3b240ee60ed 100644
--- a/app/views/projects/_home_panel.html.haml
+++ b/app/views/projects/_home_panel.html.haml
@@ -4,17 +4,16 @@
- cache_enabled = Feature.enabled?(:cache_home_panel, @project, type: :development)
.project-home-panel.js-show-on-project-root.gl-my-5{ class: [("empty-project" if empty_repo)] }
- .gl-display-flex.gl-justify-content-space-between.gl-flex-wrap.gl-sm-flex-direction-column.gl-mb-3
- .home-panel-title-row.gl-display-flex
+ .gl-display-flex.gl-justify-content-space-between.gl-flex-wrap.gl-sm-flex-direction-column.gl-mb-3.gl-gap-5
+ .home-panel-title-row.gl-display-flex.gl-align-items-center
%div{ class: 'avatar-container rect-avatar s64 home-panel-avatar gl-flex-shrink-0 gl-w-11 gl-h-11 gl-mr-3! float-none' }
= project_icon(@project, alt: @project.name, class: 'avatar avatar-tile s64', width: 64, height: 64, itemprop: 'image')
- .d-flex.flex-column.flex-wrap.align-items-baseline
- .d-inline-flex.align-items-baseline
- %h1.home-panel-title.gl-mt-3.gl-mb-2.gl-font-size-h1{ data: { qa_selector: 'project_name_content' }, itemprop: 'name' }
- = @project.name
- %span.visibility-icon.gl-text-secondary.gl-ml-2.has-tooltip{ data: { container: 'body' }, title: visibility_icon_description(@project) }
- = visibility_level_icon(@project.visibility_level, options: { class: 'icon' })
- = render_if_exists 'compliance_management/compliance_framework/compliance_framework_badge', project: @project
+ %div
+ %h1.home-panel-title.gl-font-size-h1.gl-mt-3.gl-mb-2.gl-display-flex{ data: { qa_selector: 'project_name_content' }, itemprop: 'name' }
+ = @project.name
+ %span.visibility-icon.gl-text-secondary.has-tooltip.gl-ml-2{ data: { container: 'body' }, title: visibility_icon_description(@project) }
+ = visibility_level_icon(@project.visibility_level, options: { class: 'icon' })
+ = render_if_exists 'compliance_management/compliance_framework/compliance_framework_badge', project: @project, additional_classes: 'gl-align-self-center gl-ml-2'
.home-panel-metadata.gl-font-sm.gl-text-secondary.gl-font-base.gl-font-weight-normal.gl-line-height-normal{ data: { qa_selector: 'project_id_content' }, itemprop: 'identifier' }
- if can?(current_user, :read_project, @project)
%span.gl-display-inline-block.gl-vertical-align-middle
@@ -25,19 +24,17 @@
= render 'shared/members/access_request_links', source: @project
= cache_if(cache_enabled, [@project, @project.star_count, @project.forks_count, :buttons, current_user, @notification_setting], expires_in: 1.day) do
- .project-repo-buttons.gl-display-flex.gl-justify-content-md-end.gl-align-items-start.gl-flex-wrap.gl-mt-5
+ .project-repo-buttons.gl-display-flex.gl-justify-content-md-end.gl-align-items-center.gl-flex-wrap.gl-gap-3
- if current_user
- if current_user.admin?
- = link_to [:admin, @project], class: 'btn gl-button btn-icon gl-align-self-start gl-py-2! gl-mr-3', title: _('View project in admin area'),
+ = link_to [:admin, @project], class: 'btn btn-default gl-button btn-icon', title: _('View project in admin area'),
data: {toggle: 'tooltip', placement: 'top', container: 'body'} do
= sprite_icon('admin')
- .gl-display-flex.gl-align-items-start.gl-mr-3
- - if @notification_setting
- .js-vue-notification-dropdown{ data: { button_size: "small", disabled: emails_disabled.to_s, dropdown_items: notification_dropdown_items(@notification_setting).to_json, notification_level: @notification_setting.level, help_page_path: help_page_path('user/profile/notifications'), project_id: @project.id, no_flip: 'true' } }
+ - if @notification_setting
+ .js-vue-notification-dropdown{ data: { disabled: emails_disabled.to_s, dropdown_items: notification_dropdown_items(@notification_setting).to_json, notification_level: @notification_setting.level, help_page_path: help_page_path('user/profile/notifications'), project_id: @project.id, no_flip: 'true' } }
- .count-buttons.gl-display-flex.gl-align-items-flex-start
- = render 'projects/buttons/star'
- = render 'projects/buttons/fork'
+ = render 'projects/buttons/star'
+ = render 'projects/buttons/fork'
- if can?(current_user, :download_code, @project)
= cache_if(cache_enabled, [@project, :download_code], expires_in: 1.minute) do
diff --git a/app/views/projects/buttons/_fork.html.haml b/app/views/projects/buttons/_fork.html.haml
index 3621853430d..2419764f8f9 100644
--- a/app/views/projects/buttons/_fork.html.haml
+++ b/app/views/projects/buttons/_fork.html.haml
@@ -2,7 +2,7 @@
- if current_user
.count-badge.btn-group
- if current_user.already_forked?(@project) && current_user.forkable_namespaces.size < 2
- = link_to namespace_project_path(current_user, current_user.fork_of(@project)), title: s_('ProjectOverview|Go to your fork'), class: 'gl-button btn btn-default btn-sm has-tooltip fork-btn' do
+ = link_to namespace_project_path(current_user, current_user.fork_of(@project)), title: s_('ProjectOverview|Go to your fork'), class: 'gl-button btn btn-default has-tooltip fork-btn' do
= sprite_icon('fork', css_class: 'icon')
%span= s_('ProjectOverview|Fork')
- else
@@ -11,8 +11,8 @@
- button_class = 'disabled' if disabled_tooltip
%span.btn-group{ class: ('has-tooltip' if disabled_tooltip), title: disabled_tooltip }
- = link_to new_project_fork_path(@project), class: "gl-button btn btn-default btn-sm fork-btn #{button_class}" do
+ = link_to new_project_fork_path(@project), class: "gl-button btn btn-default fork-btn #{button_class}" do
= sprite_icon('fork', css_class: 'icon')
%span= s_('ProjectOverview|Fork')
- = link_to project_forks_path(@project), title: n_(s_('ProjectOverview|Forks'), s_('ProjectOverview|Forks'), @project.forks_count), class: "gl-button btn btn-default btn-sm count has-tooltip fork-count #{count_class}" do
+ = link_to project_forks_path(@project), title: n_(s_('ProjectOverview|Forks'), s_('ProjectOverview|Forks'), @project.forks_count), class: "gl-button btn btn-default count has-tooltip fork-count #{count_class}" do
= @project.forks_count
diff --git a/app/views/projects/buttons/_star.html.haml b/app/views/projects/buttons/_star.html.haml
index eaf906ad89f..d4dcfbdff54 100644
--- a/app/views/projects/buttons/_star.html.haml
+++ b/app/views/projects/buttons/_star.html.haml
@@ -3,15 +3,15 @@
- icon = starred ? 'star' : 'star-o'
- button_text = starred ? s_('ProjectOverview|Unstar') : s_('ProjectOverview|Star')
- button_text_classes = starred ? 'starred' : ''
- .count-badge.d-inline-flex.align-item-stretch.gl-mr-3.btn-group
- = render Pajamas::ButtonComponent.new(size: :small, icon: icon, button_text_classes: button_text_classes, button_options: { class: 'star-btn toggle-star', data: { endpoint: toggle_star_project_path(@project, :json) } }) do
+ .count-badge.d-inline-flex.align-item-stretch.btn-group
+ = render Pajamas::ButtonComponent.new(size: :medium, icon: icon, button_text_classes: button_text_classes, button_options: { class: 'star-btn toggle-star', data: { endpoint: toggle_star_project_path(@project, :json) } }) do
- button_text
- = link_to project_starrers_path(@project), title: n_(s_('ProjectOverview|Starrer'), s_('ProjectOverview|Starrers'), @project.star_count), class: 'gl-button btn btn-default btn-sm has-tooltip star-count count' do
+ = link_to project_starrers_path(@project), title: n_(s_('ProjectOverview|Starrer'), s_('ProjectOverview|Starrers'), @project.star_count), class: 'gl-button btn btn-default has-tooltip star-count count' do
= @project.star_count
- else
- .count-badge.d-inline-flex.align-item-stretch.gl-mr-3.btn-group
- = link_to new_user_session_path, class: 'gl-button btn btn-default btn-sm has-tooltip star-btn', title: s_('ProjectOverview|You must sign in to star a project') do
+ .count-badge.d-inline-flex.align-item-stretch.btn-group
+ = link_to new_user_session_path, class: 'gl-button btn btn-default has-tooltip star-btn', title: s_('ProjectOverview|You must sign in to star a project') do
= sprite_icon('star-o', css_class: 'icon')
%span= s_('ProjectOverview|Star')
- = link_to project_starrers_path(@project), title: n_(s_('ProjectOverview|Starrer'), s_('ProjectOverview|Starrers'), @project.star_count), class: 'gl-button btn btn-default btn-sm has-tooltip star-count count' do
+ = link_to project_starrers_path(@project), title: n_(s_('ProjectOverview|Starrer'), s_('ProjectOverview|Starrers'), @project.star_count), class: 'gl-button btn btn-default has-tooltip star-count count' do
= @project.star_count
diff --git a/app/views/shared/projects/_project.html.haml b/app/views/shared/projects/_project.html.haml
index 908eb2428e8..40cd81ab3da 100644
--- a/app/views/shared/projects/_project.html.haml
+++ b/app/views/shared/projects/_project.html.haml
@@ -52,7 +52,7 @@
%span.user-access-role.gl-display-block.gl-m-0{ data: { qa_selector: 'user_role_content' } }= Gitlab::Access.human_access(access)
- if !explore_projects_tab?
- = render_if_exists 'compliance_management/compliance_framework/compliance_framework_badge', project: project
+ = render_if_exists 'compliance_management/compliance_framework/compliance_framework_badge', project: project, additional_classes: 'gl-ml-3!'
- if show_last_commit_as_description
.description.gl-display-none.gl-sm-display-block.gl-overflow-hidden.gl-mr-3.gl-mt-2
diff --git a/config/feature_flags/ops/search_curation_dry_run.yml b/config/feature_flags/ops/search_curation_dry_run.yml
new file mode 100644
index 00000000000..37f5328ff13
--- /dev/null
+++ b/config/feature_flags/ops/search_curation_dry_run.yml
@@ -0,0 +1,8 @@
+---
+name: search_curation_dry_run
+introduced_by_url: https://gitlab.com/gitlab-org/gitlab/-/merge_requests/104321
+rollout_issue_url: https://gitlab.com/gitlab-org/gitlab/-/issues/383003
+milestone: '15.7'
+type: ops
+group: group::global search
+default_enabled: true
diff --git a/db/docs/pm_licenses.yml b/db/docs/pm_licenses.yml
new file mode 100644
index 00000000000..20b665289b9
--- /dev/null
+++ b/db/docs/pm_licenses.yml
@@ -0,0 +1,9 @@
+---
+table_name: pm_licenses
+classes:
+- PackageMetadata::License
+feature_categories:
+ - license_compliance
+description: Tracks licenses referenced by public package registries.
+introduced_by_url: https://gitlab.com/gitlab-org/gitlab/-/merge_requests/102794
+milestone: '15.6'
diff --git a/db/docs/pm_package_version_licenses.yml b/db/docs/pm_package_version_licenses.yml
new file mode 100644
index 00000000000..2a10cf30681
--- /dev/null
+++ b/db/docs/pm_package_version_licenses.yml
@@ -0,0 +1,9 @@
+---
+table_name: pm_package_version_licenses
+classes:
+- PackageMetadata::PackageVersionLicense
+feature_categories:
+ - license_compliance
+description: Tracks licenses under which a given package version has been published.
+introduced_by_url: https://gitlab.com/gitlab-org/gitlab/-/merge_requests/102794
+milestone: '15.6'
diff --git a/db/docs/pm_package_versions.yml b/db/docs/pm_package_versions.yml
new file mode 100644
index 00000000000..8915dbd094d
--- /dev/null
+++ b/db/docs/pm_package_versions.yml
@@ -0,0 +1,9 @@
+---
+table_name: pm_package_versions
+classes:
+- PackageMetadata::PackageVersion
+feature_categories:
+- license_compliance
+description: Tracks package versions served by public package registries.
+introduced_by_url: https://gitlab.com/gitlab-org/gitlab/-/merge_requests/102794
+milestone: '15.6'
diff --git a/db/docs/pm_packages.yml b/db/docs/pm_packages.yml
new file mode 100644
index 00000000000..f26a14c3268
--- /dev/null
+++ b/db/docs/pm_packages.yml
@@ -0,0 +1,9 @@
+---
+table_name: pm_packages
+classes:
+- PackageMetadata::Package
+feature_categories:
+- license_compliance
+description: Tracks packages served by public package registries.
+introduced_by_url: https://gitlab.com/gitlab-org/gitlab/-/merge_requests/102794
+milestone: '15.6'
diff --git a/db/migrate/20221101174816_create_package_metadata.rb b/db/migrate/20221101174816_create_package_metadata.rb
new file mode 100644
index 00000000000..f1456689641
--- /dev/null
+++ b/db/migrate/20221101174816_create_package_metadata.rb
@@ -0,0 +1,11 @@
+# frozen_string_literal: true
+
+class CreatePackageMetadata < Gitlab::Database::Migration[2.0]
+ def change
+ create_table :pm_packages do |t|
+ t.integer :purl_type, null: false, limit: 2
+ t.text :name, null: false, limit: 255
+ t.index [:purl_type, :name], unique: true, name: 'i_pm_packages_purl_type_and_name'
+ end
+ end
+end
diff --git a/db/migrate/20221101194416_create_package_metadata_versions.rb b/db/migrate/20221101194416_create_package_metadata_versions.rb
new file mode 100644
index 00000000000..19b86d86de1
--- /dev/null
+++ b/db/migrate/20221101194416_create_package_metadata_versions.rb
@@ -0,0 +1,11 @@
+# frozen_string_literal: true
+
+class CreatePackageMetadataVersions < Gitlab::Database::Migration[2.0]
+ def change
+ create_table :pm_package_versions do |t|
+ t.references :pm_package, foreign_key: { to_table: :pm_packages, on_delete: :cascade }
+ t.text :version, null: false, limit: 255
+ t.index [:pm_package_id, :version], unique: true, name: 'i_pm_package_versions_on_package_id_and_version'
+ end
+ end
+end
diff --git a/db/migrate/20221101195309_create_package_metadata_licenses.rb b/db/migrate/20221101195309_create_package_metadata_licenses.rb
new file mode 100644
index 00000000000..1fbb0c147bd
--- /dev/null
+++ b/db/migrate/20221101195309_create_package_metadata_licenses.rb
@@ -0,0 +1,10 @@
+# frozen_string_literal: true
+
+class CreatePackageMetadataLicenses < Gitlab::Database::Migration[2.0]
+ def change
+ create_table :pm_licenses do |t|
+ t.text :spdx_identifier, null: false, limit: 50
+ t.index [:spdx_identifier], unique: true, name: 'i_pm_licenses_on_spdx_identifier'
+ end
+ end
+end
diff --git a/db/migrate/20221101195543_create_package_metadata_package_version_licenses.rb b/db/migrate/20221101195543_create_package_metadata_package_version_licenses.rb
new file mode 100644
index 00000000000..9007f869e1f
--- /dev/null
+++ b/db/migrate/20221101195543_create_package_metadata_package_version_licenses.rb
@@ -0,0 +1,12 @@
+# frozen_string_literal: true
+
+class CreatePackageMetadataPackageVersionLicenses < Gitlab::Database::Migration[2.0]
+ INDEX_NAME = 'i_pm_package_version_licenses_on_version_and_license_ids'
+
+ def change
+ create_table :pm_package_version_licenses, primary_key: [:pm_package_version_id, :pm_license_id] do |t|
+ t.references :pm_package_version, foreign_key: { on_delete: :cascade }, null: false
+ t.references :pm_license, foreign_key: { on_delete: :cascade }, null: false
+ end
+ end
+end
diff --git a/db/schema_migrations/20221101174816 b/db/schema_migrations/20221101174816
new file mode 100644
index 00000000000..ce1368622a4
--- /dev/null
+++ b/db/schema_migrations/20221101174816
@@ -0,0 +1 @@
+2bd5e8cadc82207b0540d8418e6312cc76868318e1e040fb64fa69dfe3e01cb8 \ No newline at end of file
diff --git a/db/schema_migrations/20221101194416 b/db/schema_migrations/20221101194416
new file mode 100644
index 00000000000..0051072650c
--- /dev/null
+++ b/db/schema_migrations/20221101194416
@@ -0,0 +1 @@
+c4fc7b3ca831b670c504a824cbe07d6c94ddaa5c432d37cb353807c5387ee4e8 \ No newline at end of file
diff --git a/db/schema_migrations/20221101195309 b/db/schema_migrations/20221101195309
new file mode 100644
index 00000000000..54fd0d397b6
--- /dev/null
+++ b/db/schema_migrations/20221101195309
@@ -0,0 +1 @@
+9033c025820c306db295ac4acacc8fa2d99aa78f3883e134829beb8c756eacb1 \ No newline at end of file
diff --git a/db/schema_migrations/20221101195543 b/db/schema_migrations/20221101195543
new file mode 100644
index 00000000000..a1b20ac6274
--- /dev/null
+++ b/db/schema_migrations/20221101195543
@@ -0,0 +1 @@
+18e1da4447efd3c77c6a4baf194eb0cfa787d5ce60e544c6fd8d4ed0818f9082 \ No newline at end of file
diff --git a/db/structure.sql b/db/structure.sql
index e88ff1be365..7de5b021f1c 100644
--- a/db/structure.sql
+++ b/db/structure.sql
@@ -19466,6 +19466,58 @@ CREATE SEQUENCE plans_id_seq
ALTER SEQUENCE plans_id_seq OWNED BY plans.id;
+CREATE TABLE pm_licenses (
+ id bigint NOT NULL,
+ spdx_identifier text NOT NULL,
+ CONSTRAINT check_c1eb81d1ba CHECK ((char_length(spdx_identifier) <= 50))
+);
+
+CREATE SEQUENCE pm_licenses_id_seq
+ START WITH 1
+ INCREMENT BY 1
+ NO MINVALUE
+ NO MAXVALUE
+ CACHE 1;
+
+ALTER SEQUENCE pm_licenses_id_seq OWNED BY pm_licenses.id;
+
+CREATE TABLE pm_package_version_licenses (
+ pm_package_version_id bigint NOT NULL,
+ pm_license_id bigint NOT NULL
+);
+
+CREATE TABLE pm_package_versions (
+ id bigint NOT NULL,
+ pm_package_id bigint,
+ version text NOT NULL,
+ CONSTRAINT check_2d8a88cfcc CHECK ((char_length(version) <= 255))
+);
+
+CREATE SEQUENCE pm_package_versions_id_seq
+ START WITH 1
+ INCREMENT BY 1
+ NO MINVALUE
+ NO MAXVALUE
+ CACHE 1;
+
+ALTER SEQUENCE pm_package_versions_id_seq OWNED BY pm_package_versions.id;
+
+CREATE TABLE pm_packages (
+ id bigint NOT NULL,
+ purl_type smallint NOT NULL,
+ name text NOT NULL,
+ CONSTRAINT check_3a3aedb8ba CHECK ((char_length(name) <= 255))
+);
+
+CREATE SEQUENCE pm_packages_id_seq
+ START WITH 1
+ INCREMENT BY 1
+ NO MINVALUE
+ NO MAXVALUE
+ CACHE 1;
+
+ALTER SEQUENCE pm_packages_id_seq OWNED BY pm_packages.id;
+
CREATE TABLE pool_repositories (
id bigint NOT NULL,
shard_id integer NOT NULL,
@@ -24168,6 +24220,12 @@ ALTER TABLE ONLY plan_limits ALTER COLUMN id SET DEFAULT nextval('plan_limits_id
ALTER TABLE ONLY plans ALTER COLUMN id SET DEFAULT nextval('plans_id_seq'::regclass);
+ALTER TABLE ONLY pm_licenses ALTER COLUMN id SET DEFAULT nextval('pm_licenses_id_seq'::regclass);
+
+ALTER TABLE ONLY pm_package_versions ALTER COLUMN id SET DEFAULT nextval('pm_package_versions_id_seq'::regclass);
+
+ALTER TABLE ONLY pm_packages ALTER COLUMN id SET DEFAULT nextval('pm_packages_id_seq'::regclass);
+
ALTER TABLE ONLY pool_repositories ALTER COLUMN id SET DEFAULT nextval('pool_repositories_id_seq'::regclass);
ALTER TABLE ONLY postgres_async_indexes ALTER COLUMN id SET DEFAULT nextval('postgres_async_indexes_id_seq'::regclass);
@@ -26333,6 +26391,18 @@ ALTER TABLE ONLY plan_limits
ALTER TABLE ONLY plans
ADD CONSTRAINT plans_pkey PRIMARY KEY (id);
+ALTER TABLE ONLY pm_licenses
+ ADD CONSTRAINT pm_licenses_pkey PRIMARY KEY (id);
+
+ALTER TABLE ONLY pm_package_version_licenses
+ ADD CONSTRAINT pm_package_version_licenses_pkey PRIMARY KEY (pm_package_version_id, pm_license_id);
+
+ALTER TABLE ONLY pm_package_versions
+ ADD CONSTRAINT pm_package_versions_pkey PRIMARY KEY (id);
+
+ALTER TABLE ONLY pm_packages
+ ADD CONSTRAINT pm_packages_pkey PRIMARY KEY (id);
+
ALTER TABLE ONLY pool_repositories
ADD CONSTRAINT pool_repositories_pkey PRIMARY KEY (id);
@@ -27870,6 +27940,12 @@ CREATE UNIQUE INDEX i_ci_job_token_project_scope_links_on_source_and_target_proj
CREATE INDEX i_compliance_frameworks_on_id_and_created_at ON compliance_management_frameworks USING btree (id, created_at, pipeline_configuration_full_path);
+CREATE UNIQUE INDEX i_pm_licenses_on_spdx_identifier ON pm_licenses USING btree (spdx_identifier);
+
+CREATE UNIQUE INDEX i_pm_package_versions_on_package_id_and_version ON pm_package_versions USING btree (pm_package_id, version);
+
+CREATE UNIQUE INDEX i_pm_packages_purl_type_and_name ON pm_packages USING btree (purl_type, name);
+
CREATE INDEX idx_analytics_devops_adoption_segments_on_namespace_id ON analytics_devops_adoption_segments USING btree (namespace_id);
CREATE INDEX idx_analytics_devops_adoption_snapshots_finalized ON analytics_devops_adoption_snapshots USING btree (namespace_id, end_time) WHERE (recorded_at >= end_time);
@@ -30148,6 +30224,12 @@ CREATE UNIQUE INDEX index_plan_limits_on_plan_id ON plan_limits USING btree (pla
CREATE UNIQUE INDEX index_plans_on_name ON plans USING btree (name);
+CREATE INDEX index_pm_package_version_licenses_on_pm_license_id ON pm_package_version_licenses USING btree (pm_license_id);
+
+CREATE INDEX index_pm_package_version_licenses_on_pm_package_version_id ON pm_package_version_licenses USING btree (pm_package_version_id);
+
+CREATE INDEX index_pm_package_versions_on_pm_package_id ON pm_package_versions USING btree (pm_package_id);
+
CREATE UNIQUE INDEX index_pool_repositories_on_disk_path ON pool_repositories USING btree (disk_path);
CREATE INDEX index_pool_repositories_on_shard_id ON pool_repositories USING btree (shard_id);
@@ -33992,6 +34074,9 @@ ALTER TABLE ONLY issuable_severities
ALTER TABLE ONLY saml_providers
ADD CONSTRAINT fk_rails_306d459be7 FOREIGN KEY (group_id) REFERENCES namespaces(id) ON DELETE CASCADE;
+ALTER TABLE ONLY pm_package_version_licenses
+ ADD CONSTRAINT fk_rails_30ddb7f837 FOREIGN KEY (pm_package_version_id) REFERENCES pm_package_versions(id) ON DELETE CASCADE;
+
ALTER TABLE ONLY resource_state_events
ADD CONSTRAINT fk_rails_3112bba7dc FOREIGN KEY (merge_request_id) REFERENCES merge_requests(id) ON DELETE CASCADE;
@@ -34439,6 +34524,9 @@ ALTER TABLE ONLY merge_request_context_commit_diff_files
ALTER TABLE ONLY group_crm_settings
ADD CONSTRAINT fk_rails_74fdf2f13d FOREIGN KEY (group_id) REFERENCES namespaces(id) ON DELETE CASCADE;
+ALTER TABLE ONLY pm_package_version_licenses
+ ADD CONSTRAINT fk_rails_7520ea026d FOREIGN KEY (pm_license_id) REFERENCES pm_licenses(id) ON DELETE CASCADE;
+
ALTER TABLE ONLY clusters_applications_ingress
ADD CONSTRAINT fk_rails_753a7b41c1 FOREIGN KEY (cluster_id) REFERENCES clusters(id) ON DELETE CASCADE;
@@ -35003,6 +35091,9 @@ ALTER TABLE ONLY resource_iteration_events
ALTER TABLE ONLY member_roles
ADD CONSTRAINT fk_rails_cf0ee35814 FOREIGN KEY (namespace_id) REFERENCES namespaces(id) ON DELETE CASCADE;
+ALTER TABLE ONLY pm_package_versions
+ ADD CONSTRAINT fk_rails_cf94c3e601 FOREIGN KEY (pm_package_id) REFERENCES pm_packages(id) ON DELETE CASCADE;
+
ALTER TABLE ONLY upload_states
ADD CONSTRAINT fk_rails_d00f153613 FOREIGN KEY (upload_id) REFERENCES uploads(id) ON DELETE CASCADE;
diff --git a/doc/administration/geo/replication/tuning.md b/doc/administration/geo/replication/tuning.md
index ab9263ad344..4dc3ba93d66 100644
--- a/doc/administration/geo/replication/tuning.md
+++ b/doc/administration/geo/replication/tuning.md
@@ -30,7 +30,7 @@ However, this may not lead to more downloads in parallel unless the number of
available Sidekiq threads is also increased. For example, if repository synchronization
concurrency is increased from 25 to 50, you may also want to increase the number
of Sidekiq threads from 25 to 50. See the
-[Sidekiq concurrency documentation](../../sidekiq/extra_sidekiq_processes.md#number-of-threads)
+[Sidekiq concurrency documentation](../../sidekiq/extra_sidekiq_processes.md#concurrency)
for more details.
## Repository re-verification
diff --git a/doc/administration/geo/setup/database.md b/doc/administration/geo/setup/database.md
index 86caf5306b5..1ee7f0665cd 100644
--- a/doc/administration/geo/setup/database.md
+++ b/doc/administration/geo/setup/database.md
@@ -519,6 +519,9 @@ data before running `pg_basebackup`.
add `--force` to the command line.
- When not in a production machine you can disable backup step if you
really sure this is what you want by adding `--skip-backup`
+ - If you are using PgBouncer, you need to target the database host directly.
+ - If you are using Patroni on your primary site, you must target the current leader host.
+ - If you are using a load balancer proxy (for example HAProxy) and it is targeting the Patroni leader for the primary, you should target the load balancer proxy instead.
The replication process is now complete.
diff --git a/doc/administration/operations/index.md b/doc/administration/operations/index.md
index d18f41becd5..7b495a5dd58 100644
--- a/doc/administration/operations/index.md
+++ b/doc/administration/operations/index.md
@@ -16,7 +16,6 @@ Keep your GitLab instance up and running smoothly.
- [Sidekiq MemoryKiller](sidekiq_memory_killer.md): Configure Sidekiq MemoryKiller
to restart Sidekiq.
- [Multiple Sidekiq processes](extra_sidekiq_processes.md): Configure multiple Sidekiq processes to ensure certain queues always have dedicated workers, no matter the number of jobs that must be processed. **(FREE SELF)**
-- [Sidekiq routing rules](extra_sidekiq_routing.md): Configure the routing rules to route a job from a worker to a desirable queue. **(FREE SELF)**
- [Puma](puma.md): Understand Puma and puma-worker-killer.
- Speed up SSH operations by
[Authorizing SSH users via a fast, indexed lookup to the GitLab database](fast_ssh_key_lookup.md), and/or
diff --git a/doc/administration/sidekiq/extra_sidekiq_processes.md b/doc/administration/sidekiq/extra_sidekiq_processes.md
index feaaa55aa59..d5007e9a3e9 100644
--- a/doc/administration/sidekiq/extra_sidekiq_processes.md
+++ b/doc/administration/sidekiq/extra_sidekiq_processes.md
@@ -6,91 +6,41 @@ info: To determine the technical writer assigned to the Stage/Group associated w
# Run multiple Sidekiq processes **(FREE SELF)**
-GitLab allows you to start multiple Sidekiq processes.
-These processes can be used to consume a dedicated set
-of queues. This can be used to ensure certain queues always have dedicated
-workers, no matter the number of jobs to be processed.
+GitLab allows you to start multiple Sidekiq processes to process background jobs
+at a higher rate on a single instance. By default, Sidekiq starts one worker
+process and only uses a single core.
NOTE:
The information in this page applies only to Omnibus GitLab.
-## Available Sidekiq queues
-
-For a list of the existing Sidekiq queues, check the following files:
-
-- [Queues for both GitLab Community and Enterprise Editions](https://gitlab.com/gitlab-org/gitlab/-/blob/master/app/workers/all_queues.yml)
-- [Queues for GitLab Enterprise Editions only](https://gitlab.com/gitlab-org/gitlab/-/blob/master/ee/app/workers/all_queues.yml)
-
-Each entry in the above files represents a queue on which Sidekiq processes
-can be started.
-
## Start multiple processes
> - [Introduced](https://gitlab.com/gitlab-org/omnibus-gitlab/-/merge_requests/4006) in GitLab 12.10, starting multiple processes with Sidekiq cluster.
> - [Sidekiq cluster moved](https://gitlab.com/groups/gitlab-com/gl-infra/-/epics/181) to GitLab Free in 12.10.
> - [Sidekiq cluster became default](https://gitlab.com/gitlab-org/omnibus-gitlab/-/merge_requests/4140) in GitLab 13.0.
-When starting multiple processes, the number of processes should
-equal (and **not** exceed) the number of CPU cores you want to
-dedicate to Sidekiq. Each Sidekiq process can use only 1 CPU
-core, subject to the available workload and concurrency settings.
+When starting multiple processes, the number of processes should at most equal
+(and **not** exceed) the number of CPU cores you want to dedicate to Sidekiq.
+The Sidekiq worker process uses no more than one CPU core.
-To start multiple processes:
+To start multiple processes, use the `sidekiq['queue_groups']` array setting to
+specify how many processes to create using `sidekiq-cluster` and which queues
+they should handle. Each item in the array equates to one additional Sidekiq
+process, and values in each item determine the queues it works on. In the vast
+majority of cases, all processes should listen to all queues (see
+[processing specific job classes](processing_specific_job_classes.md) for more
+details).
-1. Using the `sidekiq['queue_groups']` array setting, specify how many processes to
- create using `sidekiq-cluster` and which queue they should handle.
- Each item in the array equates to one additional Sidekiq
- process, and values in each item determine the queues it works on.
+For example, to create four Sidekiq processes, each listening
+to all available queues:
- For example, the following setting creates three Sidekiq processes, one to run on
- `elastic_commit_indexer`, one to run on `mailers`, and one process running on all queues:
+1. Edit `/etc/gitlab/gitlab.rb`:
```ruby
- sidekiq['queue_groups'] = [
- "elastic_commit_indexer",
- "mailers",
- "*"
- ]
+ sidekiq['queue_groups'] = ['*'] * 4
```
- To have an additional Sidekiq process handle multiple queues, add multiple
- queue names to its item delimited by commas. For example:
-
- ```ruby
- sidekiq['queue_groups'] = [
- "elastic_commit_indexer, elastic_association_indexer",
- "mailers",
- "*"
- ]
- ```
-
- [In GitLab 12.9](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/26594) and
- later, the special queue name `*` means all queues. This starts two
- processes, each handling all queues:
-
- ```ruby
- sidekiq['queue_groups'] = [
- "*",
- "*"
- ]
- ```
-
- `*` which matches all workers.
- As a result, the wildcard query must stay at the end of the list or the rules after it are ignored.
-
- `*` cannot be combined with concrete queue names - `*, mailers`
- just handles the `mailers` queue.
-
- When `sidekiq-cluster` is only running on a single node, make sure that at least
- one process is running on all queues using `*`. This ensures a process
- automatically picks up jobs in queues created in the future,
- including queues that have dedicated processes.
-
- If `sidekiq-cluster` is running on more than one node, you can also use
- [`--negate`](#negate-settings) and list all the queues that are already being
- processed.
-
-1. Save the file and reconfigure GitLab for the changes to take effect:
+1. Save the file and reconfigure GitLab:
```shell
sudo gitlab-ctl reconfigure
@@ -101,125 +51,38 @@ To view the Sidekiq processes in GitLab:
1. On the top bar, select **Main menu > Admin**.
1. On the left sidebar, select **Monitoring > Background Jobs**.
-## Negate settings
+## Concurrency
-To have the Sidekiq process work on every queue **except** the ones
-you list. In this example, we exclude all import-related jobs from a Sidekiq node:
+By default each process defined under `sidekiq` starts with a number of threads
+that equals the number of queues, plus one spare thread, up to a maximum of 50.
+For example, a process that handles all queues will use 50 threads by default.
-1. Edit `/etc/gitlab/gitlab.rb` and add:
-
- ```ruby
- sidekiq['negate'] = true
- sidekiq['queue_selector'] = true
- sidekiq['queue_groups'] = [
- "feature_category=importers"
- ]
- ```
-
-1. Save the file and reconfigure GitLab for the changes to take effect:
-
- ```shell
- sudo gitlab-ctl reconfigure
- ```
-
-## Queue selector
-
-> - [Introduced](https://gitlab.com/gitlab-com/gl-infra/scalability/-/issues/45) in GitLab 12.8.
-> - [Sidekiq cluster, including queue selector, moved](https://gitlab.com/groups/gitlab-com/gl-infra/-/epics/181) to GitLab Free in 12.10.
-> - [Renamed from `experimental_queue_selector` to `queue_selector`](https://gitlab.com/gitlab-com/gl-infra/scalability/-/issues/147) in GitLab 13.6.
-
-In addition to selecting queues by name, as above, the `queue_selector` option
-allows queue groups to be selected in a more general way using a
-[worker matching query](extra_sidekiq_routing.md#worker-matching-query). After `queue_selector`
-is set, all `queue_groups` must follow the aforementioned syntax.
-
-In `/etc/gitlab/gitlab.rb`:
-
-```ruby
-sidekiq['enable'] = true
-sidekiq['queue_selector'] = true
-sidekiq['queue_groups'] = [
- # Run all non-CPU-bound queues that are high urgency
- 'resource_boundary!=cpu&urgency=high',
- # Run all continuous integration and pages queues that are not high urgency
- 'feature_category=continuous_integration,pages&urgency!=high',
- # Run all queues
- '*'
-]
-```
-
-## Ignore all import queues
-
-When [importing from GitHub](../../user/project/import/github.md) or
-other sources, Sidekiq might use all of its resources to perform those
-operations. To set up two separate `sidekiq-cluster` processes, where
-one only processes imports and the other processes all other queues:
-
-1. Edit `/etc/gitlab/gitlab.rb` and add:
-
- ```ruby
- sidekiq['enable'] = true
- sidekiq['queue_selector'] = true
- sidekiq['queue_groups'] = [
- "feature_category=importers",
- "feature_category!=importers"
- ]
- ```
-
-1. Save the file and reconfigure GitLab for the changes to take effect:
-
- ```shell
- sudo gitlab-ctl reconfigure
- ```
-
-## Number of threads
-
-By default each process defined under `sidekiq` starts with a
-number of threads that equals the number of queues, plus one spare thread.
-For example, a process that handles the `process_commit` and `post_receive`
-queues uses three threads in total.
-
-These thread run inside a single Ruby process, and each process
-can only use a single CPU core. The usefulness of threading depends
-on the work having some external dependencies to wait on, like database queries or
-HTTP requests. Most Sidekiq deployments benefit from this threading, and when
-running fewer queues in a process, increasing the thread count might be
-even more desirable to make the most effective use of CPU resources.
+These threads run inside a single Ruby process, and each process can only use a
+single CPU core. The usefulness of threading depends on the work having some
+external dependencies to wait on, like database queries or HTTP requests. Most
+Sidekiq deployments benefit from this threading.
### Manage thread counts explicitly
-The correct maximum thread count (also called concurrency) depends on the workload.
-Typical values range from `1` for highly CPU-bound tasks to `15` or higher for mixed
-low-priority work. A reasonable starting range is `15` to `25` for a non-specialized
-deployment.
+The correct maximum thread count (also called concurrency) depends on the
+workload. Typical values range from `5` for highly CPU-bound tasks to `15` or
+higher for mixed low-priority work. A reasonable starting range is `15` to `25`
+for a non-specialized deployment.
-You can find example values used by GitLab.com by searching for `concurrency:` in
-[the Helm charts](https://gitlab.com/gitlab-com/gl-infra/k8s-workloads/gitlab-com/-/blob/master/releases/gitlab/values/gprd.yaml.gotmpl).
-The values vary according to the work each specific deployment of Sidekiq does.
-Any other specialized deployments with processes dedicated to specific queues should
-have the concurrency tuned according to:
+We only recommend setting explicit concurrency by setting `min_concurrency` and
+`max_concurrency` to the same value. The two values are kept for backwards
+compatibility reasons, but for more predictable results, use the same value.
-- The CPU usage of each type of process.
-- The throughput achieved.
-
-Each thread requires a Redis connection, so adding threads may increase Redis
-latency and potentially cause client timeouts. See the
-[Sidekiq documentation about Redis](https://github.com/mperham/sidekiq/wiki/Using-Redis) for more
-details.
-
-#### When running Sidekiq cluster (default)
+For example, to set the concurrency to `20`:
-Running Sidekiq cluster is the default in GitLab 13.0 and later.
-
-1. Edit `/etc/gitlab/gitlab.rb` and add:
+1. Edit `/etc/gitlab/gitlab.rb`:
```ruby
- sidekiq['min_concurrency'] = 15
- sidekiq['max_concurrency'] = 25
+ sidekiq['min_concurrency'] = 20
+ sidekiq['max_concurrency'] = 20
```
-1. Save the file and reconfigure GitLab for the changes to take effect:
+1. Save the file and reconfigure GitLab:
```shell
sudo gitlab-ctl reconfigure
@@ -231,50 +94,45 @@ the other. Setting `min_concurrency` to `0` disables the limit.
For each queue group, let `N` be one more than the number of queues. The
concurrency is set to:
+1. `min_concurrency`, if it's equal to `max_concurrency`.
1. `N`, if it's between `min_concurrency` and `max_concurrency`.
1. `max_concurrency`, if `N` exceeds this value.
1. `min_concurrency`, if `N` is less than this value.
-If `min_concurrency` is equal to `max_concurrency`, then this value is used
-regardless of the number of queues.
-
When `min_concurrency` is greater than `max_concurrency`, it is treated as
being equal to `max_concurrency`.
-#### When running a single Sidekiq process
-
-Running a single Sidekiq process is the default in GitLab 12.10 and earlier.
-
-WARNING:
-Running Sidekiq directly was removed in GitLab
-[14.0](https://gitlab.com/gitlab-com/gl-infra/scalability/-/issues/240).
-
-1. Edit `/etc/gitlab/gitlab.rb` and add:
-
- ```ruby
- sidekiq['cluster'] = false
- sidekiq['concurrency'] = 25
- ```
-
-1. Save the file and reconfigure GitLab for the changes to take effect:
+You can find example values used by GitLab.com by searching for `concurrency:`
+in [the Helm charts](https://gitlab.com/gitlab-com/gl-infra/k8s-workloads/gitlab-com/-/blob/master/releases/gitlab/values/gprd.yaml.gotmpl).
+The values vary according to the work each specific deployment of Sidekiq does.
+Any other specialized deployments with processes dedicated to specific queues
+should have the concurrency tuned according to:
- ```shell
- sudo gitlab-ctl reconfigure
- ```
+- The CPU usage of each type of process.
+- The throughput achieved.
-This sets the concurrency (number of threads) for the Sidekiq process.
+Each thread requires a Redis connection, so adding threads may increase Redis
+latency and potentially cause client timeouts. See the [Sidekiq documentation about Redis](https://github.com/mperham/sidekiq/wiki/Using-Redis)
+for more details.
## Modify the check interval
-To modify `sidekiq-cluster`'s health check interval for the additional Sidekiq processes:
+To modify Sidekiq's health check interval for the additional Sidekiq
+processes:
-1. Edit `/etc/gitlab/gitlab.rb` and add (the value can be any integer number of seconds):
+1. Edit `/etc/gitlab/gitlab.rb`:
```ruby
sidekiq['interval'] = 5
```
-1. Save the file and [reconfigure GitLab](../restart_gitlab.md#omnibus-gitlab-reconfigure) for the changes to take effect.
+ The value can be any integer number of seconds.
+
+1. Save the file and reconfigure GitLab:
+
+ ```shell
+ sudo gitlab-ctl reconfigure
+ ```
## Troubleshoot using the CLI
@@ -291,6 +149,9 @@ takes arguments using the following syntax:
/opt/gitlab/embedded/service/gitlab-rails/bin/sidekiq-cluster [QUEUE,QUEUE,...] [QUEUE, ...]
```
+The `--dryrun` argument allows viewing the command to be executed without
+actually starting it.
+
Each separate argument denotes a group of queues that have to be processed by a
Sidekiq process. Multiple queues can be processed by the same process by
separating them with a comma instead of a space.
@@ -301,29 +162,6 @@ explicitly list all the queue names. For more information about queue namespaces
see the relevant section in the
[Sidekiq development documentation](../../development/sidekiq/index.md#queue-namespaces).
-For example, say you want to start 2 extra processes: one to process the
-`process_commit` queue, and one to process the `post_receive` queue. This can be
-done as follows:
-
-```shell
-/opt/gitlab/embedded/service/gitlab-rails/bin/sidekiq-cluster process_commit post_receive
-```
-
-If you instead want to start one process processing both queues, you'd use the
-following syntax:
-
-```shell
-/opt/gitlab/embedded/service/gitlab-rails/bin/sidekiq-cluster process_commit,post_receive
-```
-
-If you want to have one Sidekiq process dealing with the `process_commit` and
-`post_receive` queues, and one process to process the `gitlab_shell` queue,
-you'd use the following:
-
-```shell
-/opt/gitlab/embedded/service/gitlab-rails/bin/sidekiq-cluster process_commit,post_receive gitlab_shell
-```
-
### Monitor the `sidekiq-cluster` command
The `sidekiq-cluster` command does not terminate once it has started the desired
diff --git a/doc/administration/sidekiq/extra_sidekiq_routing.md b/doc/administration/sidekiq/extra_sidekiq_routing.md
index 56c51beb758..d1d65498fcc 100644
--- a/doc/administration/sidekiq/extra_sidekiq_routing.md
+++ b/doc/administration/sidekiq/extra_sidekiq_routing.md
@@ -1,163 +1,11 @@
---
-stage: Systems
-group: Distribution
-info: To determine the technical writer assigned to the Stage/Group associated with this page, see https://about.gitlab.com/handbook/product/ux/technical-writing/#assignments
+redirect_to: 'processing_specific_job_classes.md#routing-rules'
+remove_date: '2023-02-01'
---
-# Queue routing rules **(FREE SELF)**
+This document was moved to [another location](processing_specific_job_classes.md#routing-rules).
-When the number of Sidekiq jobs increases to a certain scale, the system faces
-some scalability issues. One of them is that the length of the queue tends to get
-longer. High-urgency jobs have to wait longer until other less urgent jobs
-finish. This head-of-line blocking situation may eventually affect the
-responsiveness of the system, especially critical actions. In another scenario,
-the performance of some jobs is degraded due to other long running or CPU-intensive jobs
-(computing or rendering ones) in the same machine.
-
-To counter the aforementioned issues, one effective solution is to split
-Sidekiq jobs into different queues and assign machines handling each queue
-exclusively. For example, all CPU-intensive jobs could be routed to the
-`cpu-bound` queue and handled by a fleet of CPU optimized instances. The queue
-topology differs between companies depending on the workloads and usage
-patterns. Therefore, GitLab supports a flexible mechanism for the
-administrator to route the jobs based on their characteristics.
-
-As an alternative to [Queue selector](extra_sidekiq_processes.md#queue-selector), which
-configures Sidekiq cluster to listen to a specific set of workers or queues,
-GitLab also supports routing a job from a worker to the desired queue when it
-is scheduled. Sidekiq clients try to match a job against a configured list of
-routing rules. Rules are evaluated from first to last, and as soon as we find a
-match for a given worker we stop processing for that worker (first match wins).
-If the worker doesn't match any rule, it falls back to the queue name generated
-from the worker name.
-
-By default, if the routing rules are not configured (or denoted with an empty
-array), all the jobs are routed to the queue generated from the worker name.
-
-## Example configuration
-
-In `/etc/gitlab/gitlab.rb`:
-
-```ruby
-sidekiq['routing_rules'] = [
- # Do not re-route workers that require their own queue
- ['tags=needs_own_queue', nil],
- # Route all non-CPU-bound workers that are high urgency to `high-urgency` queue
- ['resource_boundary!=cpu&urgency=high', 'high-urgency'],
- # Route all database, gitaly and global search workers that are throttled to `throttled` queue
- ['feature_category=database,gitaly,global_search&urgency=throttled', 'throttled'],
- # Route all workers having contact with outside work to a `network-intenstive` queue
- ['has_external_dependencies=true|feature_category=hooks|tags=network', 'network-intensive'],
- # Route all import workers to the queues generated by the worker name, for
- # example, JiraImportWorker to `jira_import`, SVNWorker to `svn_worker`
- ['feature_category=import', nil],
- # Wildcard matching, route the rest to `default` queue
- ['*', 'default']
-]
-```
-
-The routing rules list is an order-matter array of tuples of query and
-corresponding queue:
-
-- The query is following a [worker matching query](#worker-matching-query) syntax.
-- The `<queue_name>` must be a valid Sidekiq queue name. If the queue name
- is `nil`, or an empty string, the worker is routed to the queue generated
- by the name of the worker instead.
-
-The query supports wildcard matching `*`, which matches all workers. As a
-result, the wildcard query must stay at the end of the list or the rules after it
-are ignored.
-
-NOTE:
-Mixing queue routing rules and queue selectors requires care to
-ensure all jobs that are scheduled and picked up by appropriate Sidekiq
-workers.
-
-## Worker matching query
-
-GitLab provides a query syntax to match a worker based on its
-attributes. This query syntax is employed by both
-[Queue routing rules](#queue-routing-rules) and
-[Queue selector](extra_sidekiq_processes.md#queue-selector). A query includes two
-components:
-
-- Attributes that can be selected.
-- Operators used to construct a query.
-
-### Available attributes
-
-> [Introduced](https://gitlab.com/gitlab-com/gl-infra/scalability/-/issues/261) in GitLab 13.1 (`tags`).
-
-Queue matching query works upon the worker attributes, described in
-[Sidekiq style guide](../../development/sidekiq/index.md). We support querying
-based on a subset of worker attributes:
-
-- `feature_category` - the
- [GitLab feature category](https://about.gitlab.com/direction/maturity/#category-maturity) the
- queue belongs to. For example, the `merge` queue belongs to the
- `source_code_management` category.
-- `has_external_dependencies` - whether or not the queue connects to external
- services. For example, all importers have this set to `true`.
-- `urgency` - how important it is that this queue's jobs run
- quickly. Can be `high`, `low`, or `throttled`. For example, the
- `authorized_projects` queue is used to refresh user permissions, and
- is `high` urgency.
-- `worker_name` - the worker name. Use this attribute to select a specific worker.
-- `name` - the queue name generated from the worker name. Use this attribute to select a specific queue. Because this is generated from
- the worker name, it does not change based on the result of other routing
- rules.
-- `resource_boundary` - if the queue is bound by `cpu`, `memory`, or
- `unknown`. For example, the `ProjectExportWorker` is memory bound as it has
- to load data in memory before saving it for export.
-- `tags` - short-lived annotations for queues. These are expected to frequently
- change from release to release, and may be removed entirely.
-
-`has_external_dependencies` is a boolean attribute: only the exact
-string `true` is considered true, and everything else is considered
-false.
-
-`tags` is a set, which means that `=` checks for intersecting sets, and
-`!=` checks for disjoint sets. For example, `tags=a,b` selects queues
-that have tags `a`, `b`, or both. `tags!=a,b` selects queues that have
-neither of those tags.
-
-The attributes of each worker are hard-coded in the source code. For
-convenience, we generate a
-[list of all available attributes in GitLab Community Edition](https://gitlab.com/gitlab-org/gitlab/-/blob/master/app/workers/all_queues.yml)
-and a
-[list of all available attributes in GitLab Enterprise Edition](https://gitlab.com/gitlab-org/gitlab/-/blob/master/ee/app/workers/all_queues.yml).
-
-### Available operators
-
-`queue_selector` supports the following operators, listed from highest
-to lowest precedence:
-
-- `|` - the logical `OR` operator. For example, `query_a|query_b` (where `query_a`
- and `query_b` are queries made up of the other operators here) includes
- queues that match either query.
-- `&` - the logical `AND` operator. For example, `query_a&query_b` (where
- `query_a` and `query_b` are queries made up of the other operators here) will
- only include queues that match both queries.
-- `!=` - the `NOT IN` operator. For example, `feature_category!=issue_tracking`
- excludes all queues from the `issue_tracking` feature category.
-- `=` - the `IN` operator. For example, `resource_boundary=cpu` includes all
- queues that are CPU bound.
-- `,` - the concatenate set operator. For example,
- `feature_category=continuous_integration,pages` includes all queues from
- either the `continuous_integration` category or the `pages` category. This
- example is also possible using the OR operator, but allows greater brevity, as
- well as being lower precedence.
-
-The operator precedence for this syntax is fixed: it's not possible to make `AND`
-have higher precedence than `OR`.
-
-[In GitLab 12.9](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/26594) and
-later, as with the standard queue group syntax above, a single `*` as the
-entire queue group selects all queues.
-
-### Migration
-
-After the Sidekiq routing rules are changed, administrators must take care
-with the migration to avoid losing jobs entirely, especially in a system with
-long queues of jobs. The migration can be done by following the migration steps
-mentioned in [Sidekiq job migration](sidekiq_job_migration.md)
+<!-- This redirect file can be deleted after <2023-02-01>. -->
+<!-- Redirects that point to other docs in the same project expire in three months. -->
+<!-- Redirects that point to docs in a different project or site (link is not relative and starts with `https:`) expire in one year. -->
+<!-- Before deletion, see: https://docs.gitlab.com/ee/development/documentation/redirects.html -->
diff --git a/doc/administration/sidekiq/index.md b/doc/administration/sidekiq/index.md
index f17c248e60e..d0cb124236c 100644
--- a/doc/administration/sidekiq/index.md
+++ b/doc/administration/sidekiq/index.md
@@ -398,7 +398,7 @@ sudo gitlab-rake gitlab:features:disable_rugged
## Related topics
- [Extra Sidekiq processes](extra_sidekiq_processes.md)
-- [Extra Sidekiq routing](extra_sidekiq_routing.md)
+- [Processing specific job classes](processing_specific_job_classes.md)
- [Sidekiq health checks](sidekiq_health_check.md)
- [Using the GitLab-Sidekiq chart](https://docs.gitlab.com/charts/charts/gitlab/sidekiq/)
diff --git a/doc/administration/sidekiq/processing_specific_job_classes.md b/doc/administration/sidekiq/processing_specific_job_classes.md
new file mode 100644
index 00000000000..18ffc8a8865
--- /dev/null
+++ b/doc/administration/sidekiq/processing_specific_job_classes.md
@@ -0,0 +1,240 @@
+---
+stage: Systems
+group: Distribution
+info: To determine the technical writer assigned to the Stage/Group associated with this page, see https://about.gitlab.com/handbook/product/ux/technical-writing/#assignments
+---
+
+# Processing specific job classes
+
+WARNING:
+These are advanced settings. While they are used on GitLab.com, most GitLab
+instances should add more processes that all listen to all queues. This is the
+same approach we take in our [Reference Architectures](../reference_architectures/index.md).
+
+GitLab has two options for creating Sidekiq processes that only handle specific
+job classes:
+
+1. [Routing rules](#routing-rules) are used on GitLab.com. They direct jobs
+ inside the application to queue names configured by administrators. This
+ lowers the load on Redis, which is important on very large-scale deployments.
+1. [Queue selectors](#queue-selectors) perform the job selection outside the
+ application, when starting the Sidekiq process. This was used on GitLab.com
+ until September 2021, and is retained for compatibility reasons.
+
+Both of these use the same [worker matching query](#worker-matching-query)
+syntax. While they can technically be used together, most deployments should
+choose one or the other; there is no particular benefit in combining them.
+
+Routing rules must be the same across all GitLab nodes as they are part of the
+application configuration. Queue selectors can be different across GitLab nodes
+because they only change the arguments to the launched Sidekiq process.
+
+## Routing rules
+
+> - [Introduced](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/59604) in GitLab 13.12.
+> - [Default routing rule value](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/97908) added in GitLab 15.4.
+
+We recommend that most GitLab instances use routing rules to manage their Sidekiq
+queues. This allows administrators to choose single queue names for groups of
+job classes based on their attributes. The syntax is an ordered array of pairs of `[query, queue]`:
+
+1. The query is a [worker matching query](#worker-matching-query).
+1. The queue name must be a valid Sidekiq queue name. If the queue name
+ is `nil`, or an empty string, the worker is routed to the queue generated
+ by the name of the worker instead. (See [list of available job classes](#list-of-available-job-classes)
+ for more information).
+ The queue name does not have to match any existing queue name in the
+ list of available job classes.
+1. The first query matching a worker is chosen for that worker; later rules are
+ ignored.
+
+### Routing rules migration
+
+After the Sidekiq routing rules are changed, administrators must take care with
+the migration to avoid losing jobs entirely, especially in a system with long
+queues of jobs. The migration can be done by following the migration steps
+mentioned in [Sidekiq job migration](sidekiq_job_migration.md).
+
+### Detailed example
+
+This is a comprehensive example intended to show different possibilities. It is
+not a recommendation.
+
+1. Edit `/etc/gitlab/gitlab.rb`:
+
+ ```ruby
+ sidekiq['routing_rules'] = [
+ # Route all non-CPU-bound workers that are high urgency to `high-urgency` queue
+ ['resource_boundary!=cpu&urgency=high', 'high-urgency'],
+ # Route all database, gitaly and global search workers that are throttled to `throttled` queue
+ ['feature_category=database,gitaly,global_search&urgency=throttled', 'throttled'],
+     # Route all workers having contact with outside world to a `network-intensive` queue
+ ['has_external_dependencies=true|feature_category=hooks|tags=network', 'network-intensive'],
+ # Route all import workers to the queues generated by the worker name, for
+ # example, JiraImportWorker to `jira_import`, SVNWorker to `svn_worker`
+ ['feature_category=import', 'import'],
+ # Wildcard matching, route the rest to `default` queue
+ ['*', 'default']
+ ]
+ ```
+
+ The `queue_groups` can then be set to match these generated queue names. For
+ instance:
+
+ ```ruby
+ sidekiq['queue_selector'] = false
+ sidekiq['queue_groups'] = [
+ # Run two high-urgency processes
+ 'high-urgency',
+ 'high-urgency',
+ # Run one process for throttled, network-intensive, import
+ 'throttled,network-intensive,import',
+ # Run one 'catchall' process on the default queue
+ 'default'
+ ]
+ ```
+
+1. Save the file and reconfigure GitLab:
+
+ ```shell
+ sudo gitlab-ctl reconfigure
+ ```
+
+## Queue selectors
+
+> - [Introduced](https://gitlab.com/gitlab-com/gl-infra/scalability/-/issues/45) in GitLab 12.8.
+> - [Sidekiq cluster, including queue selector, moved](https://gitlab.com/groups/gitlab-com/gl-infra/-/epics/181) to GitLab Free in 12.10.
+> - [Renamed from `experimental_queue_selector` to `queue_selector`](https://gitlab.com/gitlab-com/gl-infra/scalability/-/issues/147) in GitLab 13.6.
+
+The `queue_selector` option allows queue groups to be selected in a more general
+way using a [worker matching query](#worker-matching-query). After
+`queue_selector` is set, all `queue_groups` must follow the aforementioned
+syntax.
+
+### Using queue selectors
+
+1. Edit `/etc/gitlab/gitlab.rb`:
+
+ ```ruby
+ sidekiq['enable'] = true
+ sidekiq['routing_rules'] = [['*', nil]]
+ sidekiq['queue_selector'] = true
+ sidekiq['queue_groups'] = [
+ # Run all non-CPU-bound queues that are high urgency
+ 'resource_boundary!=cpu&urgency=high',
+ # Run all continuous integration and pages queues that are not high urgency
+ 'feature_category=continuous_integration,pages&urgency!=high',
+ # Run all queues
+ '*'
+ ]
+ ```
+
+1. Save the file and reconfigure GitLab:
+
+ ```shell
+ sudo gitlab-ctl reconfigure
+ ```
+
+### Negate settings
+
+This allows you to have the Sidekiq process work on every queue **except** the
+ones you list. This is generally only used when there are multiple Sidekiq
+nodes. In this example, we exclude all import-related jobs from a Sidekiq node.
+
+1. Edit `/etc/gitlab/gitlab.rb`:
+
+ ```ruby
+ sidekiq['routing_rules'] = [['*', nil]]
+ sidekiq['negate'] = true
+ sidekiq['queue_selector'] = true
+ sidekiq['queue_groups'] = [
+ "feature_category=importers"
+ ]
+ ```
+
+1. Save the file and reconfigure GitLab:
+
+ ```shell
+ sudo gitlab-ctl reconfigure
+ ```
+
+## Worker matching query
+
+GitLab provides a query syntax to match a worker based on its attributes. This
+query syntax is employed by both [routing rules](#routing-rules) and
+[queue selectors](#queue-selectors). A query includes two components:
+
+- Attributes that can be selected.
+- Operators used to construct a query.
+
+### Available attributes
+
+> [Introduced](https://gitlab.com/gitlab-com/gl-infra/scalability/-/issues/261) in GitLab 13.1 (`tags`).
+
+A queue matching query works on the worker attributes, described in the
+[Sidekiq style guide](../../development/sidekiq/index.md). We support querying
+based on a subset of worker attributes:
+
+- `feature_category` - the
+ [GitLab feature category](https://about.gitlab.com/direction/maturity/#category-maturity) the
+ queue belongs to. For example, the `merge` queue belongs to the
+ `source_code_management` category.
+- `has_external_dependencies` - whether or not the queue connects to external
+ services. For example, all importers have this set to `true`.
+- `urgency` - how important it is that this queue's jobs run
+ quickly. Can be `high`, `low`, or `throttled`. For example, the
+ `authorized_projects` queue is used to refresh user permissions, and
+ is `high` urgency.
+- `worker_name` - the worker name. Use this attribute to select a specific worker. Find all available names in [the job classes lists](#list-of-available-job-classes) below.
+- `name` - the queue name generated from the worker name. Use this attribute to select a specific queue. Because this is generated from
+ the worker name, it does not change based on the result of other routing
+ rules.
+- `resource_boundary` - if the queue is bound by `cpu`, `memory`, or
+ `unknown`. For example, the `ProjectExportWorker` is memory bound as it has
+ to load data in memory before saving it for export.
+- `tags` - short-lived annotations for queues. These are expected to frequently
+ change from release to release, and may be removed entirely.
+
+`has_external_dependencies` is a boolean attribute: only the exact
+string `true` is considered true, and everything else is considered
+false.
+
+`tags` is a set, which means that `=` checks for intersecting sets, and
+`!=` checks for disjoint sets. For example, `tags=a,b` selects queues
+that have tags `a`, `b`, or both. `tags!=a,b` selects queues that have
+neither of those tags.
+
+### Available operators
+
+Routing rules and queue selectors support the following operators, listed from
+highest to lowest precedence:
+
+- `|` - the logical `OR` operator. For example, `query_a|query_b` (where `query_a`
+ and `query_b` are queries made up of the other operators here) includes
+ queues that match either query.
+- `&` - the logical `AND` operator. For example, `query_a&query_b` (where
+ `query_a` and `query_b` are queries made up of the other operators here) will
+ only include queues that match both queries.
+- `!=` - the `NOT IN` operator. For example, `feature_category!=issue_tracking`
+ excludes all queues from the `issue_tracking` feature category.
+- `=` - the `IN` operator. For example, `resource_boundary=cpu` includes all
+ queues that are CPU bound.
+- `,` - the concatenate set operator. For example,
+ `feature_category=continuous_integration,pages` includes all queues from
+ either the `continuous_integration` category or the `pages` category. This
+ example is also possible using the OR operator, but allows greater brevity, as
+ well as being lower precedence.
+
+The operator precedence for this syntax is fixed: it's not possible to make `AND`
+have higher precedence than `OR`.
+
+As with the standard queue group syntax above, a single `*` as the
+entire queue group selects all queues.
+
+### List of available job classes
+
+For a list of the existing Sidekiq job classes and queues, check the following
+files:
+
+- [Queues for all GitLab editions](https://gitlab.com/gitlab-org/gitlab/-/blob/master/app/workers/all_queues.yml)
+- [Queues for GitLab Enterprise Editions only](https://gitlab.com/gitlab-org/gitlab/-/blob/master/ee/app/workers/all_queues.yml)
diff --git a/doc/administration/sidekiq/sidekiq_job_migration.md b/doc/administration/sidekiq/sidekiq_job_migration.md
index f61021ad4e7..d79c13c8f29 100644
--- a/doc/administration/sidekiq/sidekiq_job_migration.md
+++ b/doc/administration/sidekiq/sidekiq_job_migration.md
@@ -17,24 +17,27 @@ If the Sidekiq routing rules are changed, administrators need to take care with
1. Listen to both the old and new queues.
1. Update the routing rules.
-1. Wait until there are no publishers dispatching jobs to the old queues.
-1. Run the [Rake tasks for future jobs](#future-jobs).
-1. Wait for the old queues to be empty.
+1. [Reconfigure GitLab](../restart_gitlab.md#omnibus-gitlab-reconfigure) for the changes to take effect.
+1. Run the [Rake tasks for migrating queued and future jobs](#migrate-queued-and-future-jobs).
1. Stop listening to the old queues.
-## Future jobs
+## Migrate queued and future jobs
Step 4 involves rewriting some Sidekiq job data for jobs that are already stored in Redis, but due to run in future. There are two sets of jobs to run in future: scheduled jobs and jobs to be retried. We provide a separate Rake task to migrate each set:
- `gitlab:sidekiq:migrate_jobs:retry` for jobs to be retried.
- `gitlab:sidekiq:migrate_jobs:scheduled` for scheduled jobs.
-Most of the time, running both at the same time is the correct choice. There are two separate tasks to allow for more fine-grained control where needed. To run both at once:
+Queued jobs that are yet to be run can also be migrated with a Rake task:
+
+- `gitlab:sidekiq:migrate_jobs:queued` for queued jobs to be performed asynchronously.
+
+Most of the time, running all three at the same time is the correct choice. There are three separate tasks to allow for more fine-grained control where needed. To run all three at once:
```shell
# omnibus-gitlab
-sudo gitlab-rake gitlab:sidekiq:migrate_jobs:retry gitlab:sidekiq:migrate_jobs:schedule
+sudo gitlab-rake gitlab:sidekiq:migrate_jobs:retry gitlab:sidekiq:migrate_jobs:schedule gitlab:sidekiq:migrate_jobs:queued
# source installations
-bundle exec rake gitlab:sidekiq:migrate_jobs:retry gitlab:sidekiq:migrate_jobs:schedule RAILS_ENV=production
+bundle exec rake gitlab:sidekiq:migrate_jobs:retry gitlab:sidekiq:migrate_jobs:schedule gitlab:sidekiq:migrate_jobs:queued RAILS_ENV=production
```
diff --git a/doc/architecture/blueprints/ci_data_decay/pipeline_partitioning.md b/doc/architecture/blueprints/ci_data_decay/pipeline_partitioning.md
index d61412ae1ed..9987118b6bc 100644
--- a/doc/architecture/blueprints/ci_data_decay/pipeline_partitioning.md
+++ b/doc/architecture/blueprints/ci_data_decay/pipeline_partitioning.md
@@ -248,10 +248,9 @@ smart enough to move rows between partitions on its own.
A partitioned table is called a **routing** table and it will use the `p_`
prefix which should help us with building automated tooling for query analysis.
-A table partition will be called **partition** and it can use the a
-physical partition ID as suffix, leaded by a `p` letter, for example
-`ci_builds_p101`. Existing CI tables will become **zero partitions** of the
-new routing tables. Depending on the chosen
+A table partition will be called **partition** and it can use a physical
+partition ID as suffix, for example `ci_builds_101`. Existing CI tables will
+become **zero partitions** of the new routing tables. Depending on the chosen
[partitioning strategy](#how-do-we-want-to-partition-cicd-data) for a given
table, it is possible to have many logical partitions per one physical partition.
diff --git a/doc/ci/environments/index.md b/doc/ci/environments/index.md
index c4672b9dc7e..383127e651a 100644
--- a/doc/ci/environments/index.md
+++ b/doc/ci/environments/index.md
@@ -45,15 +45,20 @@ Deployments show up in this list only after a deployment job has created them.
## Search environments
-> [Introduced](https://gitlab.com/gitlab-org/gitlab/-/issues/10754) in GitLab 15.5.
+> - [Introduced](https://gitlab.com/gitlab-org/gitlab/-/issues/10754) in GitLab 15.5.
+> - [Searching environments within a folder](https://gitlab.com/gitlab-org/gitlab/-/issues/373850) was introduced in GitLab 15.7 with [Feature flag `enable_environments_search_within_folder`](https://gitlab.com/gitlab-org/gitlab/-/issues/382108). Disabled by default.
To search environments by name:
1. On the top bar, select **Main menu > Projects** and find your project.
1. On the left sidebar, select **Deployments > Environments**.
-1. In the search bar, enter your search term. Matching applies from the
- beginning of the environment name. For example, `devel` matches the
- environment name `development`, but `elop` does not.
+1. In the search bar, enter your search term.
+ - The length of your **search term should be 3 or more characters**.
+ - Matching applies from the beginning of the environment name.
+ - For example, `devel` matches the environment name `development`, but `elop` does not.
+ - For environments with a folder name format, matching applies after the base folder name.
+     - For example, when the name is `review/test-app`, the search term `test` matches `review/test-app`.
+     - Searching with the folder name as a prefix, such as `review/test`, also matches `review/test-app`.
## Types of environments
diff --git a/doc/development/sidekiq/index.md b/doc/development/sidekiq/index.md
index a95e94cdd34..979432a2e5b 100644
--- a/doc/development/sidekiq/index.md
+++ b/doc/development/sidekiq/index.md
@@ -27,7 +27,7 @@ There are pages with additional detail on the following topics:
All workers should include `ApplicationWorker` instead of `Sidekiq::Worker`,
which adds some convenience methods and automatically sets the queue based on
-the [routing rules](../../administration/sidekiq/extra_sidekiq_routing.md#queue-routing-rules).
+the [routing rules](../../administration/sidekiq/processing_specific_job_classes.md#routing-rules).
## Retries
@@ -88,7 +88,7 @@ error rate.
Previously, each worker had its own queue, which was automatically set based on the
worker class name. For a worker named `ProcessSomethingWorker`, the queue name
would be `process_something`. You can now route workers to a specific queue using
-[queue routing rules](../../administration/sidekiq/extra_sidekiq_routing.md#queue-routing-rules).
+[queue routing rules](../../administration/sidekiq/processing_specific_job_classes.md#routing-rules).
In GDK, new workers are routed to a queue named `default`.
If you're not sure what queue a worker uses,
diff --git a/doc/index.md b/doc/index.md
index c359ec7b639..88b8c653aae 100644
--- a/doc/index.md
+++ b/doc/index.md
@@ -20,23 +20,16 @@ description: 'Learn how to use and administer GitLab, the most scalable Git-base
# GitLab Docs
-Welcome to [GitLab](https://about.gitlab.com/) documentation.
+Welcome to the GitLab documentation!
-Here you can access the complete documentation for GitLab, the single application for the
-[entire DevOps lifecycle](#the-entire-devops-lifecycle).
-
-## Overview
-
-No matter how you use GitLab, we have documentation for you.
-
-| Essential documentation | Essential documentation |
+| | |
|:------------------------|:------------------------|
-| [**User documentation**](user/index.md)<br>Discover features and concepts for GitLab users. | [**Administrator documentation**](administration/index.md)<br/>Everything GitLab self-managed administrators need to know. |
-| [**Contributing to GitLab**](#contributing-to-gitlab)<br/>At GitLab, everyone can contribute! | [**New to Git and GitLab?**](tutorials/index.md)<br/>We have the resources to get you started. |
-| [**Build an integration with GitLab**](#build-an-integration-with-gitlab)<br/>Consult our integration documentation. | [**Coming to GitLab from another platform?**](#coming-to-gitlab-from-another-platform)<br/>Consult our guides. |
-| [**Install GitLab**](https://about.gitlab.com/install/)<br/>Installation options for different platforms. | [**Customers**](subscriptions/index.md)<br/>Information for new and existing customers. |
-| [**Update GitLab**](update/index.md)<br/>Update your GitLab self-managed instance to the latest version. | [**Reference Architectures**](administration/reference_architectures/index.md)<br/>GitLab reference architectures. |
-| [**GitLab releases**](https://about.gitlab.com/releases/)<br/>What's new in GitLab. | |
+| [**Use GitLab**](user/index.md)<br>Get started with GitLab features and functionality. | [**Administer GitLab**](administration/index.md)<br/>Administer a self-managed GitLab instance. |
+| [**New to Git and GitLab?**](tutorials/index.md)<br/>Start learning about Git and GitLab. | [**Contribute to GitLab development**](#contributing-to-gitlab)<br/>Create new GitLab functionality and documentation. |
+| [**Coming to GitLab from another platform?**](#coming-to-gitlab-from-another-platform)<br/>Learn how to move to GitLab. | [**Build an integration with GitLab**](#build-an-integration-with-gitlab)<br/>Integrate with Jira and other common applications. |
+| [**Choose a subscription**](subscriptions/index.md)<br/>Determine which subscription tier makes sense for you. | [**Install GitLab**](https://about.gitlab.com/install/)<br/>Install GitLab on different platforms. |
+| [**Reference architectures**](administration/reference_architectures/index.md)<br/>View recommended deployments at scale. | [**Update GitLab**](update/index.md)<br/>Update your GitLab self-managed instance to the latest version. |
+| [**GitLab releases**](https://about.gitlab.com/releases/)<br/>See what's new in GitLab. | |
## Popular topics
diff --git a/doc/integration/advanced_search/elasticsearch.md b/doc/integration/advanced_search/elasticsearch.md
index 94493aa6958..741dccc31fe 100644
--- a/doc/integration/advanced_search/elasticsearch.md
+++ b/doc/integration/advanced_search/elasticsearch.md
@@ -783,8 +783,8 @@ additional process dedicated to indexing a set of queues (or queue group). This
ensure that indexing queues always have a dedicated worker, while the rest of the queues have
another dedicated worker to avoid contention.
-For this purpose, use the [queue selector](../../administration/sidekiq/extra_sidekiq_processes.md#queue-selector)
-option that allows a more general selection of queue groups using a [worker matching query](../../administration/sidekiq/extra_sidekiq_routing.md#worker-matching-query).
+For this purpose, use the [queue selectors](../../administration/sidekiq/processing_specific_job_classes.md#queue-selectors)
+option that allows a more general selection of queue groups using a [worker matching query](../../administration/sidekiq/processing_specific_job_classes.md#worker-matching-query).
To handle these two queue groups, we generally recommend one of the following two options. You can either:
@@ -804,8 +804,8 @@ To create both an indexing and a non-indexing Sidekiq process in one node:
```ruby
sidekiq['enable'] = true
- sidekiq['queue_selector'] = true
- sidekiq['queue_groups'] = [
+ sidekiq['queue_selector'] = true
+ sidekiq['queue_groups'] = [
"feature_category=global_search",
"feature_category!=global_search"
]
diff --git a/doc/integration/advanced_search/elasticsearch_troubleshooting.md b/doc/integration/advanced_search/elasticsearch_troubleshooting.md
index aa6613d6f1a..11d8ad0a596 100644
--- a/doc/integration/advanced_search/elasticsearch_troubleshooting.md
+++ b/doc/integration/advanced_search/elasticsearch_troubleshooting.md
@@ -248,7 +248,7 @@ sudo gitlab-rake gitlab:elastic:clear_locked_projects
If `ElasticCommitIndexerWorker` Sidekiq workers are failing with this error during indexing, it usually means that Elasticsearch is unable to keep up with the concurrency of indexing request. To address change the following settings:
- To decrease the indexing throughput you can decrease `Bulk request concurrency` (see [Advanced Search settings](elasticsearch.md#advanced-search-configuration)). This is set to `10` by default, but you change it to as low as 1 to reduce the number of concurrent indexing operations.
-- If changing `Bulk request concurrency` didn't help, you can use the [queue selector](../../administration/sidekiq/extra_sidekiq_processes.md#queue-selector) option to [limit indexing jobs only to specific Sidekiq nodes](elasticsearch.md#index-large-instances-with-dedicated-sidekiq-nodes-or-processes), which should reduce the number of indexing requests.
+- If changing `Bulk request concurrency` didn't help, you can use the [queue selector](../../administration/sidekiq/processing_specific_job_classes.md#queue-selectors) option to [limit indexing jobs only to specific Sidekiq nodes](elasticsearch.md#index-large-instances-with-dedicated-sidekiq-nodes-or-processes), which should reduce the number of indexing requests.
### Indexing is very slow or fails with `rejected execution of coordinating operation` messages
diff --git a/doc/update/index.md b/doc/update/index.md
index dbac4304897..81911ef3d82 100644
--- a/doc/update/index.md
+++ b/doc/update/index.md
@@ -477,7 +477,7 @@ and [Helm Chart deployments](https://docs.gitlab.com/charts/). They come with ap
### 15.5.0
-- GitLab 15.4.0 introduced a default [Sidekiq routing rule](../administration/sidekiq/extra_sidekiq_routing.md) that routes all jobs to the `default` queue. For instances using [queue selectors](../administration/sidekiq/extra_sidekiq_processes.md#queue-selector), this will cause [performance problems](https://gitlab.com/gitlab-com/gl-infra/scalability/-/issues/1991) as some Sidekiq processes will be idle.
+- GitLab 15.4.0 introduced a default [Sidekiq routing rule](../administration/sidekiq/extra_sidekiq_routing.md) that routes all jobs to the `default` queue. For instances using [queue selectors](../administration/sidekiq/processing_specific_job_classes.md#queue-selectors), this will cause [performance problems](https://gitlab.com/gitlab-com/gl-infra/scalability/-/issues/1991) as some Sidekiq processes will be idle.
- The default routing rule has been reverted in 15.5.4, so upgrading to that version or later will return to the previous behavior.
- If a GitLab instance now listens only to the `default` queue (which is not currently recommended), it will be required to add this routing rule back in `/etc/gitlab/gitlab.rb`:
@@ -490,7 +490,7 @@ and [Helm Chart deployments](https://docs.gitlab.com/charts/). They come with ap
- GitLab 15.4.0 includes a [batched background migration](#batched-background-migrations) to [remove incorrect values from `expire_at` in `ci_job_artifacts` table](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/89318).
This migration might take hours or days to complete on larger GitLab instances.
- By default, Gitaly and Praefect nodes use the time server at `pool.ntp.org`. If your instance can not connect to `pool.ntp.org`, [configure the `NTP_HOST` variable](../administration/gitaly/praefect.md#customize-time-server-setting).
-- GitLab 15.4.0 introduced a default [Sidekiq routing rule](../administration/sidekiq/extra_sidekiq_routing.md) that routes all jobs to the `default` queue. For instances using [queue selectors](../administration/sidekiq/extra_sidekiq_processes.md#queue-selector), this will cause [performance problems](https://gitlab.com/gitlab-com/gl-infra/scalability/-/issues/1991) as some Sidekiq processes will be idle.
+- GitLab 15.4.0 introduced a default [Sidekiq routing rule](../administration/sidekiq/extra_sidekiq_routing.md) that routes all jobs to the `default` queue. For instances using [queue selectors](../administration/sidekiq/processing_specific_job_classes.md#queue-selectors), this will cause [performance problems](https://gitlab.com/gitlab-com/gl-infra/scalability/-/issues/1991) as some Sidekiq processes will be idle.
- The default routing rule has been reverted in 15.4.5, so upgrading to that version or later will return to the previous behavior.
- If a GitLab instance now listens only to the `default` queue (which is not currently recommended), it will be required to add this routing rule back in `/etc/gitlab/gitlab.rb`:
@@ -514,7 +514,7 @@ and [Helm Chart deployments](https://docs.gitlab.com/charts/). They come with ap
[upgraded to 15.1](#1510) before upgrading to 15.2 (and later) due to a
configuration change in Rails that can result in inconsistent ETag key
generation.
-- Some Sidekiq workers were renamed in this release. To avoid any disruption, [run the Rake tasks to migrate any pending jobs](../administration/sidekiq/sidekiq_job_migration.md#future-jobs) before starting the upgrade to GitLab 15.2.0.
+- Some Sidekiq workers were renamed in this release. To avoid any disruption, [run the Rake tasks to migrate any pending jobs](../administration/sidekiq/sidekiq_job_migration.md#migrate-queued-and-future-jobs) before starting the upgrade to GitLab 15.2.0.
- Gitaly now executes its binaries in a [runtime location](https://gitlab.com/gitlab-org/gitaly/-/merge_requests/4670). By default on Omnibus GitLab,
this path is `/var/opt/gitlab/gitaly/run/`. If this location is mounted with `noexec`, merge requests generate the following error:
diff --git a/lib/api/concerns/packages/npm_endpoints.rb b/lib/api/concerns/packages/npm_endpoints.rb
index 4cc680068b6..2e22f65731d 100644
--- a/lib/api/concerns/packages/npm_endpoints.rb
+++ b/lib/api/concerns/packages/npm_endpoints.rb
@@ -26,6 +26,24 @@ module API
authenticate_non_get!
end
+ helpers do
+ def redirect_or_present_audit_report
+ redirect_registry_request(
+ forward_to_registry: true,
+ package_type: :npm,
+ path: options[:path][0],
+ body: Gitlab::Json.dump(request.POST),
+ target: project_or_nil,
+ method: route.request_method
+ ) do
+ authorize_read_package!(project)
+
+ status :ok
+ present []
+ end
+ end
+ end
+
params do
requires :package_name, type: String, desc: 'Package name'
end
@@ -130,6 +148,22 @@ module API
with: ::API::Entities::NpmPackage
end
end
+
+ desc 'NPM registry bulk advisory endpoint' do
+ detail 'This feature was introduced in GitLab 15.6'
+ end
+ route_setting :authentication, job_token_allowed: true, deploy_token_allowed: true
+ post '-/npm/v1/security/advisories/bulk' do
+ redirect_or_present_audit_report
+ end
+
+ desc 'NPM registry quick audit endpoint' do
+ detail 'This feature was introduced in GitLab 15.6'
+ end
+ route_setting :authentication, job_token_allowed: true, deploy_token_allowed: true
+ post '-/npm/v1/security/audits/quick' do
+ redirect_or_present_audit_report
+ end
end
end
end
diff --git a/lib/api/helpers/packages/dependency_proxy_helpers.rb b/lib/api/helpers/packages/dependency_proxy_helpers.rb
index 1ae863a5a25..4b0e63c8f3b 100644
--- a/lib/api/helpers/packages/dependency_proxy_helpers.rb
+++ b/lib/api/helpers/packages/dependency_proxy_helpers.rb
@@ -19,7 +19,9 @@ module API
def redirect_registry_request(forward_to_registry: false, package_type: nil, target: nil, **options)
if forward_to_registry && redirect_registry_request_available?(package_type, target) && maven_forwarding_ff_enabled?(package_type, target)
::Gitlab::Tracking.event(self.options[:for].name, "#{package_type}_request_forward")
- redirect(registry_url(package_type, options))
+ redirect(registry_url(package_type, options), body: options[:body])
+            # For requests with the POST method, we need to set status 307 to preserve the request's method
+ status :temporary_redirect if options[:method] == 'POST'
else
yield
end
@@ -32,7 +34,7 @@ module API
case package_type
when :npm
- "#{base_url}#{options[:package_name]}"
+ "#{base_url}#{[options[:path], options[:package_name]].compact.join('/')}"
when :pypi
"#{base_url}#{options[:package_name]}/"
when :maven
diff --git a/lib/api/nuget_project_packages.rb b/lib/api/nuget_project_packages.rb
index d549a8be035..b4c00eef3b4 100644
--- a/lib/api/nuget_project_packages.rb
+++ b/lib/api/nuget_project_packages.rb
@@ -39,18 +39,19 @@ module API
end
def project_or_group
- authorized_user_project
+ authorized_user_project(action: :read_package)
end
def snowplow_gitlab_standard_context
- { project: authorized_user_project, namespace: authorized_user_project.namespace }
+ { project: project_or_group, namespace: project_or_group.namespace }
end
def authorize_nuget_upload
+ project = project_or_group
authorize_workhorse!(
- subject: project_or_group,
+ subject: project,
has_length: false,
- maximum_size: project_or_group.actual_limits.nuget_max_file_size
+ maximum_size: project.actual_limits.nuget_max_file_size
)
end
@@ -67,8 +68,9 @@ module API
end
def upload_nuget_package_file(symbol_package: false)
- authorize_upload!(project_or_group)
- bad_request!('File is too large') if project_or_group.actual_limits.exceeded?(:nuget_max_file_size, params[:package].size)
+ project = project_or_group
+ authorize_upload!(project)
+ bad_request!('File is too large') if project.actual_limits.exceeded?(:nuget_max_file_size, params[:package].size)
file_params = params.merge(
file: params[:package],
@@ -76,7 +78,7 @@ module API
)
package = ::Packages::CreateTemporaryPackageService.new(
- project_or_group, current_user, declared_params.merge(build: current_authenticated_job)
+ project, current_user, declared_params.merge(build: current_authenticated_job)
).execute(:nuget, name: temp_file_name(symbol_package))
package_file = ::Packages::CreatePackageFileService.new(package, file_params.merge(build: current_authenticated_job))
diff --git a/lib/gitlab/database.rb b/lib/gitlab/database.rb
index 04cf056199c..51d5bfcee38 100644
--- a/lib/gitlab/database.rb
+++ b/lib/gitlab/database.rb
@@ -101,7 +101,8 @@ module Gitlab
gitlab_main: [self.database_base_models.fetch(:main)],
gitlab_ci: [self.database_base_models[:ci] || self.database_base_models.fetch(:main)], # use CI or fallback to main
gitlab_shared: database_base_models_with_gitlab_shared.values, # all models
- gitlab_internal: database_base_models.values # all models
+ gitlab_internal: database_base_models.values, # all models
+ gitlab_pm: [self.database_base_models.fetch(:main)] # package metadata models
}.with_indifferent_access.freeze
end
diff --git a/lib/gitlab/database/gitlab_schemas.yml b/lib/gitlab/database/gitlab_schemas.yml
index 704bd929595..788d343dad9 100644
--- a/lib/gitlab/database/gitlab_schemas.yml
+++ b/lib/gitlab/database/gitlab_schemas.yml
@@ -409,6 +409,10 @@ path_locks: :gitlab_main
personal_access_tokens: :gitlab_main
plan_limits: :gitlab_main
plans: :gitlab_main
+pm_licenses: :gitlab_pm
+pm_packages: :gitlab_pm
+pm_package_versions: :gitlab_pm
+pm_package_version_licenses: :gitlab_pm
pool_repositories: :gitlab_main
postgres_async_indexes: :gitlab_shared
postgres_autovacuum_activity: :gitlab_shared
diff --git a/lib/gitlab/memory/jemalloc.rb b/lib/gitlab/memory/jemalloc.rb
index e20e186cab9..81c4be0f7fc 100644
--- a/lib/gitlab/memory/jemalloc.rb
+++ b/lib/gitlab/memory/jemalloc.rb
@@ -97,7 +97,9 @@ module Gitlab
end
def file_name(extension, filename_label)
- [FILENAME_PREFIX, $$, filename_label, Time.current.to_i, extension].reject(&:blank?).join('.')
+ timestamp = Time.current.strftime('%Y-%m-%d.%H:%M:%S:%L')
+
+ [FILENAME_PREFIX, timestamp, filename_label, extension].reject(&:blank?).join('.')
end
end
end
diff --git a/lib/gitlab/memory/reports/jemalloc_stats.rb b/lib/gitlab/memory/reports/jemalloc_stats.rb
index 05f0717d7c3..720f22ddbe4 100644
--- a/lib/gitlab/memory/reports/jemalloc_stats.rb
+++ b/lib/gitlab/memory/reports/jemalloc_stats.rb
@@ -16,8 +16,9 @@ module Gitlab
# The cleanup logic will be redundant after we'll implement the uploads, which would perform the cleanup.
DEFAULT_MAX_REPORTS_STORED = 250
- def initialize(reports_path:)
+ def initialize(reports_path:, filename_label:)
@reports_path = reports_path
+ @filename_label = filename_label
# Store report in tmp subdir while it is still streaming.
# This will clearly separate finished reports from the files we are still writing to.
@@ -28,7 +29,8 @@ module Gitlab
def run
return unless active?
- Gitlab::Memory::Jemalloc.dump_stats(path: reports_path, tmp_dir: @tmp_dir, filename_label: worker_id).tap do
+ Gitlab::Memory::Jemalloc.dump_stats(path: reports_path, tmp_dir: @tmp_dir,
+ filename_label: filename_label).tap do
cleanup
end
end
@@ -39,7 +41,7 @@ module Gitlab
private
- attr_reader :reports_path
+ attr_reader :reports_path, :filename_label
def cleanup
reports_files_modified_order[0...-max_reports_stored].each do |f|
@@ -61,10 +63,6 @@ module Gitlab
end
end
- def worker_id
- ::Prometheus::PidProvider.worker_id
- end
-
def max_reports_stored
ENV["GITLAB_DIAGNOSTIC_REPORTS_JEMALLOC_MAX_REPORTS_STORED"] || DEFAULT_MAX_REPORTS_STORED
end
diff --git a/lib/gitlab/memory/reports_daemon.rb b/lib/gitlab/memory/reports_daemon.rb
index 0dfc31235e7..7070c65c705 100644
--- a/lib/gitlab/memory/reports_daemon.rb
+++ b/lib/gitlab/memory/reports_daemon.rb
@@ -24,7 +24,15 @@ module Gitlab
@reports_path =
ENV["GITLAB_DIAGNOSTIC_REPORTS_PATH"] || DEFAULT_REPORTS_PATH
- @reports = [Gitlab::Memory::Reports::JemallocStats.new(reports_path: reports_path)]
+ # Set unique uuid for every ReportsDaemon instance.
+ # Because we spawn a single instance of it per process, it will also uniquely identify the worker.
+ # Unlike `::Prometheus::PidProvider.worker_id`, this uuid will remain unique across all Puma clusters.
+ # This way, we can identify reports that were produced from the same worker process during its lifetime.
+ @worker_uuid = SecureRandom.uuid
+
+ @reports = [
+ Gitlab::Memory::Reports::JemallocStats.new(reports_path: reports_path, filename_label: filename_label)
+ ]
init_prometheus_metrics
end
@@ -54,7 +62,11 @@ module Gitlab
private
- attr_reader :alive, :reports
+ attr_reader :alive, :reports, :worker_uuid
+
+ def filename_label
+ [worker_id, worker_uuid].join(".")
+ end
# Returns the sleep interval with a random adjustment.
# The random adjustment is put in place to ensure continued availability.
@@ -70,7 +82,8 @@ module Gitlab
perf_report: label,
duration_s: duration_s.round(2),
cpu_s: cpu_s.round(2),
- perf_report_size_bytes: size
+ perf_report_size_bytes: size,
+ perf_report_worker_uuid: worker_uuid
)
end
diff --git a/lib/gitlab/middleware/compressed_json.rb b/lib/gitlab/middleware/compressed_json.rb
index f66dfe44054..80916eab5ac 100644
--- a/lib/gitlab/middleware/compressed_json.rb
+++ b/lib/gitlab/middleware/compressed_json.rb
@@ -4,7 +4,18 @@ module Gitlab
module Middleware
class CompressedJson
COLLECTOR_PATH = '/api/v4/error_tracking/collector'
+ PACKAGES_PATH = %r{
+ \A/api/v4/ (?# prefix)
+ (?:projects/
+ (?<project_id>
+ .+ (?# at least one character)
+ )/
+ )? (?# projects segment)
+ packages/npm/-/npm/v1/security/
+ (?:(?:advisories/bulk)|(?:audits/quick))\z (?# end)
+ }xi.freeze
MAXIMUM_BODY_SIZE = 200.kilobytes.to_i
+ UNSAFE_CHARACTERS = %r{[!"#&'()*+,./:;<>=?@\[\]^`{}|~$]}xi.freeze
def initialize(app)
@app = app
@@ -60,7 +71,21 @@ module Gitlab
end
def match_path?(env)
- env['PATH_INFO'].start_with?((File.join(relative_url, COLLECTOR_PATH)))
+ env['PATH_INFO'].start_with?((File.join(relative_url, COLLECTOR_PATH))) ||
+ match_packages_path?(env)
+ end
+
+ def match_packages_path?(env)
+ match_data = env['PATH_INFO'].delete_prefix(relative_url).match(PACKAGES_PATH)
+ return false unless match_data
+
+ return true unless match_data[:project_id] # instance level endpoint was matched
+
+ url_encoded?(match_data[:project_id])
+ end
+
+ def url_encoded?(project_id)
+ project_id !~ UNSAFE_CHARACTERS
end
end
end
diff --git a/locale/gitlab.pot b/locale/gitlab.pot
index 3995ce97c92..e76e6c1c3b5 100644
--- a/locale/gitlab.pot
+++ b/locale/gitlab.pot
@@ -42716,6 +42716,9 @@ msgstr ""
msgid "Todos|Design"
msgstr ""
+msgid "Todos|Due %{due_date}"
+msgstr ""
+
msgid "Todos|Epic"
msgstr ""
diff --git a/spec/features/projects/user_sees_sidebar_spec.rb b/spec/features/projects/user_sees_sidebar_spec.rb
index e2498928fa0..219c6bc678c 100644
--- a/spec/features/projects/user_sees_sidebar_spec.rb
+++ b/spec/features/projects/user_sees_sidebar_spec.rb
@@ -220,7 +220,7 @@ RSpec.describe 'Projects > User sees sidebar' do
it 'does not show fork button' do
visit project_path(project)
- within('.count-buttons') do
+ within('.project-repo-buttons') do
expect(page).not_to have_link 'Fork'
end
end
diff --git a/spec/helpers/todos_helper_spec.rb b/spec/helpers/todos_helper_spec.rb
index 7c91dd0570f..48aff14180e 100644
--- a/spec/helpers/todos_helper_spec.rb
+++ b/spec/helpers/todos_helper_spec.rb
@@ -351,4 +351,32 @@ RSpec.describe TodosHelper do
it { expect(helper.todo_action_name(alert_todo)).to eq(expected_action_name) }
end
end
+
+ describe '#todo_due_date' do
+ subject(:result) { helper.todo_due_date(todo) }
+
+ context 'due date is today' do
+ let_it_be(:issue_with_today_due_date) do
+ create(:issue, title: 'Issue 1', project: project, due_date: Date.current)
+ end
+
+ let(:todo) do
+ create(:todo, project: issue_with_today_due_date.project, target: issue_with_today_due_date, note: note)
+ end
+
+ it { expect(result).to match('Due today') }
+ end
+
+ context 'due date is not today' do
+ let_it_be(:issue_with_tomorrow_due_date) do
+ create(:issue, title: 'Issue 1', project: project, due_date: Date.tomorrow)
+ end
+
+ let(:todo) do
+ create(:todo, project: issue_with_tomorrow_due_date.project, target: issue_with_tomorrow_due_date, note: note)
+ end
+
+ it { expect(result).to match("Due #{l(Date.tomorrow, format: Date::DATE_FORMATS[:medium])}") }
+ end
+ end
end
diff --git a/spec/lib/gitlab/memory/jemalloc_spec.rb b/spec/lib/gitlab/memory/jemalloc_spec.rb
index 414d6017534..9986af4bc13 100644
--- a/spec/lib/gitlab/memory/jemalloc_spec.rb
+++ b/spec/lib/gitlab/memory/jemalloc_spec.rb
@@ -33,7 +33,7 @@ RSpec.describe Gitlab::Memory::Jemalloc do
it 'writes stats JSON file' do
file_path = described_class.dump_stats(path: outdir, tmp_dir: tmp_outdir, format: format)
- file = Dir.entries(outdir).find { |e| e.match(/jemalloc_stats\.#{$$}\.\d+\.json$/) }
+ file = Dir.entries(outdir).find { |e| e.match(/jemalloc_stats\..*\.json$/) }
expect(file).not_to be_nil
expect(file_path).to eq(File.join(outdir, file))
expect(File.read(file_path)).to eq(output)
@@ -68,11 +68,13 @@ RSpec.describe Gitlab::Memory::Jemalloc do
end
context 'when custom filename label is passed' do
- include_examples 'writes stats text file', 'puma_0', /jemalloc_stats\.#{$$}\.puma_0\.\d+\.txt$/
+ include_examples 'writes stats text file',
+ 'puma_0.some-uuid',
+ /jemalloc_stats\..*\.puma_0\.some-uuid\.txt$/
end
context 'when custom filename label is not passed' do
- include_examples 'writes stats text file', nil, /jemalloc_stats\.#{$$}\.\d+\.txt$/
+ include_examples 'writes stats text file', nil, /jemalloc_stats\..*\.txt$/
end
end
end
diff --git a/spec/lib/gitlab/memory/reports/jemalloc_stats_spec.rb b/spec/lib/gitlab/memory/reports/jemalloc_stats_spec.rb
index b327a40bc2c..de27c8352f9 100644
--- a/spec/lib/gitlab/memory/reports/jemalloc_stats_spec.rb
+++ b/spec/lib/gitlab/memory/reports/jemalloc_stats_spec.rb
@@ -5,7 +5,8 @@ require 'spec_helper'
RSpec.describe Gitlab::Memory::Reports::JemallocStats do
let_it_be(:outdir) { Dir.mktmpdir }
- let(:jemalloc_stats) { described_class.new(reports_path: outdir) }
+ let(:filename_label) { SecureRandom.uuid }
+ let(:jemalloc_stats) { described_class.new(reports_path: outdir, filename_label: filename_label) }
after do
FileUtils.rm_f(outdir)
@@ -26,14 +27,14 @@ RSpec.describe Gitlab::Memory::Reports::JemallocStats do
.to receive(:dump_stats)
.with(path: outdir,
tmp_dir: File.join(outdir, '/tmp'),
- filename_label: worker_id)
+ filename_label: filename_label)
.and_return(report_path)
expect(jemalloc_stats.run).to eq(report_path)
end
describe 'reports cleanup' do
- let(:jemalloc_stats) { described_class.new(reports_path: outdir) }
+ let(:jemalloc_stats) { described_class.new(reports_path: outdir, filename_label: filename_label) }
before do
stub_env('GITLAB_DIAGNOSTIC_REPORTS_JEMALLOC_MAX_REPORTS_STORED', 3)
diff --git a/spec/lib/gitlab/memory/reports_daemon_spec.rb b/spec/lib/gitlab/memory/reports_daemon_spec.rb
index 0473e170502..ab616e92b00 100644
--- a/spec/lib/gitlab/memory/reports_daemon_spec.rb
+++ b/spec/lib/gitlab/memory/reports_daemon_spec.rb
@@ -38,6 +38,7 @@ RSpec.describe Gitlab::Memory::ReportsDaemon, :aggregate_failures do
hash_including(
:duration_s,
:cpu_s,
+ :perf_report_worker_uuid,
perf_report_size_bytes: file_size,
message: 'finished',
pid: Process.pid,
diff --git a/spec/lib/gitlab/middleware/compressed_json_spec.rb b/spec/lib/gitlab/middleware/compressed_json_spec.rb
index 6d49ab58d5d..1444e6a9881 100644
--- a/spec/lib/gitlab/middleware/compressed_json_spec.rb
+++ b/spec/lib/gitlab/middleware/compressed_json_spec.rb
@@ -9,6 +9,7 @@ RSpec.describe Gitlab::Middleware::CompressedJson do
let(:app) { double(:app) }
let(:middleware) { described_class.new(app) }
let(:content_type) { 'application/json' }
+ let(:relative_url_root) { '/gitlab' }
let(:env) do
{
'HTTP_CONTENT_ENCODING' => 'gzip',
@@ -31,6 +32,43 @@ RSpec.describe Gitlab::Middleware::CompressedJson do
end
end
+ shared_examples 'passes input' do
+ it 'keeps the original input' do
+ expect(app).to receive(:call)
+
+ middleware.call(env)
+
+ expect(env['rack.input'].read).to eq(input)
+ expect(env['HTTP_CONTENT_ENCODING']).to eq('gzip')
+ end
+ end
+
+ shared_context 'with relative url' do
+ before do
+ stub_config_setting(relative_url_root: relative_url_root)
+ end
+ end
+
+ shared_examples 'handles non integer project ID' do
+ context 'with a URL-encoded project ID' do
+ let_it_be(:project_id) { 'gitlab-org%2fgitlab' }
+
+ it_behaves_like 'decompress middleware'
+ end
+
+ context 'with a non URL-encoded project ID' do
+ let_it_be(:project_id) { '1/repository/files/api/v4' }
+
+ it_behaves_like 'passes input'
+ end
+
+ context 'with a blank project ID' do
+ let_it_be(:project_id) { '' }
+
+ it_behaves_like 'passes input'
+ end
+ end
+
describe '#call' do
context 'with collector route' do
let(:path) { '/api/v4/error_tracking/collector/1/store' }
@@ -42,31 +80,80 @@ RSpec.describe Gitlab::Middleware::CompressedJson do
it_behaves_like 'decompress middleware'
end
+
+ include_context 'with relative url' do
+ let(:path) { "#{relative_url_root}/api/v4/error_tracking/collector/1/store" }
+
+ it_behaves_like 'decompress middleware'
+ end
end
- context 'with collector route under relative url' do
- let(:path) { '/gitlab/api/v4/error_tracking/collector/1/store' }
+ context 'with packages route' do
+ context 'with instance level endpoint' do
+ context 'with npm advisory bulk url' do
+ let(:path) { '/api/v4/packages/npm/-/npm/v1/security/advisories/bulk' }
+
+ it_behaves_like 'decompress middleware'
+
+ include_context 'with relative url' do
+ let(:path) { "#{relative_url_root}/api/v4/packages/npm/-/npm/v1/security/advisories/bulk" }
+
+ it_behaves_like 'decompress middleware'
+ end
+ end
+
+ context 'with npm quick audit url' do
+ let(:path) { '/api/v4/packages/npm/-/npm/v1/security/audits/quick' }
- before do
- stub_config_setting(relative_url_root: '/gitlab')
+ it_behaves_like 'decompress middleware'
+
+ include_context 'with relative url' do
+ let(:path) { "#{relative_url_root}/api/v4/packages/npm/-/npm/v1/security/audits/quick" }
+
+ it_behaves_like 'decompress middleware'
+ end
+ end
end
- it_behaves_like 'decompress middleware'
- end
+ context 'with project level endpoint' do
+ let_it_be(:project_id) { 1 }
- context 'with some other route' do
- let(:path) { '/api/projects/123' }
+ context 'with npm advisory bulk url' do
+ let(:path) { "/api/v4/projects/#{project_id}/packages/npm/-/npm/v1/security/advisories/bulk" }
- it 'keeps the original input' do
- expect(app).to receive(:call)
+ it_behaves_like 'decompress middleware'
- middleware.call(env)
+ include_context 'with relative url' do
+ let(:path) { "#{relative_url_root}/api/v4/projects/#{project_id}/packages/npm/-/npm/v1/security/advisories/bulk" } # rubocop:disable Layout/LineLength
- expect(env['rack.input'].read).to eq(input)
- expect(env['HTTP_CONTENT_ENCODING']).to eq('gzip')
+ it_behaves_like 'decompress middleware'
+ end
+
+ it_behaves_like 'handles non integer project ID'
+ end
+
+ context 'with npm quick audit url' do
+ let(:path) { "/api/v4/projects/#{project_id}/packages/npm/-/npm/v1/security/audits/quick" }
+
+ it_behaves_like 'decompress middleware'
+
+ include_context 'with relative url' do
+ let(:path) { "#{relative_url_root}/api/v4/projects/#{project_id}/packages/npm/-/npm/v1/security/audits/quick" } # rubocop:disable Layout/LineLength
+
+ it_behaves_like 'decompress middleware'
+ end
+
+ it_behaves_like 'handles non integer project ID'
+ end
end
end
+ context 'with some other route' do
+ let(:path) { '/api/projects/123' }
+
+ it_behaves_like 'passes input'
+ end
+
context 'payload is too large' do
let(:body_limit) { Gitlab::Middleware::CompressedJson::MAXIMUM_BODY_SIZE }
let(:decompressed_input) { 'a' * (body_limit + 100) }
diff --git a/spec/requests/api/npm_instance_packages_spec.rb b/spec/requests/api/npm_instance_packages_spec.rb
index 698885ddcf4..fa80d4f3df8 100644
--- a/spec/requests/api/npm_instance_packages_spec.rb
+++ b/spec/requests/api/npm_instance_packages_spec.rb
@@ -33,4 +33,16 @@ RSpec.describe API::NpmInstancePackages do
let(:url) { api("/packages/npm/-/package/#{package_name}/dist-tags/#{tag_name}") }
end
end
+
+ describe 'POST /api/v4/packages/npm/-/npm/v1/security/advisories/bulk' do
+ it_behaves_like 'handling audit request', path: 'advisories/bulk', scope: :instance do
+ let(:url) { api('/packages/npm/-/npm/v1/security/advisories/bulk') }
+ end
+ end
+
+ describe 'POST /api/v4/packages/npm/-/npm/v1/security/audits/quick' do
+ it_behaves_like 'handling audit request', path: 'audits/quick', scope: :instance do
+ let(:url) { api('/packages/npm/-/npm/v1/security/audits/quick') }
+ end
+ end
end
diff --git a/spec/requests/api/npm_project_packages_spec.rb b/spec/requests/api/npm_project_packages_spec.rb
index 373327787a2..3af4215052e 100644
--- a/spec/requests/api/npm_project_packages_spec.rb
+++ b/spec/requests/api/npm_project_packages_spec.rb
@@ -42,6 +42,18 @@ RSpec.describe API::NpmProjectPackages do
end
end
+ describe 'POST /api/v4/projects/:id/packages/npm/-/npm/v1/security/advisories/bulk' do
+ it_behaves_like 'handling audit request', path: 'advisories/bulk', scope: :project do
+ let(:url) { api("/projects/#{project.id}/packages/npm/-/npm/v1/security/advisories/bulk") }
+ end
+ end
+
+ describe 'POST /api/v4/projects/:id/packages/npm/-/npm/v1/security/audits/quick' do
+ it_behaves_like 'handling audit request', path: 'audits/quick', scope: :project do
+ let(:url) { api("/projects/#{project.id}/packages/npm/-/npm/v1/security/audits/quick") }
+ end
+ end
+
describe 'GET /api/v4/projects/:id/packages/npm/*package_name/-/*file_name' do
let(:snowplow_gitlab_standard_context) { { project: project, namespace: project.namespace } }
let(:package_file) { package.package_files.first }
diff --git a/spec/requests/api/nuget_project_packages_spec.rb b/spec/requests/api/nuget_project_packages_spec.rb
index f608f296295..4617583493d 100644
--- a/spec/requests/api/nuget_project_packages_spec.rb
+++ b/spec/requests/api/nuget_project_packages_spec.rb
@@ -9,38 +9,65 @@ RSpec.describe API::NugetProjectPackages do
let_it_be_with_reload(:project) { create(:project, :public) }
let_it_be(:deploy_token) { create(:deploy_token, read_package_registry: true, write_package_registry: true) }
let_it_be(:project_deploy_token) { create(:project_deploy_token, deploy_token: deploy_token, project: project) }
+ let_it_be(:package_name) { 'Dummy.Package' }
let(:target) { project }
let(:target_type) { 'projects' }
+ shared_examples 'accept get request on private project with access to package registry for everyone' do
+ subject { get api(url) }
+
+ before do
+ update_visibility_to(Gitlab::VisibilityLevel::PRIVATE)
+ project.project_feature.update!(package_registry_access_level: ProjectFeature::PUBLIC)
+ end
+
+ it_behaves_like 'returning response status', :ok
+ end
+
describe 'GET /api/v4/projects/:id/packages/nuget' do
+ let(:url) { "/projects/#{target.id}/packages/nuget/index.json" }
+
it_behaves_like 'handling nuget service requests' do
- let(:url) { "/projects/#{target.id}/packages/nuget/index.json" }
let(:snowplow_gitlab_standard_context) { { project: project, namespace: project.namespace } }
end
+
+ it_behaves_like 'accept get request on private project with access to package registry for everyone'
end
describe 'GET /api/v4/projects/:id/packages/nuget/metadata/*package_name/index' do
- it_behaves_like 'handling nuget metadata requests with package name' do
- let(:url) { "/projects/#{target.id}/packages/nuget/metadata/#{package_name}/index.json" }
+ let(:url) { "/projects/#{target.id}/packages/nuget/metadata/#{package_name}/index.json" }
+
+ it_behaves_like 'handling nuget metadata requests with package name'
+
+ it_behaves_like 'accept get request on private project with access to package registry for everyone' do
+ let_it_be(:packages) { create_list(:nuget_package, 5, :with_metadatum, name: package_name, project: project) }
end
end
describe 'GET /api/v4/projects/:id/packages/nuget/metadata/*package_name/*package_version' do
- it_behaves_like 'handling nuget metadata requests with package name and package version' do
- let(:url) { "/projects/#{target.id}/packages/nuget/metadata/#{package_name}/#{package.version}.json" }
+ let(:url) { "/projects/#{target.id}/packages/nuget/metadata/#{package_name}/#{package.version}.json" }
+
+ it_behaves_like 'handling nuget metadata requests with package name and package version'
+
+ it_behaves_like 'accept get request on private project with access to package registry for everyone' do
+ let_it_be(:package) { create(:nuget_package, :with_metadatum, name: package_name, project: project) }
end
end
describe 'GET /api/v4/projects/:id/packages/nuget/query' do
+ let(:url) { "/projects/#{target.id}/packages/nuget/query?#{query_parameters.to_query}" }
+
it_behaves_like 'handling nuget search requests' do
- let(:url) { "/projects/#{target.id}/packages/nuget/query?#{query_parameters.to_query}" }
let(:snowplow_gitlab_standard_context) { { project: project, namespace: project.namespace } }
end
+
+ it_behaves_like 'accept get request on private project with access to package registry for everyone' do
+ let_it_be(:query_parameters) { { q: 'query', take: 5, skip: 0, prerelease: true } }
+ end
end
describe 'GET /api/v4/projects/:id/packages/nuget/download/*package_name/index' do
- let_it_be(:package_name) { 'Dummy.Package' }
let_it_be(:packages) { create_list(:nuget_package, 5, name: package_name, project: project) }
let(:url) { "/projects/#{target.id}/packages/nuget/download/#{package_name}/index.json" }
@@ -88,10 +115,11 @@ RSpec.describe API::NugetProjectPackages do
it_behaves_like 'rejects nuget access with unknown target id'
it_behaves_like 'rejects nuget access with invalid target id'
+
+ it_behaves_like 'accept get request on private project with access to package registry for everyone'
end
describe 'GET /api/v4/projects/:id/packages/nuget/download/*package_name/*package_version/*package_filename' do
- let_it_be(:package_name) { 'Dummy.Package' }
let_it_be(:package) { create(:nuget_package, :with_symbol_package, project: project, name: package_name) }
let(:format) { 'nupkg' }
@@ -134,6 +162,8 @@ RSpec.describe API::NugetProjectPackages do
it_behaves_like params[:shared_examples_name], params[:user_role], params[:expected_status], params[:member]
end
+
+ it_behaves_like 'accept get request on private project with access to package registry for everyone'
end
it_behaves_like 'deploy token for package GET requests' do
diff --git a/spec/services/packages/debian/process_changes_service_spec.rb b/spec/services/packages/debian/process_changes_service_spec.rb
index a45dd68cd6e..27b49a13d52 100644
--- a/spec/services/packages/debian/process_changes_service_spec.rb
+++ b/spec/services/packages/debian/process_changes_service_spec.rb
@@ -42,7 +42,7 @@ RSpec.describe Packages::Debian::ProcessChangesService do
end
context 'marked as pending_destruction' do
- it 'creates a package' do
+ it 'does not re-use the existing package' do
existing_package.pending_destruction!
expect { subject.execute }
@@ -73,7 +73,7 @@ RSpec.describe Packages::Debian::ProcessChangesService do
end
end
- it 'remove the package file', :aggregate_failures do
+ it 're-raise error', :aggregate_failures do
expect(::Packages::Debian::GenerateDistributionWorker).not_to receive(:perform_async)
expect { subject.execute }
.to not_change { Packages::Package.count }
diff --git a/spec/services/packages/debian/process_package_file_service_spec.rb b/spec/services/packages/debian/process_package_file_service_spec.rb
new file mode 100644
index 00000000000..f3d6cdee7b4
--- /dev/null
+++ b/spec/services/packages/debian/process_package_file_service_spec.rb
@@ -0,0 +1,160 @@
+# frozen_string_literal: true
+require 'spec_helper'
+
+RSpec.describe Packages::Debian::ProcessPackageFileService do
+ describe '#execute' do
+ let_it_be(:user) { create(:user) }
+ let_it_be_with_reload(:distribution) { create(:debian_project_distribution, :with_file, codename: 'unstable') }
+
+ let!(:incoming) { create(:debian_incoming, project: distribution.project) }
+
+ let(:distribution_name) { distribution.codename }
+ let(:debian_file_metadatum) { package_file.debian_file_metadatum }
+
+ subject { described_class.new(package_file, user, distribution_name, component_name) }
+
+ RSpec.shared_context 'with Debian package file' do |file_name|
+ let(:package_file) { incoming.package_files.with_file_name(file_name).first }
+ end
+
+ using RSpec::Parameterized::TableSyntax
+
+ where(:case_name, :expected_file_type, :file_name, :component_name) do
+ 'with a deb' | 'deb' | 'libsample0_1.2.3~alpha2_amd64.deb' | 'main'
+ 'with an udeb' | 'udeb' | 'sample-udeb_1.2.3~alpha2_amd64.udeb' | 'contrib'
+ end
+
+ with_them do
+ include_context 'with Debian package file', params[:file_name] do
+ it 'creates package and updates package file', :aggregate_failures do
+ expect(::Packages::Debian::GenerateDistributionWorker)
+ .to receive(:perform_async).with(:project, distribution.id)
+ expect { subject.execute }
+ .to change(Packages::Package, :count).from(1).to(2)
+ .and not_change(Packages::PackageFile, :count)
+ .and change(incoming.package_files, :count).from(7).to(6)
+ .and change(debian_file_metadatum, :file_type).from('unknown').to(expected_file_type)
+ .and change(debian_file_metadatum, :component).from(nil).to(component_name)
+
+ created_package = Packages::Package.last
+ expect(created_package.name).to eq 'sample'
+ expect(created_package.version).to eq '1.2.3~alpha2'
+ expect(created_package.creator).to eq user
+ end
+
+ context 'with existing package' do
+ let_it_be_with_reload(:existing_package) do
+ create(:debian_package, name: 'sample', version: '1.2.3~alpha2', project: distribution.project)
+ end
+
+ before do
+ existing_package.update!(debian_distribution: distribution)
+ end
+
+ it 'does not create a package and assigns the package_file to the existing package' do
+ expect(::Packages::Debian::GenerateDistributionWorker)
+ .to receive(:perform_async).with(:project, distribution.id)
+ expect { subject.execute }
+ .to not_change(Packages::Package, :count)
+ .and not_change(Packages::PackageFile, :count)
+ .and change(incoming.package_files, :count).from(7).to(6)
+ .and change(package_file, :package).from(incoming).to(existing_package)
+ .and change(debian_file_metadatum, :file_type).from('unknown').to(expected_file_type.to_s)
+ .and change(debian_file_metadatum, :component).from(nil).to(component_name)
+ end
+
+ context 'when marked as pending_destruction' do
+ it 'does not re-use the existing package' do
+ existing_package.pending_destruction!
+
+ expect { subject.execute }
+ .to change(Packages::Package, :count).by(1)
+ .and not_change(Packages::PackageFile, :count)
+ end
+ end
+ end
+ end
+ end
+
+ context 'without a distribution' do
+ let(:package_file) { incoming.package_files.with_file_name('libsample0_1.2.3~alpha2_amd64.deb').first }
+ let(:component_name) { 'main' }
+
+ before do
+ distribution.destroy!
+ end
+
+ it 'raise ActiveRecord::RecordNotFound', :aggregate_failures do
+ expect(::Packages::Debian::GenerateDistributionWorker).not_to receive(:perform_async)
+ expect { subject.execute }
+ .to not_change(Packages::Package, :count)
+ .and not_change(Packages::PackageFile, :count)
+ .and not_change(incoming.package_files, :count)
+ .and raise_error(ActiveRecord::RecordNotFound)
+ end
+ end
+
+ context 'with package file without Debian metadata' do
+ let!(:package_file) { create(:debian_package_file, without_loaded_metadatum: true) }
+ let(:component_name) { 'main' }
+
+ it 'raise ArgumentError', :aggregate_failures do
+ expect(::Packages::Debian::GenerateDistributionWorker).not_to receive(:perform_async)
+ expect { subject.execute }
+ .to not_change(Packages::Package, :count)
+ .and not_change(Packages::PackageFile, :count)
+ .and not_change(incoming.package_files, :count)
+ .and raise_error(ArgumentError, 'package file without Debian metadata')
+ end
+ end
+
+ context 'with already processed package file' do
+ let!(:package_file) { create(:debian_package_file) }
+ let(:component_name) { 'main' }
+
+ it 'raise ArgumentError', :aggregate_failures do
+ expect(::Packages::Debian::GenerateDistributionWorker).not_to receive(:perform_async)
+ expect { subject.execute }
+ .to not_change(Packages::Package, :count)
+ .and not_change(Packages::PackageFile, :count)
+ .and not_change(incoming.package_files, :count)
+ .and raise_error(ArgumentError, 'already processed package file')
+ end
+ end
+
+ context 'with invalid package file type' do
+ let(:package_file) { incoming.package_files.with_file_name('sample_1.2.3~alpha2.tar.xz').first }
+ let(:component_name) { 'main' }
+
+ it 'raise ArgumentError', :aggregate_failures do
+ expect(::Packages::Debian::GenerateDistributionWorker).not_to receive(:perform_async)
+ expect { subject.execute }
+ .to not_change(Packages::Package, :count)
+ .and not_change(Packages::PackageFile, :count)
+ .and not_change(incoming.package_files, :count)
+ .and raise_error(ArgumentError, 'invalid package file type: source')
+ end
+ end
+
+ context 'when creating package fails' do
+ let(:package_file) { incoming.package_files.with_file_name('libsample0_1.2.3~alpha2_amd64.deb').first }
+ let(:component_name) { 'main' }
+
+ before do
+ allow_next_instance_of(::Packages::Debian::FindOrCreatePackageService) do |find_or_create_package_service|
+ allow(find_or_create_package_service)
+ .to receive(:execute).and_raise(ActiveRecord::ConnectionTimeoutError, 'connect timeout')
+ end
+ end
+
+ it 're-raise error', :aggregate_failures do
+ expect(::Packages::Debian::GenerateDistributionWorker).not_to receive(:perform_async)
+ expect { subject.execute }
+ .to not_change(Packages::Package, :count)
+ .and not_change(Packages::PackageFile, :count)
+ .and not_change(incoming.package_files, :count)
+ .and raise_error(ActiveRecord::ConnectionTimeoutError, 'connect timeout')
+ end
+ end
+ end
+end
diff --git a/spec/support/shared_examples/requests/api/npm_packages_shared_examples.rb b/spec/support/shared_examples/requests/api/npm_packages_shared_examples.rb
index 85ac2b5e1ea..b55639a6b82 100644
--- a/spec/support/shared_examples/requests/api/npm_packages_shared_examples.rb
+++ b/spec/support/shared_examples/requests/api/npm_packages_shared_examples.rb
@@ -323,6 +323,171 @@ RSpec.shared_examples 'handling get metadata requests' do |scope: :project|
end
end
+RSpec.shared_examples 'handling audit request' do |path:, scope: :project|
+ using RSpec::Parameterized::TableSyntax
+
+ let(:headers) { {} }
+ let(:params) do
+ ActiveSupport::Gzip.compress(
+ Gitlab::Json.dump({
+ '@gitlab-org/npm-test': ['1.0.6'],
+ 'call-bind': ['1.0.2']
+ })
+ )
+ end
+
+ let(:default_headers) do
+ { 'HTTP_CONTENT_ENCODING' => 'gzip', 'CONTENT_TYPE' => 'application/json' }
+ end
+
+ subject { post(url, headers: headers.merge(default_headers), params: params) }
+
+ shared_examples 'accept audit request' do |status:|
+ it 'accepts the audit request' do
+ subject
+
+ expect(response).to have_gitlab_http_status(status)
+ expect(response.media_type).to eq('application/json')
+ expect(json_response).to eq([])
+ end
+ end
+
+ shared_examples 'reject audit request' do |status:|
+ it 'rejects the audit request' do
+ subject
+
+ expect(response).to have_gitlab_http_status(status)
+ end
+ end
+
+ shared_examples 'redirect audit request' do |status:|
+ it 'redirects audit request' do
+ subject
+
+ expect(response).to have_gitlab_http_status(status)
+ expect(response.headers['Location']).to eq("https://registry.npmjs.org/-/npm/v1/security/#{path}")
+ end
+ end
+
+ shared_examples 'handling all conditions' do
+ include_context 'dependency proxy helpers context'
+
+ where(:auth, :request_forward, :visibility, :user_role, :expected_result, :expected_status) do
+ nil | true | :public | nil | :reject | :unauthorized
+ nil | false | :public | nil | :reject | :unauthorized
+ nil | true | :private | nil | :reject | :unauthorized
+ nil | false | :private | nil | :reject | :unauthorized
+ nil | true | :internal | nil | :reject | :unauthorized
+ nil | false | :internal | nil | :reject | :unauthorized
+
+ :oauth | true | :public | :guest | :redirect | :temporary_redirect
+ :oauth | true | :public | :reporter | :redirect | :temporary_redirect
+ :oauth | false | :public | :guest | :accept | :ok
+ :oauth | false | :public | :reporter | :accept | :ok
+ :oauth | true | :private | :reporter | :redirect | :temporary_redirect
+ :oauth | false | :private | :guest | :reject | :forbidden
+ :oauth | false | :private | :reporter | :accept | :ok
+ :oauth | true | :private | :guest | :redirect | :temporary_redirect
+ :oauth | true | :internal | :guest | :redirect | :temporary_redirect
+ :oauth | true | :internal | :reporter | :redirect | :temporary_redirect
+ :oauth | false | :internal | :guest | :accept | :ok
+ :oauth | false | :internal | :reporter | :accept | :ok
+
+ :personal_access_token | true | :public | :guest | :redirect | :temporary_redirect
+ :personal_access_token | true | :public | :reporter | :redirect | :temporary_redirect
+ :personal_access_token | false | :public | :guest | :accept | :ok
+ :personal_access_token | false | :public | :reporter | :accept | :ok
+ :personal_access_token | true | :private | :guest | :redirect | :temporary_redirect
+ :personal_access_token | true | :private | :reporter | :redirect | :temporary_redirect
+ :personal_access_token | false | :private | :guest | :reject | :forbidden # instance might fail
+ :personal_access_token | false | :private | :reporter | :accept | :ok
+ :personal_access_token | true | :internal | :guest | :redirect | :temporary_redirect
+ :personal_access_token | true | :internal | :reporter | :redirect | :temporary_redirect
+ :personal_access_token | false | :internal | :guest | :accept | :ok
+ :personal_access_token | false | :internal | :reporter | :accept | :ok
+
+ :job_token | true | :public | :developer | :redirect | :temporary_redirect
+ :job_token | false | :public | :developer | :accept | :ok
+ :job_token | true | :private | :developer | :redirect | :temporary_redirect
+ :job_token | false | :private | :developer | :accept | :ok
+ :job_token | true | :internal | :developer | :redirect | :temporary_redirect
+ :job_token | false | :internal | :developer | :accept | :ok
+
+ :deploy_token | true | :public | nil | :redirect | :temporary_redirect
+ :deploy_token | false | :public | nil | :accept | :ok
+ :deploy_token | true | :private | nil | :redirect | :temporary_redirect
+ :deploy_token | false | :private | nil | :accept | :ok
+ :deploy_token | true | :internal | nil | :redirect | :temporary_redirect
+ :deploy_token | false | :internal | nil | :accept | :ok
+ end
+
+ with_them do
+ let(:headers) do
+ case auth
+ when :oauth
+ build_token_auth_header(token.plaintext_token)
+ when :personal_access_token
+ build_token_auth_header(personal_access_token.token)
+ when :job_token
+ build_token_auth_header(job.token)
+ when :deploy_token
+ build_token_auth_header(deploy_token.token)
+ else
+ {}
+ end
+ end
+
+ before do
+ project.send("add_#{user_role}", user) if user_role
+ project.update!(visibility: visibility.to_s)
+
+ if scope == :instance
+ allow_fetch_application_setting(attribute: "npm_package_requests_forwarding", return_value: request_forward)
+ else
+ allow_fetch_cascade_application_setting(attribute: "npm_package_requests_forwarding", return_value: request_forward)
+ end
+ end
+
+ example_name = "#{params[:expected_result]} audit request"
+ status = params[:expected_status]
+
+ if scope == :instance && params[:expected_status] != :unauthorized
+ if params[:request_forward]
+ example_name = 'redirect audit request'
+ status = :temporary_redirect
+ else
+ example_name = 'reject audit request'
+ status = :not_found
+ end
+ end
+
+ it_behaves_like example_name, status: status
+ end
+ end
+
+ context 'with a group namespace' do
+ it_behaves_like 'handling all conditions'
+ end
+
+ context 'with a developer' do
+ let(:headers) { build_token_auth_header(personal_access_token.token) }
+
+ before do
+ project.add_developer(user)
+ end
+
+ context 'with a job token' do
+ let(:headers) { build_token_auth_header(job.token) }
+
+ before do
+ job.update!(status: :success)
+ end
+
+ it_behaves_like 'reject audit request', status: :unauthorized
+ end
+ end
+end
+
RSpec.shared_examples 'handling get dist tags requests' do |scope: :project|
using RSpec::Parameterized::TableSyntax
include_context 'set package name from package name type'