Welcome to mirror list, hosted at ThFree Co, Russian Federation.

gitlab.com/gitlab-org/gitlab-foss.git - Unnamed repository; edit this file 'description' to name the repository.
summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--.gitlab/ci/rails.gitlab-ci.yml82
-rw-r--r--.gitlab/ci/rails/shared.gitlab-ci.yml55
-rw-r--r--.rubocop.yml1
-rw-r--r--Gemfile8
-rw-r--r--Gemfile.checksum11
-rw-r--r--Gemfile.lock42
-rw-r--r--app/assets/javascripts/editor/schema/ci.json12
-rw-r--r--app/events/project_authorizations/authorizations_added_event.rb16
-rw-r--r--app/finders/ci/runners_finder.rb2
-rw-r--r--app/graphql/resolvers/ci/runner_owner_project_resolver.rb2
-rw-r--r--app/graphql/resolvers/ci/runner_projects_resolver.rb2
-rw-r--r--app/graphql/resolvers/ci/runner_resolver.rb27
-rw-r--r--app/graphql/resolvers/ci/runners_resolver.rb2
-rw-r--r--app/models/ci/pipeline.rb6
-rw-r--r--app/models/ci/pipeline_metadata.rb2
-rw-r--r--app/models/ci/processable.rb4
-rw-r--r--app/models/compare.rb2
-rw-r--r--app/models/project_authorizations/changes.rb49
-rw-r--r--app/models/user.rb6
-rw-r--r--app/services/ci/cancel_pipeline_service.rb27
-rw-r--r--app/services/ci/pipeline_creation/cancel_redundant_pipelines_service.rb73
-rw-r--r--config/feature_flags/development/add_policy_approvers_to_rules.yml (renamed from config/feature_flags/development/create_personal_ns_outside_model.yml)10
-rw-r--r--config/feature_flags/development/ci_workflow_auto_cancel_on_new_commit.yml8
-rw-r--r--config/initializers/7_redis.rb3
-rw-r--r--config/initializers/sidekiq.rb27
-rw-r--r--config/initializers/sidekiq_cluster.rb2
-rw-r--r--config/sidekiq_queues.yml2
-rw-r--r--db/docs/batched_background_migrations/backfill_vs_code_settings_version.yml9
-rw-r--r--db/post_migrate/20231212135235_queue_backfill_vs_code_settings_version.rb27
-rw-r--r--db/schema_migrations/202312121352351
-rw-r--r--doc/api/groups.md20
-rw-r--r--doc/ci/testing/code_quality.md19
-rw-r--r--doc/ci/yaml/index.md49
-rw-r--r--doc/development/distributed_tracing.md4
-rw-r--r--doc/development/feature_flags/index.md14
-rw-r--r--doc/development/internal_analytics/metrics/metrics_dictionary.md6
-rw-r--r--doc/development/internal_analytics/review_guidelines.md2
-rw-r--r--doc/development/logging.md2
-rw-r--r--doc/development/prometheus_metrics.md8
-rw-r--r--doc/development/value_stream_analytics.md2
-rw-r--r--doc/development/value_stream_analytics/value_stream_analytics_aggregated_backend.md12
-rw-r--r--doc/security/hardening_application_recommendations.md22
-rw-r--r--doc/security/hardening_cicd_recommendations.md20
-rw-r--r--doc/security/hardening_general_concepts.md5
-rw-r--r--doc/update/package/index.md22
-rw-r--r--doc/user/analytics/analytics_dashboards.md4
-rw-r--r--doc/user/analytics/dora_metrics.md2
-rw-r--r--doc/user/analytics/merge_request_analytics.md6
-rw-r--r--doc/user/application_security/api_fuzzing/create_har_files.md2
-rw-r--r--doc/user/application_security/api_fuzzing/index.md6
-rw-r--r--doc/user/application_security/container_scanning/index.md5
-rw-r--r--doc/user/application_security/dast/checks/78.1.md2
-rw-r--r--doc/user/application_security/dast_api/index.md6
-rw-r--r--doc/user/application_security/dependency_scanning/index.md6
-rw-r--r--doc/user/application_security/policies/scan-result-policies.md6
-rw-r--r--doc/user/application_security/sast/index.md13
-rw-r--r--doc/user/application_security/secret_detection/index.md4
-rw-r--r--doc/user/group/access_and_permissions.md4
-rw-r--r--doc/user/group/devops_adoption/index.md14
-rw-r--r--doc/user/group/insights/index.md2
-rw-r--r--doc/user/group/subgroups/index.md2
-rw-r--r--doc/user/group/value_stream_analytics/index.md12
-rw-r--r--doc/user/product_analytics/index.md2
-rw-r--r--doc/user/product_analytics/instrumentation/browser_sdk.md14
-rw-r--r--doc/user/project/members/share_project_with_groups.md13
-rw-r--r--doc/user/project/service_desk/configure.md18
-rw-r--r--doc/user/project/use_project_as_go_package.md2
-rw-r--r--doc/user/project/working_with_projects.md2
-rw-r--r--gems/gitlab-database-load_balancing/Gemfile.lock2
-rw-r--r--gems/gitlab-http/Gemfile.lock2
-rw-r--r--gems/gitlab-utils/Gemfile.lock4
-rw-r--r--gems/gitlab-utils/gitlab-utils.gemspec2
-rw-r--r--lib/gitlab/background_migration/backfill_vs_code_settings_version.rb38
-rw-r--r--lib/gitlab/database/migrations/squasher.rb2
-rw-r--r--lib/gitlab/diff/rendered/notebook/diff_file_helper.rb2
-rw-r--r--lib/gitlab/error_tracking/processor/sidekiq_processor.rb2
-rw-r--r--lib/gitlab/file_detector.rb2
-rw-r--r--lib/gitlab/instrumentation/redis.rb4
-rw-r--r--lib/gitlab/instrumentation/redis_client_middleware.rb46
-rw-r--r--lib/gitlab/instrumentation/redis_helper.rb2
-rw-r--r--lib/gitlab/memory/watchdog/handlers/sidekiq_handler.rb4
-rw-r--r--lib/gitlab/patch/sidekiq_cron_poller.rb2
-rw-r--r--lib/gitlab/rack_attack/user_allowlist.rb2
-rw-r--r--lib/gitlab/redis/wrapper.rb30
-rw-r--r--lib/gitlab/runtime.rb9
-rw-r--r--lib/gitlab/sidekiq_config.rb3
-rw-r--r--lib/gitlab/sidekiq_config/cli_methods.rb2
-rw-r--r--lib/gitlab/sidekiq_logging/structured_logger.rb8
-rw-r--r--lib/gitlab/sidekiq_middleware/server_metrics.rb2
-rw-r--r--lib/gitlab/sidekiq_migrate_jobs.rb19
-rw-r--r--lib/gitlab/sidekiq_status.rb4
-rw-r--r--lib/tasks/gitlab/cleanup.rake2
-rw-r--r--qa/Gemfile.lock4
-rw-r--r--rubocop/feature_categories.rb2
-rw-r--r--rubocop/formatter/todo_formatter.rb2
-rwxr-xr-xscripts/failed_tests.rb2
-rwxr-xr-xscripts/feature_flags/used-feature-flags2
-rwxr-xr-xscripts/merge-auto-explain-logs2
-rwxr-xr-xscripts/setup/generate-as-if-foss-env.rb2
-rwxr-xr-xscripts/verify-tff-mapping2
-rw-r--r--spec/factories/ci/runners.rb3
-rw-r--r--spec/factories/users.rb2
-rw-r--r--spec/frontend/editor/schema/ci/ci_schema_spec.js14
-rw-r--r--spec/frontend/editor/schema/ci/yaml_tests/negative_tests/workflow/auto_cancel/on_job_failure.yml (renamed from spec/frontend/editor/schema/ci/yaml_tests/negative_tests/auto_cancel_pipeline.yml)1
-rw-r--r--spec/frontend/editor/schema/ci/yaml_tests/negative_tests/workflow/auto_cancel/on_new_commit.yml3
-rw-r--r--spec/frontend/editor/schema/ci/yaml_tests/positive_tests/auto_cancel_pipeline/on_job_failure/none.yml4
-rw-r--r--spec/frontend/editor/schema/ci/yaml_tests/positive_tests/workflow/auto_cancel/on_job_failure.yml (renamed from spec/frontend/editor/schema/ci/yaml_tests/positive_tests/auto_cancel_pipeline/on_job_failure/all.yml)1
-rw-r--r--spec/frontend/editor/schema/ci/yaml_tests/positive_tests/workflow/auto_cancel/on_new_commit.yml3
-rw-r--r--spec/graphql/resolvers/ci/group_runners_resolver_spec.rb2
-rw-r--r--spec/graphql/resolvers/ci/project_runners_resolver_spec.rb2
-rw-r--r--spec/graphql/resolvers/ci/runners_resolver_spec.rb10
-rw-r--r--spec/lib/gitlab/background_migration/backfill_vs_code_settings_version_spec.rb84
-rw-r--r--spec/lib/gitlab/ci/config/entry/auto_cancel_spec.rb2
-rw-r--r--spec/lib/gitlab/diff/rendered/notebook/diff_file_helper_spec.rb2
-rw-r--r--spec/lib/gitlab/instrumentation/redis_client_middleware_spec.rb224
-rw-r--r--spec/lib/gitlab/memory/watchdog/handlers/sidekiq_handler_spec.rb6
-rw-r--r--spec/lib/gitlab/runtime_spec.rb4
-rw-r--r--spec/lib/gitlab/sidekiq_config_spec.rb3
-rw-r--r--spec/lib/gitlab/sidekiq_logging/structured_logger_spec.rb2
-rw-r--r--spec/lib/gitlab/sidekiq_middleware/server_metrics_spec.rb4
-rw-r--r--spec/lib/gitlab/sidekiq_migrate_jobs_spec.rb7
-rw-r--r--spec/lib/gitlab/sidekiq_status_spec.rb2
-rw-r--r--spec/migrations/20231212135235_queue_backfill_vs_code_settings_version_spec.rb26
-rw-r--r--spec/models/ci/pipeline_metadata_spec.rb2
-rw-r--r--spec/models/ci/pipeline_spec.rb32
-rw-r--r--spec/models/ci/runner_spec.rb2
-rw-r--r--spec/models/project_authorizations/changes_spec.rb112
-rw-r--r--spec/models/user_spec.rb21
-rw-r--r--spec/requests/api/graphql/ci/runner_spec.rb158
-rw-r--r--spec/requests/api/graphql/ci/runners_spec.rb183
-rw-r--r--spec/services/ci/cancel_pipeline_service_spec.rb76
-rw-r--r--spec/services/ci/create_pipeline_service/workflow_auto_cancel_spec.rb2
-rw-r--r--spec/services/ci/pipeline_creation/cancel_redundant_pipelines_service_spec.rb209
-rw-r--r--spec/services/members/create_service_spec.rb26
-rw-r--r--spec/support/finder_collection.rb2
-rw-r--r--spec/support/helpers/dns_helpers.rb13
-rw-r--r--spec/support/shared_contexts/lib/gitlab/sidekiq_logging/structured_logger_shared_context.rb4
-rw-r--r--spec/support/shared_contexts/lib/gitlab/sidekiq_middleware/server_metrics_shared_context.rb1
-rw-r--r--spec/support/shared_examples/redis/redis_shared_examples.rb61
-rw-r--r--spec/support/sidekiq.rb10
-rw-r--r--spec/support/sidekiq_middleware.rb9
-rw-r--r--tooling/lib/tooling/crystalball/coverage_lines_strategy.rb4
-rw-r--r--tooling/lib/tooling/test_map_generator.rb2
-rw-r--r--vendor/gems/bundler-checksum/lib/bundler_checksum/command/lint.rb2
-rw-r--r--vendor/gems/sidekiq-reliable-fetch/Gemfile1
-rw-r--r--vendor/gems/sidekiq-reliable-fetch/Gemfile.lock68
-rw-r--r--vendor/gems/sidekiq-reliable-fetch/README.md2
-rw-r--r--vendor/gems/sidekiq-reliable-fetch/gitlab-sidekiq-fetcher.gemspec4
-rw-r--r--vendor/gems/sidekiq-reliable-fetch/lib/sidekiq/base_reliable_fetch.rb26
-rw-r--r--vendor/gems/sidekiq-reliable-fetch/lib/sidekiq/interrupted_set.rb2
-rw-r--r--vendor/gems/sidekiq-reliable-fetch/lib/sidekiq/semi_reliable_fetch.rb15
-rw-r--r--vendor/gems/sidekiq-reliable-fetch/spec/base_reliable_fetch_spec.rb38
-rw-r--r--vendor/gems/sidekiq-reliable-fetch/spec/fetch_shared_examples.rb73
-rw-r--r--vendor/gems/sidekiq-reliable-fetch/spec/reliable_fetch_spec.rb1
-rw-r--r--vendor/gems/sidekiq-reliable-fetch/spec/semi_reliable_fetch_spec.rb19
-rw-r--r--vendor/gems/sidekiq-reliable-fetch/spec/spec_helper.rb2
-rw-r--r--vendor/gems/sidekiq-reliable-fetch/tests/interruption/config.rb1
-rw-r--r--vendor/gems/sidekiq-reliable-fetch/tests/reliability/config.rb1
158 files changed, 1925 insertions, 757 deletions
diff --git a/.gitlab/ci/rails.gitlab-ci.yml b/.gitlab/ci/rails.gitlab-ci.yml
index a2564c143b8..784e0c20874 100644
--- a/.gitlab/ci/rails.gitlab-ci.yml
+++ b/.gitlab/ci/rails.gitlab-ci.yml
@@ -85,7 +85,7 @@ rspec migration pg14:
extends:
- .rspec-base-pg14
- .rspec-base-migration
- - .rspec-migration-parallel-pg14
+ - .rspec-migration-parallel
- .rails:rules:ee-and-foss-migration
rspec background_migration pg14:
@@ -154,8 +154,8 @@ rspec background_migration pg14 clusterwide-db:
rspec unit pg14:
extends:
- .rspec-base-pg14
+ - .rspec-unit-parallel
- .rails:rules:ee-and-foss-unit
- - .rspec-unit-pg14-parallel
rspec unit pg14 single-redis:
extends:
@@ -195,7 +195,7 @@ rspec unit pg14 praefect:
rspec integration pg14:
extends:
- .rspec-base-pg14
- - .rspec-integration-parallel-pg14
+ - .rspec-integration-parallel
- .rails:rules:ee-and-foss-integration
rspec integration pg14 single-redis:
@@ -236,8 +236,8 @@ rspec integration pg14 praefect:
rspec system pg14:
extends:
- .rspec-base-pg14
+ - .rspec-system-parallel
- .rails:rules:ee-and-foss-system
- - .rspec-system-parallel-pg14
variables:
DEBUG_GITLAB_TRANSACTION_STACK: "true"
@@ -354,7 +354,7 @@ rspec:artifact-collector unit:
- .artifact-collector
- .rails:rules:ee-and-foss-unit
needs:
- - rspec unit pg14 # 24 jobs
+ - rspec unit pg14 # 32 jobs
- job: rspec clickhouse # 1 job
optional: true
@@ -363,17 +363,17 @@ rspec:artifact-collector system:
- .artifact-collector
- .rails:rules:ee-and-foss-system
needs:
- - rspec system pg14 # 26 jobs
+ - rspec system pg14 # 32 jobs
rspec:artifact-collector remainder:
extends:
- .artifact-collector
needs:
- - job: rspec integration pg14 # 13 jobs
+ - job: rspec integration pg14 # 16 jobs
optional: true
- - job: rspec migration pg14 # 12 jobs
+ - job: rspec migration pg14 # 15 jobs
optional: true
- - job: rspec background_migration pg14 # 4 jobs
+ - job: rspec background_migration pg14 # 5 jobs
optional: true
rules:
- !reference ['.rails:rules:ee-and-foss-integration', rules]
@@ -385,58 +385,73 @@ rspec:artifact-collector as-if-foss unit:
- .artifact-collector
- .rails:rules:as-if-foss-unit
needs:
- - rspec unit pg14-as-if-foss # 28 jobs
+ - rspec unit pg14-as-if-foss # 32 jobs
rspec:artifact-collector as-if-foss system:
extends:
- .artifact-collector
- .rails:rules:as-if-foss-system
needs:
- - rspec system pg14-as-if-foss # 28 jobs
+ - rspec system pg14-as-if-foss # 32 jobs
rspec:artifact-collector as-if-foss remainder:
extends:
- .artifact-collector
needs:
- - job: rspec integration pg14-as-if-foss # 12 jobs
+ - job: rspec integration pg14-as-if-foss # 16 jobs
optional: true
- - job: rspec migration pg14-as-if-foss # 8 jobs
+ - job: rspec migration pg14-as-if-foss # 15 jobs
optional: true
- - job: rspec background_migration pg14-as-if-foss # 4 jobs
+ - job: rspec background_migration pg14-as-if-foss # 5 jobs
optional: true
rules:
- !reference ['.rails:rules:as-if-foss-integration', rules]
- !reference ['.rails:rules:as-if-foss-migration', rules]
- !reference ['.rails:rules:as-if-foss-background-migration', rules]
-rspec:artifact-collector single-redis:
+rspec:artifact-collector unit single-redis:
extends:
- .artifact-collector
- .rails:rules:single-redis
needs:
- - rspec unit pg14 single-redis # 28 jobs
- - rspec integration pg14 single-redis # 12 jobs
+ - rspec unit pg14 single-redis # 32 jobs
rspec:artifact-collector system single-redis:
extends:
- .artifact-collector
- .rails:rules:single-redis
needs:
- - rspec system pg14 single-redis # 28 jobs
+ - rspec system pg14 single-redis # 32 jobs
+
+rspec:artifact-collector remainder single-redis:
+ extends:
+ - .artifact-collector
+ - .rails:rules:single-redis
+ needs:
+ - rspec integration pg14 single-redis # 16 jobs
rspec:artifact-collector ee single-redis:
extends:
- .artifact-collector
- .rails:rules:single-redis
needs:
- - job: rspec-ee unit pg14 single-redis # 18 jobs
+ - job: rspec-ee unit pg14 single-redis # 28 jobs
optional: true
- - job: rspec-ee integration pg14 single-redis # 6 jobs
+ - job: rspec-ee integration pg14 single-redis # 7 jobs
optional: true
- - job: rspec-ee system pg14 single-redis # 10 jobs
+ - job: rspec-ee system pg14 single-redis # 14 jobs
+ optional: true
+
+rspec:artifact-collector ee unit:
+ extends:
+ - .artifact-collector
+ needs:
+ - job: rspec-ee unit pg14 # 28 jobs
optional: true
+ rules:
+ - !reference ['.rails:rules:ee-only-unit', rules]
-rspec:artifact-collector ee:
+rspec:artifact-collector ee remainder:
extends:
- .artifact-collector
needs:
@@ -444,18 +459,15 @@ rspec:artifact-collector ee:
optional: true
- job: rspec-ee background_migration pg14 # 2 jobs
optional: true
- - job: rspec-ee unit pg14 # 22 jobs
- optional: true
- job: rspec-ee clickhouse # 1 job
optional: true
- - job: rspec-ee integration pg14 # 5 jobs
+ - job: rspec-ee integration pg14 # 7 jobs
optional: true
- - job: rspec-ee system pg14 # 12 jobs
+ - job: rspec-ee system pg14 # 14 jobs
optional: true
rules:
- !reference ['.rails:rules:ee-only-migration', rules]
- !reference ['.rails:rules:ee-only-background-migration', rules]
- - !reference ['.rails:rules:ee-only-unit', rules]
- !reference ['.rails:rules:ee-only-integration', rules]
- !reference ['.rails:rules:ee-only-system', rules]
@@ -475,10 +487,12 @@ rspec:coverage:
optional: true
- job: rspec:artifact-collector remainder
optional: true
- - job: rspec:artifact-collector single-redis
+ - job: rspec:artifact-collector unit single-redis
optional: true
- job: rspec:artifact-collector system single-redis
optional: true
+ - job: rspec:artifact-collector remainder single-redis
+ optional: true
# as-if-foss jobs
- job: rspec:artifact-collector as-if-foss unit
optional: true
@@ -489,7 +503,9 @@ rspec:coverage:
# EE jobs
- job: rspec:artifact-collector ee single-redis
optional: true
- - job: rspec:artifact-collector ee
+ - job: rspec:artifact-collector ee unit
+ optional: true
+ - job: rspec:artifact-collector ee remainder
optional: true
# Memory jobs
- job: memory-on-boot
@@ -637,7 +653,7 @@ rspec migration pg14-as-if-foss:
extends:
- .rspec-base-pg14-as-if-foss
- .rspec-base-migration
- - .rspec-migration-parallel-pg14-as-if-foss
+ - .rspec-migration-parallel
- .rails:rules:as-if-foss-migration
rspec background_migration pg14-as-if-foss:
@@ -874,8 +890,8 @@ rspec-ee background_migration pg14 praefect:
rspec-ee unit pg14:
extends:
- .rspec-ee-base-pg14
+ - .rspec-ee-unit-parallel
- .rails:rules:ee-only-unit
- - .rspec-ee-unit-pg14-parallel
rspec-ee unit pg14 es8:
extends:
@@ -913,8 +929,8 @@ rspec-ee unit pg14 clusterwide-db:
rspec-ee integration pg14:
extends:
- .rspec-ee-base-pg14
+ - .rspec-ee-integration-parallel
- .rails:rules:ee-only-integration
- - .rspec-ee-integration-parallel-pg14
rspec-ee integration pg14 es8:
extends:
@@ -952,8 +968,8 @@ rspec-ee integration pg14 clusterwide-db:
rspec-ee system pg14:
extends:
- .rspec-ee-base-pg14
+ - .rspec-ee-system-parallel
- .rails:rules:ee-only-system
- - .rspec-ee-system-parallel-pg14
rspec-ee system pg14 es8:
extends:
diff --git a/.gitlab/ci/rails/shared.gitlab-ci.yml b/.gitlab/ci/rails/shared.gitlab-ci.yml
index 1ad1f60820a..1f420b9b8ec 100644
--- a/.gitlab/ci/rails/shared.gitlab-ci.yml
+++ b/.gitlab/ci/rails/shared.gitlab-ci.yml
@@ -265,10 +265,10 @@ include:
# rspec:artifact-collector jobs in .gitlab/ci/rails.gitlab-ci.yml
# Please double-check and adjust accordingly
.rspec-migration-parallel:
- parallel: 8
+ parallel: 15
.rspec-background-migration-parallel:
- parallel: 4
+ parallel: 5
.rspec-ee-migration-parallel:
parallel: 2
@@ -277,58 +277,19 @@ include:
parallel: 2
.rspec-unit-parallel:
- parallel: 28
+ parallel: 32
.rspec-ee-unit-parallel:
- parallel: 18
+ parallel: 28
.rspec-integration-parallel:
- parallel: 12
+ parallel: 16
.rspec-ee-integration-parallel:
- parallel: 6
+ parallel: 7
.rspec-system-parallel:
- parallel: 28
+ parallel: 32
.rspec-ee-system-parallel:
- parallel: 10
-
-# Optimizations to distribute CI time more evenly across the CI/CD pipeline
-#
-# See https://gitlab.com/gitlab-org/gitlab/-/merge_requests/133976 for more info
-
-# Giving 4 jobs to `rspec-ee unit pg14`
-.rspec-unit-pg14-parallel:
- parallel: 24
-
-# Receiving 4 jobs from `rspec unit pg14`
-.rspec-ee-unit-pg14-parallel:
- parallel: 22
-
-# Giving 2 jobs to `rspec-ee system pg14`
-.rspec-system-parallel-pg14:
- parallel: 26
-
-# Receiving 2 jobs from `rspec system pg14`
-.rspec-ee-system-parallel-pg14:
- parallel: 12
-
-# Adding 4 jobs, as those needs to be a bit faster.
-.rspec-migration-parallel-pg14:
- parallel: 12
-
-# Adding 2 jobs, as those needs to be a bit faster.
-.rspec-migration-parallel-pg14-as-if-foss:
- parallel: 10
-
-# Giving 1 job to `rspec integration pg14`
-.rspec-ee-integration-parallel-pg14:
- parallel: 5
-
-# Receiving 1 job from `rspec-ee integration pg14`
-.rspec-integration-parallel-pg14:
- parallel: 13
-
-# rspec job parallel configs
-############################
+ parallel: 14
diff --git a/.rubocop.yml b/.rubocop.yml
index acd033fdaf8..dac452cb484 100644
--- a/.rubocop.yml
+++ b/.rubocop.yml
@@ -1035,6 +1035,7 @@ Cop/FeatureFlagUsage:
Style/ArgumentsForwarding:
Enabled: true
+ UseAnonymousForwarding: false # Turn this on only after Ruby 3.2+ is required
Search/NamespacedClass:
Enabled: true
diff --git a/Gemfile b/Gemfile
index 30e4041df73..be3b46f09d0 100644
--- a/Gemfile
+++ b/Gemfile
@@ -133,11 +133,11 @@ gem 'gitlab_omniauth-ldap', '~> 2.2.0', require: 'omniauth-ldap' # rubocop:todo
gem 'net-ldap', '~> 0.17.1' # rubocop:todo Gemfile/MissingFeatureCategory
# API
-gem 'grape', '~> 1.7.1', feature_category: :api
+gem 'grape', '~> 2.0.0', feature_category: :api
gem 'grape-entity', '~> 0.10.0', feature_category: :api
-gem 'grape-swagger', '~> 1.6.1', group: [:development, :test], feature_category: :api
+gem 'grape-swagger', '~> 2.0.1', group: [:development, :test], feature_category: :api
gem 'grape-swagger-entity', '~> 0.5.1', group: [:development, :test], feature_category: :api
-gem 'grape-path-helpers', '~> 1.7.1', feature_category: :api
+gem 'grape-path-helpers', '~> 2.0.0', feature_category: :api
gem 'rack-cors', '~> 2.0.1', require: 'rack/cors' # rubocop:todo Gemfile/MissingFeatureCategory
# GraphQL API
@@ -251,7 +251,7 @@ gem 'state_machines-activerecord', '~> 0.8.0' # rubocop:todo Gemfile/MissingFeat
gem 'acts-as-taggable-on', '~> 10.0' # rubocop:todo Gemfile/MissingFeatureCategory
# Background jobs
-gem 'sidekiq', '~> 6.5.10' # rubocop:todo Gemfile/MissingFeatureCategory
+gem 'sidekiq', '~> 7.1.6' # rubocop:todo Gemfile/MissingFeatureCategory
gem 'sidekiq-cron', '~> 1.12.0', feature_category: :shared
gem 'gitlab-sidekiq-fetcher', path: 'vendor/gems/sidekiq-reliable-fetch', require: 'sidekiq-reliable-fetch' # rubocop:todo Gemfile/MissingFeatureCategory
diff --git a/Gemfile.checksum b/Gemfile.checksum
index 73998da5c88..e5eb59ed9cd 100644
--- a/Gemfile.checksum
+++ b/Gemfile.checksum
@@ -128,7 +128,7 @@
{"name":"doorkeeper-openid_connect","version":"1.8.7","platform":"ruby","checksum":"71edaf33118deefe25674ba3f8280c32835f057351f70e9beb222c0fd6b8e786"},
{"name":"dotenv","version":"2.7.6","platform":"ruby","checksum":"2451ed5e8e43776d7a787e51d6f8903b98e446146c7ad143d5678cc2c409d547"},
{"name":"dry-cli","version":"1.0.0","platform":"ruby","checksum":"28ead169f872954dd08910eb8ead59cf86cd18b4aab321e8eeefe945749569f0"},
-{"name":"dry-core","version":"1.0.0","platform":"ruby","checksum":"7a92099870967f0d2c9997950608cb8bb622dafeea20b2fe1dd49e9ba1d0f305"},
+{"name":"dry-core","version":"1.0.1","platform":"ruby","checksum":"f32f4245e0f54e787f3708584ed8f7545aaf8dd99072e36f169312468ec5450d"},
{"name":"dry-inflector","version":"1.0.0","platform":"ruby","checksum":"6ad22361ca2d6f3f001ae3037ffcfea01163f644280d13a9195d3c3a94dd1626"},
{"name":"dry-logic","version":"1.5.0","platform":"ruby","checksum":"99ed2180f1970c3d8247004f277a01dffbe8e82cf6680de9c7209312d86cd416"},
{"name":"dry-types","version":"1.7.1","platform":"ruby","checksum":"12165841145a18dd22151f143707b90c8093f71e5ae06ee0f2301f5321f8cdb8"},
@@ -258,10 +258,10 @@
{"name":"googleapis-common-protos-types","version":"1.5.0","platform":"ruby","checksum":"5769cf7376abc86ef7f5897a4aaca1d5c5a3c49ddabeddd2c251fcf8155f858b"},
{"name":"googleauth","version":"1.3.0","platform":"ruby","checksum":"51dd7362353cf1e90a2d01e1fb94321ae3926c776d4dc4a79db65230217ffcc2"},
{"name":"gpgme","version":"2.0.23","platform":"ruby","checksum":"c87bbafdb8719da7c58ebcac08297aa1fb227022ac6cd2972829ba68adc91c04"},
-{"name":"grape","version":"1.7.1","platform":"ruby","checksum":"6b679d8918ee3dc19b0da95a5069dc95a71a15cf5788f5f787bb2ededf58cbb6"},
+{"name":"grape","version":"2.0.0","platform":"ruby","checksum":"3aeff94c17e84ccead4ff98833df691e7da0c108878cc128ca31f80c1047494a"},
{"name":"grape-entity","version":"0.10.0","platform":"ruby","checksum":"9aed1e7cbbc96d9e73f72e5f32c776d4ba8a5baf54c3acda2682008dba2b2cfe"},
-{"name":"grape-path-helpers","version":"1.7.1","platform":"ruby","checksum":"2e27271a20d4073e3a3b2b955425c7f803e198be3ba8f6e59e3d59643c5381e2"},
-{"name":"grape-swagger","version":"1.6.1","platform":"ruby","checksum":"0fd2d38476524b66e8d06de71e6c481d34286d895b12161f5df4427d66d5c69f"},
+{"name":"grape-path-helpers","version":"2.0.1","platform":"ruby","checksum":"ad5216e52c6e796738a9118087352ab4c962900dbad1d8f8c0f96e093c6702d7"},
+{"name":"grape-swagger","version":"2.0.1","platform":"ruby","checksum":"0f90bede86dfe0f5317ea52fe9bfa93e595020e848cb46f1f8c47be04cb4c790"},
{"name":"grape-swagger-entity","version":"0.5.1","platform":"ruby","checksum":"f51e372d00ac96cf90d948f87b3f4eb287ab053976ca57ad503d442ad8605523"},
{"name":"grape_logging","version":"1.8.4","platform":"ruby","checksum":"efcc3e322dbd5d620a68f078733b7db043cf12680144cd03c982f14115c792d1"},
{"name":"graphiql-rails","version":"1.8.0","platform":"ruby","checksum":"02e2c5098be2c6c29219a0e9b2910a2cd3c494301587a3199a7c4484d8038ed1"},
@@ -514,6 +514,7 @@
{"name":"redcarpet","version":"3.6.0","platform":"ruby","checksum":"8ad1889c0355ff4c47174af14edd06d62f45a326da1da6e8a121d59bdcd2e9e9"},
{"name":"redis","version":"4.8.0","platform":"ruby","checksum":"2000cf5014669c9dc821704b6d322a35a9a33852a95208911d9175d63b448a44"},
{"name":"redis-actionpack","version":"5.4.0","platform":"ruby","checksum":"f10cf649ab05914716d63334d7f709221ecc883b87cf348f90ecfe0c35ea3540"},
+{"name":"redis-client","version":"0.19.0","platform":"ruby","checksum":"6ed9af23ff5aa87cf4d59439db77082b4cae5a0abbdd114ec5420bd63456324d"},
{"name":"redis-namespace","version":"1.10.0","platform":"ruby","checksum":"2c1c6ea7c6c5e343e75b9bee3aa4c265e364a5b9966507397467af2bb3758d94"},
{"name":"redis-rack","version":"3.0.0","platform":"ruby","checksum":"abb50b82ae10ad4d11ca2e4901bfc2b98256cdafbbd95f80c86fc9e001478380"},
{"name":"redis-store","version":"1.10.0","platform":"ruby","checksum":"f258894f9f7e82834308a3d86242294f0cff2c9db9ae66e5cb4c553a5ec8b09e"},
@@ -590,7 +591,7 @@
{"name":"sexp_processor","version":"4.17.0","platform":"ruby","checksum":"4daa4874ce1838cd801c65e66ed5d4f140024404a3de7482c36d4ef2604dff6f"},
{"name":"shellany","version":"0.0.1","platform":"ruby","checksum":"0e127a9132698766d7e752e82cdac8250b6adbd09e6c0a7fbbb6f61964fedee7"},
{"name":"shoulda-matchers","version":"5.1.0","platform":"ruby","checksum":"a01d20589989e9653ab4a28c67d9db2b82bcf0a2496cf01d5e1a95a4aaaf5b07"},
-{"name":"sidekiq","version":"6.5.12","platform":"ruby","checksum":"b4f93b2204c42220d0b526a7b8e0c49b5f9da82c1ce1a05d2baf1e8f744c197f"},
+{"name":"sidekiq","version":"7.1.6","platform":"ruby","checksum":"7859da66d5bcef3c22bea2c3091d08c866890168e003f5bf4dea197dc37843a2"},
{"name":"sidekiq-cron","version":"1.12.0","platform":"ruby","checksum":"6663080a454088bd88773a0da3ae91e554b8a2e8b06cfc629529a83fd1a3096c"},
{"name":"sigdump","version":"0.2.4","platform":"ruby","checksum":"0bf2176e55c1a262788623fe5ea57caddd6ba2abebe5e349d9d5e7c3a3010ed7"},
{"name":"signet","version":"0.17.0","platform":"ruby","checksum":"1d2831930dc28da32e34bec68cf7ded97ee2867b208f97c500ee293829cb0004"},
diff --git a/Gemfile.lock b/Gemfile.lock
index ec4e4e850b2..3cbaf38ed71 100644
--- a/Gemfile.lock
+++ b/Gemfile.lock
@@ -91,7 +91,7 @@ PATH
actionview (>= 6.1.7.2)
activesupport (>= 6.1.7.2)
addressable (~> 2.8)
- nokogiri (~> 1.15.2)
+ nokogiri (>= 1.15)
rake (~> 13.0)
PATH
@@ -181,9 +181,9 @@ PATH
PATH
remote: vendor/gems/sidekiq-reliable-fetch
specs:
- gitlab-sidekiq-fetcher (0.10.0)
+ gitlab-sidekiq-fetcher (0.11.0)
json (>= 2.5)
- sidekiq (~> 6.1)
+ sidekiq (~> 7.0)
GEM
remote: https://rubygems.org/
@@ -502,7 +502,7 @@ GEM
jwt (>= 2.5)
dotenv (2.7.6)
dry-cli (1.0.0)
- dry-core (1.0.0)
+ dry-core (1.0.1)
concurrent-ruby (~> 1.0)
zeitwerk (~> 2.6)
dry-inflector (1.0.0)
@@ -816,23 +816,24 @@ GEM
signet (>= 0.16, < 2.a)
gpgme (2.0.23)
mini_portile2 (~> 2.7)
- grape (1.7.1)
- activesupport
+ grape (2.0.0)
+ activesupport (>= 5)
builder
dry-types (>= 1.1)
mustermann-grape (~> 1.0.0)
- rack (>= 1.3.0, < 3)
+ rack (>= 1.3.0)
rack-accept
grape-entity (0.10.0)
activesupport (>= 3.0.0)
multi_json (>= 1.3.2)
- grape-path-helpers (1.7.1)
+ grape-path-helpers (2.0.1)
activesupport
- grape (~> 1.3)
+ grape (~> 2.0)
rake (> 12)
ruby2_keywords (~> 0.0.2)
- grape-swagger (1.6.1)
- grape (~> 1.3)
+ grape-swagger (2.0.1)
+ grape (>= 1.7, < 3.0)
+ rack-test (~> 2)
grape-swagger-entity (0.5.1)
grape-entity (>= 0.6.0)
grape-swagger (>= 1.2.0)
@@ -1360,6 +1361,8 @@ GEM
actionpack (>= 5, < 8)
redis-rack (>= 2.1.0, < 4)
redis-store (>= 1.1.0, < 2)
+ redis-client (0.19.0)
+ connection_pool
redis-namespace (1.10.0)
redis (>= 4)
redis-rack (3.0.0)
@@ -1546,10 +1549,11 @@ GEM
shellany (0.0.1)
shoulda-matchers (5.1.0)
activesupport (>= 5.2.0)
- sidekiq (6.5.12)
- connection_pool (>= 2.2.5, < 3)
- rack (~> 2.0)
- redis (>= 4.5.0, < 5)
+ sidekiq (7.1.6)
+ concurrent-ruby (< 2)
+ connection_pool (>= 2.3.0)
+ rack (>= 2.2.4)
+ redis-client (>= 0.14.0)
sidekiq-cron (1.12.0)
fugit (~> 1.8)
globalid (>= 1.0.1)
@@ -1922,10 +1926,10 @@ DEPENDENCIES
google-cloud-storage (~> 1.45.0)
google-protobuf (~> 3.25, >= 3.25.1)
gpgme (~> 2.0.23)
- grape (~> 1.7.1)
+ grape (~> 2.0.0)
grape-entity (~> 0.10.0)
- grape-path-helpers (~> 1.7.1)
- grape-swagger (~> 1.6.1)
+ grape-path-helpers (~> 2.0.0)
+ grape-swagger (~> 2.0.1)
grape-swagger-entity (~> 0.5.1)
grape_logging (~> 1.8)
graphiql-rails (~> 1.8.0)
@@ -2075,7 +2079,7 @@ DEPENDENCIES
sentry-ruby (~> 5.10.0)
sentry-sidekiq (~> 5.10.0)
shoulda-matchers (~> 5.1.0)
- sidekiq (~> 6.5.10)
+ sidekiq (~> 7.1.6)
sidekiq-cron (~> 1.12.0)
sigdump (~> 0.2.4)
simple_po_parser (~> 1.1.6)
diff --git a/app/assets/javascripts/editor/schema/ci.json b/app/assets/javascripts/editor/schema/ci.json
index 1fb68394912..63c1ccb36da 100644
--- a/app/assets/javascripts/editor/schema/ci.json
+++ b/app/assets/javascripts/editor/schema/ci.json
@@ -944,7 +944,8 @@
},
"workflowAutoCancel": {
"type": "object",
- "markdownDescription": "Define the rules for when pipeline should be automatically cancelled.",
+ "description": "Define the rules for when pipeline should be automatically cancelled.",
+ "additionalProperties": false,
"properties": {
"on_job_failure": {
"markdownDescription": "Define which jobs to stop after a job fails.",
@@ -954,6 +955,15 @@
"none",
"all"
]
+ },
+ "on_new_commit": {
+ "markdownDescription": "Configure the behavior of the auto-cancel redundant pipelines feature. [Learn More](https://docs.gitlab.com/ee/ci/yaml/#workflowauto_cancelon_new_commit)",
+ "type": "string",
+ "enum": [
+ "conservative",
+ "interruptible",
+ "none"
+ ]
}
}
},
diff --git a/app/events/project_authorizations/authorizations_added_event.rb b/app/events/project_authorizations/authorizations_added_event.rb
new file mode 100644
index 00000000000..521a862218d
--- /dev/null
+++ b/app/events/project_authorizations/authorizations_added_event.rb
@@ -0,0 +1,16 @@
+# frozen_string_literal: true
+
+module ProjectAuthorizations
+ class AuthorizationsAddedEvent < ::Gitlab::EventStore::Event
+ def schema
+ {
+ 'type' => 'object',
+ 'required' => %w[project_id user_ids],
+ 'properties' => {
+ 'project_id' => { 'type' => 'integer' },
+ 'user_ids' => { 'type' => 'array' }
+ }
+ }
+ end
+ end
+end
diff --git a/app/finders/ci/runners_finder.rb b/app/finders/ci/runners_finder.rb
index 19642f58104..88402748083 100644
--- a/app/finders/ci/runners_finder.rb
+++ b/app/finders/ci/runners_finder.rb
@@ -138,7 +138,7 @@ module Ci
end
def request_tag_list!
- @runners = @runners.with_tags if !@params[:preload].present? || @params.dig(:preload, :tag_name)
+ @runners = @runners.with_tags if @params.exclude?(:preload) || @params.dig(:preload, :tag_name)
end
end
end
diff --git a/app/graphql/resolvers/ci/runner_owner_project_resolver.rb b/app/graphql/resolvers/ci/runner_owner_project_resolver.rb
index f4e044b81c9..28c39427872 100644
--- a/app/graphql/resolvers/ci/runner_owner_project_resolver.rb
+++ b/app/graphql/resolvers/ci/runner_owner_project_resolver.rb
@@ -34,7 +34,7 @@ module Resolvers
def resolve_owner
return unless runner.project_type?
- BatchLoader::GraphQL.for(runner.id).batch(key: :runner_owner_projects) do |runner_ids, loader|
+ BatchLoader::GraphQL.for(runner.id).batch do |runner_ids, loader|
# rubocop: disable CodeReuse/ActiveRecord
runner_and_projects_with_row_number =
::Ci::RunnerProject
diff --git a/app/graphql/resolvers/ci/runner_projects_resolver.rb b/app/graphql/resolvers/ci/runner_projects_resolver.rb
index c5037965e20..99c9bba1bd6 100644
--- a/app/graphql/resolvers/ci/runner_projects_resolver.rb
+++ b/app/graphql/resolvers/ci/runner_projects_resolver.rb
@@ -28,7 +28,7 @@ module Resolvers
return unless runner.project_type?
# rubocop:disable CodeReuse/ActiveRecord
- BatchLoader::GraphQL.for(runner.id).batch(key: :runner_projects) do |runner_ids, loader|
+ BatchLoader::GraphQL.for(runner.id).batch do |runner_ids, loader|
plucked_runner_and_project_ids = ::Ci::RunnerProject
.select(:runner_id, :project_id)
.where(runner_id: runner_ids)
diff --git a/app/graphql/resolvers/ci/runner_resolver.rb b/app/graphql/resolvers/ci/runner_resolver.rb
index 4250b069d20..60fb4163afe 100644
--- a/app/graphql/resolvers/ci/runner_resolver.rb
+++ b/app/graphql/resolvers/ci/runner_resolver.rb
@@ -6,13 +6,12 @@ module Resolvers
include LooksAhead
type Types::Ci::RunnerType, null: true
- extras [:lookahead]
description 'Runner information.'
argument :id,
- type: ::Types::GlobalIDType[::Ci::Runner],
- required: true,
- description: 'Runner ID.'
+ type: ::Types::GlobalIDType[::Ci::Runner],
+ required: true,
+ description: 'Runner ID.'
def resolve_with_lookahead(id:)
find_runner(id: id)
@@ -21,19 +20,13 @@ module Resolvers
private
def find_runner(id:)
- runner_id = GitlabSchema.parse_gid(id, expected_type: ::Ci::Runner).model_id.to_i
- key = {
- preload_tag_list: lookahead.selects?(:tag_list),
- preload_creator: lookahead.selects?(:created_by)
- }
-
- BatchLoader::GraphQL.for(runner_id).batch(key: key) do |ids, loader, batch|
- results = ::Ci::Runner.id_in(ids)
- results = results.with_tags if batch[:key][:preload_tag_list]
- results = results.with_creator if batch[:key][:preload_creator]
-
- results.each { |record| loader.call(record.id, record) }
- end
+ preloads = []
+ preloads << :creator if lookahead.selects?(:created_by)
+ preloads << :tags if lookahead.selects?(:tag_list)
+
+ runner_id = GitlabSchema.parse_gid(id, expected_type: ::Ci::Runner).model_id
+
+ ::Gitlab::Graphql::Loaders::BatchModelLoader.new(::Ci::Runner, runner_id, preloads).find
end
end
end
diff --git a/app/graphql/resolvers/ci/runners_resolver.rb b/app/graphql/resolvers/ci/runners_resolver.rb
index 9121c413b1f..38d2ebe046b 100644
--- a/app/graphql/resolvers/ci/runners_resolver.rb
+++ b/app/graphql/resolvers/ci/runners_resolver.rb
@@ -82,7 +82,7 @@ module Resolvers
creator_id:
params[:creator_id] ? ::GitlabSchema.parse_gid(params[:creator_id], expected_type: ::User).model_id : nil,
version_prefix: params[:version_prefix],
- preload: false # we'll handle preloading ourselves
+ preload: {} # we'll handle preloading ourselves
}.compact
.merge(parent_param)
end
diff --git a/app/models/ci/pipeline.rb b/app/models/ci/pipeline.rb
index 97e49c15814..1bf4d585e1c 100644
--- a/app/models/ci/pipeline.rb
+++ b/app/models/ci/pipeline.rb
@@ -438,7 +438,7 @@ module Ci
where_exists(Ci::Build.latest.scoped_pipeline.with_artifacts(reports_scope))
end
- scope :with_only_interruptible_builds, -> do
+ scope :conservative_interruptible, -> do
where_not_exists(
Ci::Build.scoped_pipeline.with_status(STARTED_STATUSES).not_interruptible
)
@@ -1393,6 +1393,10 @@ module Ci
merge_request.merge_request_diff_for(merge_request_diff_sha)
end
+ def auto_cancel_on_new_commit
+ pipeline_metadata&.auto_cancel_on_new_commit || 'conservative'
+ end
+
private
def add_message(severity, content)
diff --git a/app/models/ci/pipeline_metadata.rb b/app/models/ci/pipeline_metadata.rb
index 37fa3e32ad8..a41cdf61b71 100644
--- a/app/models/ci/pipeline_metadata.rb
+++ b/app/models/ci/pipeline_metadata.rb
@@ -7,7 +7,7 @@ module Ci
enum auto_cancel_on_new_commit: {
conservative: 0,
interruptible: 1,
- disabled: 2
+ none: 2
}, _prefix: true
enum auto_cancel_on_job_failure: {
diff --git a/app/models/ci/processable.rb b/app/models/ci/processable.rb
index 414d36da7c3..989d6337ab7 100644
--- a/app/models/ci/processable.rb
+++ b/app/models/ci/processable.rb
@@ -33,6 +33,10 @@ module Ci
where('NOT EXISTS (?)', needs)
end
+ scope :interruptible, -> do
+ joins(:metadata).merge(Ci::BuildMetadata.with_interruptible)
+ end
+
scope :not_interruptible, -> do
joins(:metadata).where.not(
Ci::BuildMetadata.table_name => { id: Ci::BuildMetadata.scoped_build.with_interruptible.select(:id) }
diff --git a/app/models/compare.rb b/app/models/compare.rb
index 58279cb58aa..1471a73f327 100644
--- a/app/models/compare.rb
+++ b/app/models/compare.rb
@@ -1,6 +1,6 @@
# frozen_string_literal: true
-require 'set'
+require 'set' # rubocop:disable Lint/RedundantRequireStatement -- Ruby 3.1 and earlier needs this. Drop this line after Ruby 3.2+ is only supported.
class Compare
include Gitlab::Utils::StrongMemoize
diff --git a/app/models/project_authorizations/changes.rb b/app/models/project_authorizations/changes.rb
index ac52bdfdb07..256207d2cc7 100644
--- a/app/models/project_authorizations/changes.rb
+++ b/app/models/project_authorizations/changes.rb
@@ -21,6 +21,7 @@ module ProjectAuthorizations
@authorizations_to_add = []
@affected_project_ids = Set.new
@removed_user_ids = Set.new
+ @added_user_ids = Set.new
yield self
end
@@ -61,6 +62,7 @@ module ProjectAuthorizations
def add_authorizations
insert_all_in_batches(authorizations_to_add)
@affected_project_ids += authorizations_to_add.pluck(:project_id)
+ @added_user_ids += authorizations_to_add.pluck(:user_id)
end
def delete_authorizations_for_user
@@ -139,23 +141,52 @@ module ProjectAuthorizations
end
def publish_events
+ publish_changed_event
+ publish_removed_event
+ publish_added_event
+ end
+
+ def publish_changed_event
+ # This event is used to add policy approvers to approval rules by re-syncing all project policies, which is costly.
+ # If the feature flag below is enabled, the policies won't be re-synced and
+ # the approvers will be added via `AuthorizationsAddedEvent`.
+ return if ::Feature.enabled?(:add_policy_approvers_to_rules)
+
@affected_project_ids.each do |project_id|
::Gitlab::EventStore.publish(
::ProjectAuthorizations::AuthorizationsChangedEvent.new(data: { project_id: project_id })
)
end
- return if ::Feature.disabled?(:user_approval_rules_removal) || @removed_user_ids.blank?
+ end
- @affected_project_ids.each do |project_id|
- @removed_user_ids.to_a.each_slice(EVENT_USER_BATCH_SIZE).each do |user_ids_batch|
- ::Gitlab::EventStore.publish(
- ::ProjectAuthorizations::AuthorizationsRemovedEvent.new(data: {
- project_id: project_id,
- user_ids: user_ids_batch
- })
- )
+ def publish_removed_event
+ return if ::Feature.disabled?(:user_approval_rules_removal)
+ return if @removed_user_ids.none?
+
+ events = @affected_project_ids.flat_map do |project_id|
+ @removed_user_ids.to_a.each_slice(EVENT_USER_BATCH_SIZE).map do |user_ids_batch|
+ ::ProjectAuthorizations::AuthorizationsRemovedEvent.new(data: {
+ project_id: project_id,
+ user_ids: user_ids_batch
+ })
+ end
+ end
+ ::Gitlab::EventStore.publish_group(events)
+ end
+
+ def publish_added_event
+ return if ::Feature.disabled?(:add_policy_approvers_to_rules)
+ return if @added_user_ids.none?
+
+ events = @affected_project_ids.flat_map do |project_id|
+ @added_user_ids.to_a.each_slice(EVENT_USER_BATCH_SIZE).map do |user_ids_batch|
+ ::ProjectAuthorizations::AuthorizationsAddedEvent.new(data: {
+ project_id: project_id,
+ user_ids: user_ids_batch
+ })
end
end
+ ::Gitlab::EventStore.publish_group(events)
end
end
end
diff --git a/app/models/user.rb b/app/models/user.rb
index eb505b89cbb..9038ce46c14 100644
--- a/app/models/user.rb
+++ b/app/models/user.rb
@@ -1622,12 +1622,6 @@ class User < MainClusterwide::ApplicationRecord
if namespace
namespace.path = username if username_changed?
namespace.name = name if name_changed?
- elsif Feature.disabled?(:create_personal_ns_outside_model, Feature.current_request)
- # TODO: we should no longer need the `type` parameter once we can make the
- # the `has_one :namespace` association use the correct class.
- # issue https://gitlab.com/gitlab-org/gitlab/-/issues/341070
- namespace = build_namespace(path: username, name: name, type: ::Namespaces::UserNamespace.sti_name)
- namespace.build_namespace_settings
end
end
diff --git a/app/services/ci/cancel_pipeline_service.rb b/app/services/ci/cancel_pipeline_service.rb
index 38053b13921..92eead3fdd1 100644
--- a/app/services/ci/cancel_pipeline_service.rb
+++ b/app/services/ci/cancel_pipeline_service.rb
@@ -10,17 +10,20 @@ module Ci
# @cascade_to_children - if true cancels all related child pipelines for parent child pipelines
# @auto_canceled_by_pipeline - store the pipeline_id of the pipeline that triggered cancellation
# @execute_async - if true cancel the children asyncronously
+ # @safe_cancellation - if true only cancel interruptible:true jobs
def initialize(
pipeline:,
current_user:,
cascade_to_children: true,
auto_canceled_by_pipeline: nil,
- execute_async: true)
+ execute_async: true,
+ safe_cancellation: false)
@pipeline = pipeline
@current_user = current_user
@cascade_to_children = cascade_to_children
@auto_canceled_by_pipeline = auto_canceled_by_pipeline
@execute_async = execute_async
+ @safe_cancellation = safe_cancellation
end
def execute
@@ -42,13 +45,16 @@ module Ci
log_pipeline_being_canceled
pipeline.update_column(:auto_canceled_by_id, @auto_canceled_by_pipeline.id) if @auto_canceled_by_pipeline
- cancel_jobs(pipeline.cancelable_statuses)
- return ServiceResponse.success unless cascade_to_children?
+ if @safe_cancellation
+ # Only build and bridge (trigger) jobs can be interruptible.
+ # We do not cancel GenericCommitStatuses because they can't have the `interruptible` attribute.
+ cancel_jobs(pipeline.processables.cancelable.interruptible)
+ else
+ cancel_jobs(pipeline.cancelable_statuses)
+ end
- # cancel any bridges that could spin up new child pipelines
- cancel_jobs(pipeline.bridges_in_self_and_project_descendants.cancelable)
- cancel_children
+ cancel_children if cascade_to_children?
ServiceResponse.success
end
@@ -106,8 +112,15 @@ module Ci
)
end
- # For parent child-pipelines only (not multi-project)
+ # We don't handle the case when `cascade_to_children` is `true` and `safe_cancellation` is `true`
+ # because `safe_cancellation` is passed as `true` only when `cascade_to_children` is `false`
+ # from `CancelRedundantPipelinesService`.
+ # In the future, when "safe cancellation" is implemented as a regular cancellation feature,
+ # we need to handle this case.
def cancel_children
+ cancel_jobs(pipeline.bridges_in_self_and_project_descendants.cancelable)
+
+ # For parent child-pipelines only (not multi-project)
pipeline.all_child_pipelines.each do |child_pipeline|
if execute_async?
::Ci::CancelPipelineWorker.perform_async(
diff --git a/app/services/ci/pipeline_creation/cancel_redundant_pipelines_service.rb b/app/services/ci/pipeline_creation/cancel_redundant_pipelines_service.rb
index 224b2d96205..98469e82af3 100644
--- a/app/services/ci/pipeline_creation/cancel_redundant_pipelines_service.rb
+++ b/app/services/ci/pipeline_creation/cancel_redundant_pipelines_service.rb
@@ -23,7 +23,7 @@ module Ci
pipelines = parent_and_child_pipelines(ids)
Gitlab::OptimisticLocking.retry_lock(pipelines, name: 'cancel_pending_pipelines') do |cancelables|
- auto_cancel_interruptible_pipelines(cancelables.ids)
+ auto_cancel_pipelines(cancelables.ids)
end
end
end
@@ -69,31 +69,66 @@ module Ci
.base_and_descendants
.alive_or_scheduled
end
- # rubocop: enable CodeReuse/ActiveRecord
- def auto_cancel_interruptible_pipelines(pipeline_ids)
+ def legacy_auto_cancel_pipelines(pipeline_ids)
::Ci::Pipeline
.id_in(pipeline_ids)
- .with_only_interruptible_builds
+ .conservative_interruptible
.each do |cancelable_pipeline|
- Gitlab::AppLogger.info(
- class: self.class.name,
- message: "Pipeline #{pipeline.id} auto-canceling pipeline #{cancelable_pipeline.id}",
- canceled_pipeline_id: cancelable_pipeline.id,
- canceled_by_pipeline_id: pipeline.id,
- canceled_by_pipeline_source: pipeline.source
- )
-
- # cascade_to_children not needed because we iterate through descendants here
- ::Ci::CancelPipelineService.new(
- pipeline: cancelable_pipeline,
- current_user: nil,
- auto_canceled_by_pipeline: pipeline,
- cascade_to_children: false
- ).force_execute
+ cancel_pipeline(cancelable_pipeline, safe_cancellation: false)
end
end
+ def auto_cancel_pipelines(pipeline_ids)
+ if Feature.disabled?(:ci_workflow_auto_cancel_on_new_commit, project)
+ return legacy_auto_cancel_pipelines(pipeline_ids)
+ end
+
+ ::Ci::Pipeline
+ .id_in(pipeline_ids)
+ .each do |cancelable_pipeline|
+ case cancelable_pipeline.auto_cancel_on_new_commit
+ when 'none'
+ # no-op
+ when 'conservative'
+ next unless conservative_cancellable_pipeline_ids(pipeline_ids).include?(cancelable_pipeline.id)
+
+ cancel_pipeline(cancelable_pipeline, safe_cancellation: false)
+ when 'interruptible'
+ cancel_pipeline(cancelable_pipeline, safe_cancellation: true)
+ else
+ raise ArgumentError,
+ "Unknown auto_cancel_on_new_commit value: #{cancelable_pipeline.auto_cancel_on_new_commit}"
+ end
+ end
+ end
+
+ def conservative_cancellable_pipeline_ids(pipeline_ids)
+ strong_memoize_with(:conservative_cancellable_pipeline_ids, pipeline_ids) do
+ ::Ci::Pipeline.id_in(pipeline_ids).conservative_interruptible.ids
+ end
+ end
+ # rubocop: enable CodeReuse/ActiveRecord
+
+ def cancel_pipeline(cancelable_pipeline, safe_cancellation:)
+ Gitlab::AppLogger.info(
+ class: self.class.name,
+ message: "Pipeline #{pipeline.id} auto-canceling pipeline #{cancelable_pipeline.id}",
+ canceled_pipeline_id: cancelable_pipeline.id,
+ canceled_by_pipeline_id: pipeline.id,
+ canceled_by_pipeline_source: pipeline.source
+ )
+
+ # cascade_to_children not needed because we iterate through descendants here
+ ::Ci::CancelPipelineService.new(
+ pipeline: cancelable_pipeline,
+ current_user: nil,
+ auto_canceled_by_pipeline: pipeline,
+ cascade_to_children: false,
+ safe_cancellation: safe_cancellation
+ ).force_execute
+ end
+
def pipelines_created_after
3.days.ago
end
diff --git a/config/feature_flags/development/create_personal_ns_outside_model.yml b/config/feature_flags/development/add_policy_approvers_to_rules.yml
index ba8a3f7deb3..1d5584c5181 100644
--- a/config/feature_flags/development/create_personal_ns_outside_model.yml
+++ b/config/feature_flags/development/add_policy_approvers_to_rules.yml
@@ -1,8 +1,8 @@
---
-name: create_personal_ns_outside_model
-introduced_by_url: https://gitlab.com/gitlab-org/gitlab/-/merge_requests/139487
-rollout_issue_url: https://gitlab.com/gitlab-org/gitlab/-/issues/434921
-milestone: '16.7'
+name: add_policy_approvers_to_rules
+introduced_by_url: https://gitlab.com/gitlab-org/gitlab/-/merge_requests/138809
+rollout_issue_url: https://gitlab.com/gitlab-org/gitlab/-/issues/434385
+milestone: '16.8'
type: development
-group: group::tenant scale
+group: group::security policies
default_enabled: false
diff --git a/config/feature_flags/development/ci_workflow_auto_cancel_on_new_commit.yml b/config/feature_flags/development/ci_workflow_auto_cancel_on_new_commit.yml
new file mode 100644
index 00000000000..3b8c7b1e489
--- /dev/null
+++ b/config/feature_flags/development/ci_workflow_auto_cancel_on_new_commit.yml
@@ -0,0 +1,8 @@
+---
+name: ci_workflow_auto_cancel_on_new_commit
+introduced_by_url: https://gitlab.com/gitlab-org/gitlab/-/merge_requests/139358
+rollout_issue_url: https://gitlab.com/gitlab-org/gitlab/-/issues/434676
+milestone: '16.8'
+type: development
+group: group::pipeline authoring
+default_enabled: false
diff --git a/config/initializers/7_redis.rb b/config/initializers/7_redis.rb
index 040257535f8..5d5bb209774 100644
--- a/config/initializers/7_redis.rb
+++ b/config/initializers/7_redis.rb
@@ -29,6 +29,9 @@ Redis::Cluster.prepend(Gitlab::Patch::RedisCluster)
ConnectionPool.prepend(Gitlab::Instrumentation::ConnectionPool)
+# this only instruments `RedisClient` used in `Sidekiq.redis`
+RedisClient.register(Gitlab::Instrumentation::RedisClientMiddleware)
+
if Gitlab::Redis::Workhorse.params[:cluster].present?
raise "Do not configure workhorse with a Redis Cluster as pub/sub commands are not cluster-compatible."
end
diff --git a/config/initializers/sidekiq.rb b/config/initializers/sidekiq.rb
index 8df12671f26..9b7233dbd14 100644
--- a/config/initializers/sidekiq.rb
+++ b/config/initializers/sidekiq.rb
@@ -28,26 +28,41 @@ def enable_semi_reliable_fetch_mode?
end
# Custom Queues configuration
-queues_config_hash = Gitlab::Redis::Queues.params
+queues_config_hash = Gitlab::Redis::Queues.redis_client_params
enable_json_logs = Gitlab.config.sidekiq.log_format != 'text'
+# Sidekiq's `strict_args!` raises an exception by default in 7.0
+# https://github.com/sidekiq/sidekiq/blob/31bceff64e10d501323bc06ac0552652a47c082e/docs/7.0-Upgrade.md?plain=1#L59
+Sidekiq.strict_args!(false)
+
Sidekiq.configure_server do |config|
config[:strict] = false
config[:queues] = Gitlab::SidekiqConfig.expand_queues(config[:queues])
if enable_json_logs
- config.log_formatter = Gitlab::SidekiqLogging::JSONFormatter.new
+ config.logger.formatter = Gitlab::SidekiqLogging::JSONFormatter.new
config[:job_logger] = Gitlab::SidekiqLogging::StructuredLogger
# Remove the default-provided handler. The exception is logged inside
# Gitlab::SidekiqLogging::StructuredLogger
- config.error_handlers.delete(Sidekiq::DEFAULT_ERROR_HANDLER)
+ config.error_handlers.delete(Sidekiq::Config::ERROR_HANDLER)
end
Sidekiq.logger.info "Listening on queues #{config[:queues].uniq.sort}"
- config.redis = queues_config_hash
+ # In Sidekiq 6.x, connection pools have a size of concurrency+5.
+ # ref: https://github.com/sidekiq/sidekiq/blob/v6.5.10/lib/sidekiq/redis_connection.rb#L93
+ #
+ # In Sidekiq 7.x, capsule connection pools have a size equal to its concurrency. Internal
+ # housekeeping pool has a size of 10.
+ # ref: https://github.com/sidekiq/sidekiq/blob/v7.1.6/lib/sidekiq/capsule.rb#L94
+ # ref: https://github.com/sidekiq/sidekiq/blob/v7.1.6/lib/sidekiq/config.rb#L133
+ #
+ # We restore the concurrency+5 in Sidekiq 7.x to ensure that we do not experience resource bottlenecks with Redis
+ # connections. The connections are created lazily so slightly over-provisioning a connection pool is not an issue.
+ # This also increases the internal redis pool from 10 to concurrency+5.
+ config.redis = queues_config_hash.merge({ size: config.concurrency + 5 })
config.server_middleware(&Gitlab::SidekiqMiddleware.server_configurator(
metrics: Settings.monitoring.sidekiq_exporter,
@@ -107,8 +122,8 @@ Sidekiq.configure_client do |config|
# We only need to do this for other clients. If Sidekiq-server is the
# client scheduling jobs, we have access to the regular sidekiq logger that
# writes to STDOUT
- Sidekiq.logger = Gitlab::SidekiqLogging::ClientLogger.build
- Sidekiq.logger.formatter = Gitlab::SidekiqLogging::JSONFormatter.new if enable_json_logs
+ config.logger = Gitlab::SidekiqLogging::ClientLogger.build
+ config.logger.formatter = Gitlab::SidekiqLogging::JSONFormatter.new if enable_json_logs
config.client_middleware(&Gitlab::SidekiqMiddleware.client_configurator)
end
diff --git a/config/initializers/sidekiq_cluster.rb b/config/initializers/sidekiq_cluster.rb
index 5851e3bd838..4773152d912 100644
--- a/config/initializers/sidekiq_cluster.rb
+++ b/config/initializers/sidekiq_cluster.rb
@@ -19,7 +19,7 @@ if ENV['ENABLE_SIDEKIQ_CLUSTER']
# Allow sidekiq to cleanly terminate and push any running jobs back
# into the queue. We use the configured timeout and add a small
# grace period
- sleep(Sidekiq[:timeout] + 5)
+ sleep(Sidekiq.default_configuration[:timeout] + 5)
# Signaling the Sidekiq Pgroup as KILL is not forwarded to
# a possible child process. In Sidekiq Cluster, all child Sidekiq
diff --git a/config/sidekiq_queues.yml b/config/sidekiq_queues.yml
index 34a41c39f31..fc36a9d0b04 100644
--- a/config/sidekiq_queues.yml
+++ b/config/sidekiq_queues.yml
@@ -681,6 +681,8 @@
- 1
- - security_scan_execution_policies_rule_schedule
- 1
+- - security_scan_result_policies_add_approvers_to_rules
+ - 1
- - security_scan_result_policies_sync_any_merge_request_approval_rules
- 1
- - security_scan_result_policies_sync_project
diff --git a/db/docs/batched_background_migrations/backfill_vs_code_settings_version.yml b/db/docs/batched_background_migrations/backfill_vs_code_settings_version.yml
new file mode 100644
index 00000000000..8f5af37f3df
--- /dev/null
+++ b/db/docs/batched_background_migrations/backfill_vs_code_settings_version.yml
@@ -0,0 +1,9 @@
+---
+migration_job_name: BackfillVsCodeSettingsVersion
+description: Backfills the version column of existing vs_code_settings records
+feature_category: web_ide
+introduced_by_url: https://gitlab.com/gitlab-org/gitlab/-/merge_requests/140091
+milestone: '16.8'
+queued_migration_version: 20231212135235
+finalize_after: '2024-01-31'
+finalized_by: # version of the migration that finalized this BBM
diff --git a/db/post_migrate/20231212135235_queue_backfill_vs_code_settings_version.rb b/db/post_migrate/20231212135235_queue_backfill_vs_code_settings_version.rb
new file mode 100644
index 00000000000..db2704c3c33
--- /dev/null
+++ b/db/post_migrate/20231212135235_queue_backfill_vs_code_settings_version.rb
@@ -0,0 +1,27 @@
+# frozen_string_literal: true
+
+class QueueBackfillVsCodeSettingsVersion < Gitlab::Database::Migration[2.2]
+ milestone '16.7'
+
+ MIGRATION = "BackfillVsCodeSettingsVersion"
+ DELAY_INTERVAL = 2.minutes
+ BATCH_SIZE = 1000
+ SUB_BATCH_SIZE = 100
+
+ restrict_gitlab_migration gitlab_schema: :gitlab_main
+
+ def up
+ queue_batched_background_migration(
+ MIGRATION,
+ :vs_code_settings,
+ :id,
+ job_interval: DELAY_INTERVAL,
+ batch_size: BATCH_SIZE,
+ sub_batch_size: SUB_BATCH_SIZE
+ )
+ end
+
+ def down
+ delete_batched_background_migration(MIGRATION, :vs_code_settings, :id, [])
+ end
+end
diff --git a/db/schema_migrations/20231212135235 b/db/schema_migrations/20231212135235
new file mode 100644
index 00000000000..7f72cbe25f4
--- /dev/null
+++ b/db/schema_migrations/20231212135235
@@ -0,0 +1 @@
+de10b5c89c8aac8590f76cba73bdd05af18669c998036ab7ac82231c2feb58f8 \ No newline at end of file
diff --git a/doc/api/groups.md b/doc/api/groups.md
index e168adef6e2..90820727040 100644
--- a/doc/api/groups.md
+++ b/doc/api/groups.md
@@ -147,7 +147,7 @@ If you request this list as:
- An unauthenticated user, the response returns only public groups.
- An authenticated user, the response returns only the groups you're
-a member of and does not include public groups.
+ a member of and does not include public groups.
Parameters:
@@ -306,7 +306,7 @@ Parameters:
| `id` | integer/string | yes | The ID or [URL-encoded path of the group](rest/index.md#namespaced-path-encoding) owned by the authenticated user |
| `archived` | boolean | no | Limit by archived status |
| `visibility` | string | no | Limit by visibility `public`, `internal`, or `private` |
-| `order_by` | string | no | Return projects ordered by `id`, `name`, `path`, `created_at`, `updated_at`, `similarity` (1), or `last_activity_at` fields. Default is `created_at` |
+| `order_by` | string | no | Return projects ordered by `id`, `name`, `path`, `created_at`, `updated_at`, `similarity` <sup>1</sup>, or `last_activity_at` fields. Default is `created_at` |
| `sort` | string | no | Return projects sorted in `asc` or `desc` order. Default is `desc` |
| `search` | string | no | Return list of authorized projects matching the search criteria |
| `simple` | boolean | no | Return only limited fields for each project. This is a no-op without authentication where only simple fields are returned. |
@@ -316,14 +316,18 @@ Parameters:
| `with_issues_enabled` | boolean | no | Limit by projects with issues feature enabled. Default is `false` |
| `with_merge_requests_enabled` | boolean | no | Limit by projects with merge requests feature enabled. Default is `false` |
| `with_shared` | boolean | no | Include projects shared to this group. Default is `true` |
-| `include_subgroups` | boolean | no | Include projects in subgroups of this group. Default is `false` |
+| `include_subgroups` | boolean | no | Include projects in subgroups of this group. Default is `false` |
| `min_access_level` | integer | no | Limit to projects where current user has at least this [role (`access_level`)](members.md#roles) |
| `with_custom_attributes` | boolean | no | Include [custom attributes](custom_attributes.md) in response (administrators only) |
-| `with_security_reports` **(ULTIMATE ALL)** | boolean | no | Return only projects that have security reports artifacts present in any of their builds. This means "projects with security reports enabled". Default is `false` |
-
-1. Order by similarity: Orders the results by a similarity score calculated from the provided `search`
-URL parameter. When using `order_by=similarity`, the `sort` parameter is ignored. When the `search`
-parameter is not provided, the API returns the projects ordered by `name`.
+| `with_security_reports` **(ULTIMATE ALL)** | boolean | no | Return only projects that have security reports artifacts present in any of their builds. This means "projects with security reports enabled". Default is `false` |
+
+<html>
+<small>Footnotes:
+ <ol>
+ <li>Order by similarity: Orders the results by a similarity score calculated from the provided `search` URL parameter. When using `order_by=similarity`, the `sort` parameter is ignored. When the `search` parameter is not provided, the API returns the projects ordered by `name`.</li>
+ </ol>
+</small>
+</html>
Example response:
diff --git a/doc/ci/testing/code_quality.md b/doc/ci/testing/code_quality.md
index 1ad35e34a9e..ddea9181e65 100644
--- a/doc/ci/testing/code_quality.md
+++ b/doc/ci/testing/code_quality.md
@@ -27,14 +27,15 @@ You can extend the code coverage either by using Code Climate
Different features are available in different [GitLab tiers](https://about.gitlab.com/pricing/),
as shown in the following table:
-| Capability | In Free | In Premium | In Ultimate |
-|:-----------------------------------------------------------------------|:--------------------|:--------------------|:-------------------|
-| [Configure scanners](#customizing-scan-settings) | **{check-circle}** | **{check-circle}** | **{check-circle}** |
-| [Integrate custom scanners](#implement-a-custom-tool) | **{check-circle}** | **{check-circle}** | **{check-circle}** |
-| [See findings in merge request widget](#merge-request-widget) | **{check-circle}** | **{check-circle}** | **{check-circle}** |
-| [Generate JSON or HTML report artifacts](#output) | **{check-circle}** | **{check-circle}** | **{check-circle}** |
-| [See reports in CI pipelines](#pipeline-details-view) | **{dotted-circle}** | **{check-circle}** | **{check-circle}** |
-| [See findings in merge request diff view](#merge-request-changes-view) | **{dotted-circle}** | **{dotted-circle}** | **{check-circle}** |
+| Feature | In Free | In Premium | In Ultimate |
+|:----------------------------------------------------------------------|:-----------------------|:-----------------------|:-----------------------|
+| [Configure scanners](#customizing-scan-settings) | **{check-circle}** Yes | **{check-circle}** Yes | **{check-circle}** Yes |
+| [Integrate custom scanners](#implement-a-custom-tool) | **{check-circle}** Yes | **{check-circle}** Yes | **{check-circle}** Yes |
+| [Generate JSON or HTML report artifacts](#output) | **{check-circle}** Yes | **{check-circle}** Yes | **{check-circle}** Yes |
+| [Findings in merge request widget](#merge-request-widget) | **{check-circle}** Yes | **{check-circle}** Yes | **{check-circle}** Yes |
+| [Findings in pipelines](#pipeline-details-view) | **{dotted-circle}** No | **{check-circle}** Yes | **{check-circle}** Yes |
+| [Findings in merge request changes view](#merge-request-changes-view) | **{dotted-circle}** No | **{dotted-circle}** No | **{check-circle}** Yes |
+| [Summary in project quality view](#project-quality-view) | **{dotted-circle}** No | **{dotted-circle}** No | **{check-circle}** Yes |
## View Code Quality results
@@ -227,7 +228,7 @@ Code Quality can be customized by defining available CI/CD variables:
| CI/CD variable | Description |
|---------------------------------|-------------|
-| `CODECLIMATE_DEBUG` | Set to enable [Code Climate debug mode](https://github.com/codeclimate/codeclimate#environment-variables) |
+| `CODECLIMATE_DEBUG` | Set to enable [Code Climate debug mode](https://github.com/codeclimate/codeclimate#environment-variables). |
| `CODECLIMATE_DEV` | Set to enable `--dev` mode which lets you run engines not known to the CLI. |
| `CODECLIMATE_PREFIX` | Set a prefix to use with all `docker pull` commands in CodeClimate engines. Useful for [offline scanning](https://github.com/codeclimate/codeclimate/pull/948). For more information, see [Use a private container registry](#use-a-private-container-image-registry). |
| `CODECLIMATE_REGISTRY_USERNAME` | Set to specify the username for the registry domain parsed from `CODECLIMATE_PREFIX`. |
diff --git a/doc/ci/yaml/index.md b/doc/ci/yaml/index.md
index 2761658a719..93c38237993 100644
--- a/doc/ci/yaml/index.md
+++ b/doc/ci/yaml/index.md
@@ -480,6 +480,46 @@ You can use some [predefined CI/CD variables](../variables/predefined_variables.
- [`workflow: rules` examples](workflow.md#workflow-rules-examples)
- [Switch between branch pipelines and merge request pipelines](workflow.md#switch-between-branch-pipelines-and-merge-request-pipelines)
+#### `workflow:auto_cancel:on_new_commit`
+
+> [Introduced](https://gitlab.com/gitlab-org/gitlab/-/issues/412473) in GitLab 16.8 [with a flag](../../administration/feature_flags.md) named `ci_workflow_auto_cancel_on_new_commit`. Disabled by default.
+
+FLAG:
+On self-managed GitLab, by default this feature is not available. To make it available per project or
+for your entire instance, an administrator can [enable the feature flag](../../administration/feature_flags.md) named `ci_workflow_auto_cancel_on_new_commit`.
+On GitLab.com, this feature is not available.
+The feature is not ready for production use.
+
+Use `workflow:auto_cancel:on_new_commit` to configure the behavior of
+the [auto-cancel redundant pipelines](../pipelines/settings.md#auto-cancel-redundant-pipelines) feature.
+
+**Possible inputs**:
+
+- `conservative`: Cancel the pipeline, but only if no jobs with `interruptible: false` have started yet. Default when not defined.
+- `interruptible`: Cancel only jobs with `interruptible: true`.
+- `none`: Do not auto-cancel any jobs.
+
+**Example of `workflow:auto_cancel:on_new_commit`**:
+
+```yaml
+workflow:
+ auto_cancel:
+ on_new_commit: interruptible
+
+job1:
+ interruptible: true
+ script: sleep 60
+
+job2:
+ interruptible: false # Default when not defined.
+ script: sleep 60
+```
+
+In this example:
+
+- When a new commit is pushed to a branch, GitLab creates a new pipeline and `job1` and `job2` start.
+- If a new commit is pushed to the branch before the jobs complete, only `job1` is canceled.
+
#### `workflow:name`
> - [Introduced](https://gitlab.com/gitlab-org/gitlab/-/issues/372538) in GitLab 15.5 [with a flag](../../administration/feature_flags.md) named `pipeline_name`. Disabled by default.
@@ -2621,7 +2661,8 @@ job2:
### `interruptible`
-> [Introduced](https://gitlab.com/gitlab-org/gitlab/-/issues/32022) in GitLab 12.3.
+> - [Introduced](https://gitlab.com/gitlab-org/gitlab/-/issues/32022) in GitLab 12.3.
+> - Support for `trigger` jobs [introduced](https://gitlab.com/gitlab-org/gitlab/-/issues/412473) in GitLab 16.8 [with a flag](../../administration/feature_flags.md) named `ci_workflow_auto_cancel_on_new_commit`. Disabled by default.
Use `interruptible` to configure the [auto-cancel redundant pipelines](../pipelines/settings.md#auto-cancel-redundant-pipelines)
feature to cancel a job before it completes if a new pipeline on the same ref starts for a newer commit. If the feature
@@ -2686,6 +2727,12 @@ In this example, a new pipeline causes a running pipeline to be:
a pipeline to allow users to manually prevent a pipeline from being automatically
cancelled. After a user starts the job, the pipeline cannot be canceled by the
**Auto-cancel redundant pipelines** feature.
+- When using `interruptible` with a [trigger job](#trigger):
+ - The triggered downstream pipeline is never affected by the trigger job's `interruptible` configuration.
+ - If [`workflow:auto_cancel`](#workflowauto_cancelon_new_commit) is set to `conservative`,
+ the trigger job's `interruptible` configuration has no effect.
+ - If [`workflow:auto_cancel`](#workflowauto_cancelon_new_commit) is set to `interruptible`,
+ a trigger job with `interruptible: true` can be automatically canceled.
### `needs`
diff --git a/doc/development/distributed_tracing.md b/doc/development/distributed_tracing.md
index 86732c3a8ac..ad41d21a9e7 100644
--- a/doc/development/distributed_tracing.md
+++ b/doc/development/distributed_tracing.md
@@ -93,9 +93,9 @@ concern, some instrumentations are disabled by default. To enable those disabled
instrumentations, set the following environment variables:
- `GITLAB_TRACING_TRACK_CACHES`: enable tracking cache operations, such as cache
-read, write, or delete.
+ read, write, or delete.
- `GITLAB_TRACING_TRACK_REDIS`: enable tracking Redis operations. Most Redis
-operations are for caching, though.
+ operations are for caching, though.
## Using Jaeger in the GitLab Development Kit
diff --git a/doc/development/feature_flags/index.md b/doc/development/feature_flags/index.md
index 965b3bd86aa..59c796ca331 100644
--- a/doc/development/feature_flags/index.md
+++ b/doc/development/feature_flags/index.md
@@ -278,6 +278,7 @@ Each feature flag is defined in a separate YAML file consisting of a number of f
| `group` | yes | The [group](https://about.gitlab.com/handbook/product/categories/#devops-stages) that owns the feature flag. |
| `feature_issue_url` | no | The URL to the original feature issue. |
| `rollout_issue_url` | no | The URL to the Issue covering the feature flag rollout. |
+| `log_state_changes` | no | Used to log the state of the feature flag. |
NOTE:
All validations are skipped when running in `RAILS_ENV=production`.
@@ -666,6 +667,19 @@ as follows:
Feature.remove(:feature_flag_name)
```
+### Logging
+
+Usage and state of the feature flag are logged if either:
+
+- `log_state_changes` is set to `true` in the feature flag definition.
+- `milestone` refers to a milestone that is greater than or equal to the current GitLab version.
+
+When the state of a feature flag is logged, it can be identified by using the `"json.feature_flag_states": "feature_flag_name:1"` or `"json.feature_flag_states": "feature_flag_name:0"` condition in Kibana.
+You can see an example in [this link](https://log.gprd.gitlab.net/app/discover#/?_g=(filters:!(),refreshInterval:(pause:!t,value:60000),time:(from:now-7d%2Fd,to:now))&_a=(columns:!(json.feature_flag_states),filters:!(('$state':(store:appState),meta:(alias:!n,disabled:!f,field:json.feature_flag_states,index:'7092c4e2-4eb5-46f2-8305-a7da2edad090',key:json.feature_flag_states,negate:!f,params:(query:'optimize_where_full_path_in:1'),type:phrase),query:(match_phrase:(json.feature_flag_states:'optimize_where_full_path_in:1')))),hideChart:!f,index:'7092c4e2-4eb5-46f2-8305-a7da2edad090',interval:auto,query:(language:kuery,query:''),sort:!(!(json.time,desc)))).
+
+NOTE:
+Only 20% of the requests log the state of the feature flags. This is controlled with the [`feature_flag_state_logs`](https://gitlab.com/gitlab-org/gitlab/-/blob/6deb6ecbc69f05a80d920a295dfc1a6a303fc7a0/config/feature_flags/ops/feature_flag_state_logs.yml) feature flag.
+
## Changelog
We want to avoid introducing a changelog when features are not accessible by an end-user either directly (example: ability to use the feature) or indirectly (examples: ability to take advantage of background jobs, performance improvements, or database migration updates).
diff --git a/doc/development/internal_analytics/metrics/metrics_dictionary.md b/doc/development/internal_analytics/metrics/metrics_dictionary.md
index 463d5be9100..ba3fb28743a 100644
--- a/doc/development/internal_analytics/metrics/metrics_dictionary.md
+++ b/doc/development/internal_analytics/metrics/metrics_dictionary.md
@@ -81,9 +81,9 @@ Metric definitions can have one of the following values for `value_type`:
- `number`
- `string`
- `object`: A metric with `value_type: object` must have `value_json_schema` with a link to the JSON schema for the object.
-In general, we avoid complex objects and prefer one of the `boolean`, `number`, or `string` value types.
-An example of a metric that uses `value_type: object` is `topology` (`/config/metrics/settings/20210323120839_topology.yml`),
-which has a related schema in `/config/metrics/objects_schemas/topology_schema.json`.
+ In general, we avoid complex objects and prefer one of the `boolean`, `number`, or `string` value types.
+ An example of a metric that uses `value_type: object` is `topology` (`/config/metrics/settings/20210323120839_topology.yml`),
+ which has a related schema in `/config/metrics/objects_schemas/topology_schema.json`.
### Metric `time_frame`
diff --git a/doc/development/internal_analytics/review_guidelines.md b/doc/development/internal_analytics/review_guidelines.md
index eb59b834cbc..802a6f410ed 100644
--- a/doc/development/internal_analytics/review_guidelines.md
+++ b/doc/development/internal_analytics/review_guidelines.md
@@ -29,7 +29,7 @@ In most cases, an Analytics Instrumentation review is automatically added, but i
#### The merge request **author** should
- Decide whether an Analytics Instrumentation review is needed. You can skip the Analytics Instrumentation
-review and remove the labels if the changes are not related to the Analytics Instrumentation domain.
+ review and remove the labels if the changes are not related to the Analytics Instrumentation domain.
- If an Analytics Instrumentation review is needed and was not assigned automatically, add the labels
`~analytics instrumentation` and `~analytics instrumentation::review pending`.
- Use reviewer roulette to assign an [Analytics Instrumentation reviewer](https://gitlab-org.gitlab.io/gitlab-roulette/?hourFormat24=true&visible=reviewer%7Canalytics+instrumentation) who is not the author.
diff --git a/doc/development/logging.md b/doc/development/logging.md
index 2af914d76ef..1cfcc6530c5 100644
--- a/doc/development/logging.md
+++ b/doc/development/logging.md
@@ -330,7 +330,7 @@ Entry points can be seen at:
When adding new attributes, make sure they're exposed within the context of the entry points above and:
- Pass them within the hash to the `with_context` (or `push`) method (make sure to pass a Proc if the
-method or variable shouldn't be evaluated right away)
+ method or variable shouldn't be evaluated right away)
- Change `Gitlab::ApplicationContext` to accept these new values
- Make sure the new attributes are accepted at [`Labkit::Context`](https://gitlab.com/gitlab-org/labkit-ruby/blob/master/lib/labkit/context.rb)
diff --git a/doc/development/prometheus_metrics.md b/doc/development/prometheus_metrics.md
index 6a4a85f14ff..cef55c800e1 100644
--- a/doc/development/prometheus_metrics.md
+++ b/doc/development/prometheus_metrics.md
@@ -72,10 +72,10 @@ This section describes how to add new metrics for self-monitoring
for [Prometheus metric names](https://prometheus.io/docs/practices/naming/#metric-names).
1. Update the list of [GitLab Prometheus metrics](../administration/monitoring/prometheus/gitlab_metrics.md).
1. Carefully choose what labels you want to add to your metric. Values with high cardinality,
-like `project_path`, or `project_id` are strongly discouraged because they can affect our services
-availability due to the fact that each set of labels is exposed as a new entry in the `/metrics` endpoint.
-For example, a histogram with 10 buckets and a label with 100 values would generate 1000
-entries in the export endpoint.
+ like `project_path`, or `project_id` are strongly discouraged because they can affect our services
+ availability due to the fact that each set of labels is exposed as a new entry in the `/metrics` endpoint.
+ For example, a histogram with 10 buckets and a label with 100 values would generate 1000
+ entries in the export endpoint.
1. Trigger the relevant page or code that records the new metric.
1. Check that the new metric appears at `/-/metrics`.
diff --git a/doc/development/value_stream_analytics.md b/doc/development/value_stream_analytics.md
index 725c8aa45d2..83cfb3fb385 100644
--- a/doc/development/value_stream_analytics.md
+++ b/doc/development/value_stream_analytics.md
@@ -28,7 +28,7 @@ Apart from the durations, we expose the record count within a stage.
## Feature availability
- Group level (licensed): Requires Ultimate or Premium subscription. This version is the most
-feature-full.
+ feature-full.
- Project level (licensed): We are continually adding features to project level VSA to bring it in line with group level VSA.
- Project level (FOSS): Keep it as is.
diff --git a/doc/development/value_stream_analytics/value_stream_analytics_aggregated_backend.md b/doc/development/value_stream_analytics/value_stream_analytics_aggregated_backend.md
index 53c6721b01f..7386a83afc1 100644
--- a/doc/development/value_stream_analytics/value_stream_analytics_aggregated_backend.md
+++ b/doc/development/value_stream_analytics/value_stream_analytics_aggregated_backend.md
@@ -45,7 +45,7 @@ Benefits of the aggregated VSA backend:
- Possibility to introduce further aggregations for improving the first page load time.
- Better performance for large groups (with many subgroups, projects, issues and, merge requests).
- Ready for database decomposition. The VSA related database tables could live in a separate
-database with a minimal development effort.
+ database with a minimal development effort.
- Ready for keyset pagination which can be useful for exporting the data.
- Possibility to implement more complex event definitions.
- For example, the start event can be two timestamp columns where the earliest value would be
@@ -107,8 +107,7 @@ the service performs operations in batches and enforces strict application limit
- Load records in batches.
- Insert records in batches.
-- Stop processing when a limit is reached, schedule a background job to continue the processing
-later.
+- Stop processing when a limit is reached, schedule a background job to continue the processing later.
- Continue processing data from a specific point.
As of GitLab 14.7, the data loading is done manually. Once the feature is ready, the service is
@@ -267,16 +266,15 @@ database tables. This change could be implemented using array columns.
The feature uses private JSON APIs for delivering the data to the frontend. On the first page load,
the following requests are invoked:
-- Initial HTML page load which is mostly empty. Some configuration data is exposed via `data`
-attributes.
+- Initial HTML page load which is mostly empty. Some configuration data is exposed via `data` attributes.
- `value_streams` - Load the available value streams for the given group.
- `stages` - Load the stages for the currently selected value stream.
- `median` - For each stage, request the median duration.
- `count` - For each stage, request the number of items in the stage (this is a
-[limit count](../merge_request_concepts/performance.md#badge-counters), maximum 1000 rows).
+ [limit count](../merge_request_concepts/performance.md#badge-counters), maximum 1000 rows).
- `average_duration_chart` - Data for the duration chart.
- `summary`, `time_summary` - Top-level aggregations, most of the metrics are using different APIs/
-finders and not invoking the aggregated backend.
+ finders and not invoking the aggregated backend.
When selecting a specific stage, the `records` endpoint is invoked, which returns the related
records (paginated) for the chosen stage in a specific order.
diff --git a/doc/security/hardening_application_recommendations.md b/doc/security/hardening_application_recommendations.md
index 857e322191e..4ff1e94deb4 100644
--- a/doc/security/hardening_application_recommendations.md
+++ b/doc/security/hardening_application_recommendations.md
@@ -108,7 +108,7 @@ If GitLab is in FIPS mode, use the following:
- If using `RSA`, set it to **Must be at least 2048 bits**.
- Set all other key types to **Are forbidden**.
- If you are setting up an instance for a new group of users, define your user SSH
-key policy with the maximum bits settings for added security.
+ key policy with the maximum bits settings for added security.
In a hardened environment RSS feeds are typically not required, and in **Feed token**,
select the **Disabled feed token** checkbox.
@@ -192,14 +192,14 @@ process or authenticated user.
The main focus for hardening is **Usage statistics**:
- You should make sure **Enable version check** is selected. This checks to see if you
-are running the latest version of GitLab, and as new versions with new features and
-security patches come out frequently, this helps you stay up to date.
+ are running the latest version of GitLab, and as new versions with new features and
+ security patches come out frequently, this helps you stay up to date.
- If your environment is isolated or one where your organizational requirements
-restrict data gathering and statistics reporting to a software vendor, you may have
-to disable the **Enable service ping** feature. For more information on what data is collected to
-help you make an informed decision, see
-[service ping](../development/internal_analytics/service_ping/index.md).
+ restrict data gathering and statistics reporting to a software vendor, you may have
+ to disable the **Enable service ping** feature. For more information on what data is collected to
+ help you make an informed decision, see
+ [service ping](../development/internal_analytics/service_ping/index.md).
## Network
@@ -215,12 +215,12 @@ and user needs, which may require disabling and adjusting rate limits or enablin
accesses. Here are a few notables to keep in mind:
- In **Outbound requests**, if you need to open up access to a limited
-number of systems, you can limit access to just those systems by specifying
-IP address or hostname. Also in this section, make sure you've selected
-**Enforce DNS rebinding attack protection** if you're allowing any access at all.
+ number of systems, you can limit access to just those systems by specifying
+ IP address or hostname. Also in this section, make sure you've selected
+ **Enforce DNS rebinding attack protection** if you're allowing any access at all.
- Under **Notes rate limit** and **Users API rate limit** you can exclude specific users
-from those limits if needed.
+ from those limits if needed.
<!-- ## Troubleshooting
diff --git a/doc/security/hardening_cicd_recommendations.md b/doc/security/hardening_cicd_recommendations.md
index 4d0a85c362d..72f3bc8e7b8 100644
--- a/doc/security/hardening_cicd_recommendations.md
+++ b/doc/security/hardening_cicd_recommendations.md
@@ -22,18 +22,18 @@ individual scenarios themselves are numerous, we have summarized some basic
information to help harden the CI/CD process.
- **Secrets Management**. Passwords, tokens, keys, and other secrets that require any
-level of protection should never be stored in plaintext. Some type of encrypted
-container technology should be used, such as GCP Secret Manager, AWS KMS, or
-HashiCorp Vault. For self-managed and standalone instances, HashiCorp Vault is
-recommended, and many GitLab features can take advantage of Vault and are well
-documented in the main [Documentation](../index.md). For detailed CI/CD examples, see [using external secrets in CI](../ci/secrets/index.md).
+ level of protection should never be stored in plaintext. Some type of encrypted
+ container technology should be used, such as GCP Secret Manager, AWS KMS, or
+ HashiCorp Vault. For self-managed and standalone instances, HashiCorp Vault is
+ recommended, and many GitLab features can take advantage of Vault and are well
+ documented in the main [Documentation](../index.md). For detailed CI/CD examples, see [using external secrets in CI](../ci/secrets/index.md).
- **External Communications**. If your CI/CD process requires connectivity to other
-hosts, ensure that these communication channels are encrypted. You should use TLS 1.2 or 1.3, and where possible implement mutual TLS.
+ hosts, ensure that these communication channels are encrypted. You should use TLS 1.2 or 1.3, and where possible implement mutual TLS.
- **Logging**. Logging can be very important for auditing and troubleshooting, so it
-is important that you enable any logging features to ensure you are getting
-the information in logs you need. Make sure through periodic testing that
-plaintext secrets or other sensitive information is not inadvertently added to log
-files.
+ is important that you enable any logging features to ensure you are getting
+ the information in logs you need. Make sure through periodic testing that
+ plaintext secrets or other sensitive information is not inadvertently added to log
+ files.
## Specific Recommendations
diff --git a/doc/security/hardening_general_concepts.md b/doc/security/hardening_general_concepts.md
index 0ba8822dc5f..cb0dcb4eba7 100644
--- a/doc/security/hardening_general_concepts.md
+++ b/doc/security/hardening_general_concepts.md
@@ -19,10 +19,9 @@ just one. A quick example is account security:
- Use a long, complex, and unique password for the account.
- Implement a second factor to the authentication process for added security.
- Use a hardware token as a second factor.
-- Lock out an account (for at least a fixed amount of time) for failed authentication
-attempts.
+- Lock out an account (for at least a fixed amount of time) for failed authentication attempts.
- An account that is unused for a specific time frame should be disabled, enforce this
-with either automation or regular audits.
+ with either automation or regular audits.
Instead of using only one or two items on the list, use as many as possible. This
philosophy can apply to other areas besides account security - it should be applied to
diff --git a/doc/update/package/index.md b/doc/update/package/index.md
index e780958d485..662590e7f78 100644
--- a/doc/update/package/index.md
+++ b/doc/update/package/index.md
@@ -39,9 +39,9 @@ GitLab package.
Upgrading versions might need some manual intervention. For more information,
check the version you are upgrading to:
-- [GitLab 16](https://docs.gitlab.com/omnibus/update/gitlab_16_changes.html)
-- [GitLab 15](https://docs.gitlab.com/omnibus/update/gitlab_15_changes.html)
-- [GitLab 14](https://docs.gitlab.com/omnibus/update/gitlab_14_changes.html)
+- [GitLab 16](../versions/gitlab_16_changes.md)
+- [GitLab 15](../versions/gitlab_15_changes.md)
+- [GitLab 14](../versions/gitlab_14_changes.md)
### Earlier GitLab versions
@@ -93,10 +93,10 @@ To upgrade to the latest GitLab version:
# Ubuntu/Debian
sudo apt update && sudo apt install gitlab-ee
-# RHEL/CentOS 6 and 7
+# RHEL/CentOS 7 and Amazon Linux 2
sudo yum install gitlab-ee
-# RHEL/CentOS 8
+# RHEL/Almalinux 8/9 and Amazon Linux 2023
sudo dnf install gitlab-ee
# SUSE
@@ -124,10 +124,10 @@ or upgrade command:
# Ubuntu/Debian
sudo apt-cache madison gitlab-ee
- # RHEL/CentOS 6 and 7
+ # RHEL/CentOS 7 and Amazon Linux 2
yum --showduplicates list gitlab-ee
- # RHEL/CentOS 8
+ # RHEL/Almalinux 8/9 and Amazon Linux 2023
dnf --showduplicates list gitlab-ee
# SUSE
@@ -143,10 +143,10 @@ or upgrade command:
# Ubuntu/Debian
sudo apt install gitlab-ee=<version>
- # RHEL/CentOS 6 and 7
+ # RHEL/CentOS 7 and Amazon Linux 2
yum install gitlab-ee-<version>
- # RHEL/CentOS 8
+ # RHEL/Almalinux 8/9 and Amazon Linux 2023
dnf install gitlab-ee-<version>
# SUSE
@@ -184,10 +184,10 @@ To download and install GitLab:
# Debian/Ubuntu
dpkg -i <package_name>
- # RHEL/CentOS 6 and 7
+ # RHEL/CentOS 7 and Amazon Linux 2
rpm -Uvh <package_name>
- # RHEL/CentOS 8
+ # RHEL/Almalinux 8/9 and Amazon Linux 2023
dnf install <package_name>
# SUSE
diff --git a/doc/user/analytics/analytics_dashboards.md b/doc/user/analytics/analytics_dashboards.md
index 76acba44682..8355a7db5e0 100644
--- a/doc/user/analytics/analytics_dashboards.md
+++ b/doc/user/analytics/analytics_dashboards.md
@@ -55,7 +55,7 @@ With custom dashboards, you can design and create visualizations for the metrics
You can create custom dashboards with the dashboard designer.
- Each project can have an unlimited number of dashboards.
-The only limitation might be the [repository size limit](../project/repository/reducing_the_repo_size_using_git.md#storage-limits).
+ The only limitation might be the [repository size limit](../project/repository/reducing_the_repo_size_using_git.md#storage-limits).
- Each dashboard can reference one or more [visualizations](#define-a-chart-visualization).
- Visualizations are shared across dashboards.
@@ -304,5 +304,5 @@ If a dashboard panel displays a message that the visualization configuration is
If a dashboard panel displays an error message:
- Check your [Cube query](../product_analytics/index.md#product-analytics-dashboards) and [visualization](../analytics/analytics_dashboards.md#define-a-chart-visualization)
-configurations, and make sure they are set up correctly.
+ configurations, and make sure they are set up correctly.
- For [product analytics](../product_analytics/index.md), also check that your visualization's Cube query is valid.
diff --git a/doc/user/analytics/dora_metrics.md b/doc/user/analytics/dora_metrics.md
index 53a25acbca5..372ea0a5807 100644
--- a/doc/user/analytics/dora_metrics.md
+++ b/doc/user/analytics/dora_metrics.md
@@ -121,7 +121,7 @@ GitLab calculates this as the number of incidents divided by the number of deplo
- [GitLab incidents](../../operations/incident_management/incidents.md) are tracked.
- All incidents are related to a production environment.
- Incidents and deployments have a strictly one-to-one relationship. An incident is related to only one production deployment, and any production deployment is related to no
-more than one incident.
+ more than one incident.
### How to improve change failure rate
diff --git a/doc/user/analytics/merge_request_analytics.md b/doc/user/analytics/merge_request_analytics.md
index 5b5b1ec002d..0d2c375f7ae 100644
--- a/doc/user/analytics/merge_request_analytics.md
+++ b/doc/user/analytics/merge_request_analytics.md
@@ -46,8 +46,8 @@ To view the number of merge requests merged during a specific date range:
1. Select a parameter.
1. Select a value or enter text to refine the results.
1. To adjust the date range:
- - In the **From** field, select a start date.
- - In the **To** field, select an end date.
+ - In the **From** field, select a start date.
+ - In the **To** field, select an end date.
The **Throughput** chart shows issues closed or merge requests merged (not closed) over a period of
time.
@@ -75,4 +75,4 @@ To view **Mean time to merge**:
1. On the left sidebar, select **Search or go to** and find your project.
1. Select **Analyze > Merge request analytics**. The **Mean time to merge** number
-is displayed on the dashboard.
+ is displayed on the dashboard.
diff --git a/doc/user/application_security/api_fuzzing/create_har_files.md b/doc/user/application_security/api_fuzzing/create_har_files.md
index 9c16c70c78f..01515a90653 100644
--- a/doc/user/application_security/api_fuzzing/create_har_files.md
+++ b/doc/user/application_security/api_fuzzing/create_har_files.md
@@ -105,7 +105,7 @@ responses in HAR format.
#### Create a HAR file with Fiddler
1. Go to the [Fiddler home page](https://www.telerik.com/fiddler) and sign in. If you don't already
-have an account, first create an account.
+ have an account, first create an account.
1. Browse pages that call an API. Fiddler automatically captures the requests.
1. Select one or more requests, then from the context menu, select **Export > Selected Sessions**.
1. In the **Choose Format** dropdown list select **HTTPArchive v1.2**.
diff --git a/doc/user/application_security/api_fuzzing/index.md b/doc/user/application_security/api_fuzzing/index.md
index 735b2356780..cab8c926def 100644
--- a/doc/user/application_security/api_fuzzing/index.md
+++ b/doc/user/application_security/api_fuzzing/index.md
@@ -498,15 +498,15 @@ The following is a summary of the variable scopes supported by the Postman Clien
- **Global Environment (Global) scope** is a special pre-defined environment that is available throughout a workspace. We can also refer to the _global environment_ scope as the _global_ scope. The Postman Client allows exporting the global environment into a JSON file, which can be used with API Fuzzing.
- **Environment scope** is a named group of variables created by a user in the Postman Client.
-The Postman Client supports a single active environment along with the global environment. The variables defined in an active user-created environment take precedence over variables defined in the global environment. The Postman Client allows exporting your environment into a JSON file, which can be used with API Fuzzing.
+ The Postman Client supports a single active environment along with the global environment. The variables defined in an active user-created environment take precedence over variables defined in the global environment. The Postman Client allows exporting your environment into a JSON file, which can be used with API Fuzzing.
- **Collection scope** is a group of variables declared in a given collection. The collection variables are available to the collection where they have been declared and the nested requests or collections. Variables defined in the collection scope take precedence over the _global environment_ scope and also the _environment_ scope.
-The Postman Client can export one or more collections into a JSON file, this JSON file contains selected collections, requests, and collection variables.
+ The Postman Client can export one or more collections into a JSON file, this JSON file contains selected collections, requests, and collection variables.
- **API Fuzzing Scope** is a new scope added by API Fuzzing to allow users to provide extra variables, or override variables defined in other supported scopes. This scope is not supported by Postman. The _API Fuzzing Scope_ variables are provided using a [custom JSON file format](#api-fuzzing-scope-custom-json-file-format).
- Override values defined in the environment or collection
- Defining variables from scripts
- Define a single row of data from the unsupported _data scope_
- **Data scope** is a group of variables in which their name and values come from JSON or CSV files. A Postman collection runner like [Newman](https://learning.postman.com/docs/running-collections/using-newman-cli/command-line-integration-with-newman/) or [Postman Collection Runner](https://learning.postman.com/docs/running-collections/intro-to-collection-runs/) executes the requests in a collection as many times as entries have the JSON or CSV file. A good use case for these variables is to automate tests using scripts in Postman.
-API Fuzzing does **not** support reading data from a CSV or JSON file.
+ API Fuzzing does **not** support reading data from a CSV or JSON file.
- **Local scope** are variables that are defined in Postman scripts. API Fuzzing does **not** support Postman scripts and by extension, variables defined in scripts. You can still provide values for the script-defined variables by defining them in one of the supported scopes, or our custom JSON format.
Not all scopes are supported by API Fuzzing and variables defined in scripts are not supported. The following table is sorted by broadest scope to narrowest scope.
diff --git a/doc/user/application_security/container_scanning/index.md b/doc/user/application_security/container_scanning/index.md
index 8af262e564b..a3f12d157f9 100644
--- a/doc/user/application_security/container_scanning/index.md
+++ b/doc/user/application_security/container_scanning/index.md
@@ -17,7 +17,7 @@ vulnerabilities and displays them in a merge request, you can use GitLab to audi
apps.
- <i class="fa fa-youtube-play youtube" aria-hidden="true"></i>
-For an overview, see [Container Scanning](https://www.youtube.com/watch?v=C0jn2eN5MAs).
+ For an overview, see [Container Scanning](https://www.youtube.com/watch?v=C0jn2eN5MAs).
- <i class="fa fa-youtube-play youtube" aria-hidden="true"></i> For a video walkthrough, see [How to set up Container Scanning using GitLab](https://youtu.be/h__mcXpil_4?si=w_BVG68qnkL9x4l1).
Container Scanning is often considered part of Software Composition Analysis (SCA). SCA can contain
@@ -766,8 +766,7 @@ The images use data from upstream advisory databases depending on which scanner
In addition to the sources provided by these scanners, GitLab maintains the following vulnerability databases:
-- The proprietary
-[GitLab Advisory Database](https://gitlab.com/gitlab-org/security-products/gemnasium-db).
+- The proprietary [GitLab Advisory Database](https://gitlab.com/gitlab-org/security-products/gemnasium-db).
- The open source [GitLab Advisory Database (Open Source Edition)](https://gitlab.com/gitlab-org/advisories-community).
In the GitLab Ultimate tier, the data from the [GitLab Advisory Database](https://gitlab.com/gitlab-org/security-products/gemnasium-db) is merged in to augment the data from the external sources. In the GitLab Premium and Free tiers, the data from the [GitLab Advisory Database (Open Source Edition)](https://gitlab.com/gitlab-org/advisories-community) is merged in to augment the data from the external sources. This augmentation currently only applies to the analyzer images for the Trivy scanner.
diff --git a/doc/user/application_security/dast/checks/78.1.md b/doc/user/application_security/dast/checks/78.1.md
index bcb655f37ae..ae0af7b1552 100644
--- a/doc/user/application_security/dast/checks/78.1.md
+++ b/doc/user/application_security/dast/checks/78.1.md
@@ -22,7 +22,7 @@ Ensure your application does not:
- Use user-supplied information in the process name to execute.
- Use user-supplied information in an OS command execution function which does
-not escape shell meta-characters.
+ not escape shell meta-characters.
- Use user-supplied information in arguments to OS commands.
The application should have a hardcoded set of arguments that are to be passed
diff --git a/doc/user/application_security/dast_api/index.md b/doc/user/application_security/dast_api/index.md
index acc6d30acb1..e69734403ea 100644
--- a/doc/user/application_security/dast_api/index.md
+++ b/doc/user/application_security/dast_api/index.md
@@ -417,15 +417,15 @@ The following is a summary of the variable scopes supported by the Postman Clien
- **Global Environment (Global) scope** is a special pre-defined environment that is available throughout a workspace. We can also refer to the _global environment_ scope as the _global_ scope. The Postman Client allows exporting the global environment into a JSON file, which can be used with DAST API.
- **Environment scope** is a named group of variables created by a user in the Postman Client.
-The Postman Client supports a single active environment along with the global environment. The variables defined in an active user-created environment take precedence over variables defined in the global environment. The Postman Client allows exporting your environment into a JSON file, which can be used with DAST API.
+ The Postman Client supports a single active environment along with the global environment. The variables defined in an active user-created environment take precedence over variables defined in the global environment. The Postman Client allows exporting your environment into a JSON file, which can be used with DAST API.
- **Collection scope** is a group of variables declared in a given collection. The collection variables are available to the collection where they have been declared and the nested requests or collections. Variables defined in the collection scope take precedence over the _global environment_ scope and also the _environment_ scope.
-The Postman Client can export one or more collections into a JSON file, this JSON file contains selected collections, requests, and collection variables.
+ The Postman Client can export one or more collections into a JSON file, this JSON file contains selected collections, requests, and collection variables.
- **DAST API Scope** is a new scope added by DAST API to allow users to provide extra variables, or override variables defined in other supported scopes. This scope is not supported by Postman. The _DAST API Scope_ variables are provided using a [custom JSON file format](#dast-api-scope-custom-json-file-format).
- Override values defined in the environment or collection
- Defining variables from scripts
- Define a single row of data from the unsupported _data scope_
- **Data scope** is a group of variables in which their name and values come from JSON or CSV files. A Postman collection runner like [Newman](https://learning.postman.com/docs/running-collections/using-newman-cli/command-line-integration-with-newman/) or [Postman Collection Runner](https://learning.postman.com/docs/running-collections/intro-to-collection-runs/) executes the requests in a collection as many times as entries have the JSON or CSV file. A good use case for these variables is to automate tests using scripts in Postman.
-DAST API does **not** support reading data from a CSV or JSON file.
+ DAST API does **not** support reading data from a CSV or JSON file.
- **Local scope** are variables that are defined in Postman scripts. DAST API does **not** support Postman scripts and by extension, variables defined in scripts. You can still provide values for the script-defined variables by defining them in one of the supported scopes, or our custom JSON format.
Not all scopes are supported by DAST API and variables defined in scripts are not supported. The following table is sorted by broadest scope to narrowest scope.
diff --git a/doc/user/application_security/dependency_scanning/index.md b/doc/user/application_security/dependency_scanning/index.md
index c0af4f7716c..02d23d2b020 100644
--- a/doc/user/application_security/dependency_scanning/index.md
+++ b/doc/user/application_security/dependency_scanning/index.md
@@ -69,11 +69,11 @@ WARNING:
Dependency Scanning does not support runtime installation of compilers and interpreters.
- <i class="fa fa-youtube-play youtube" aria-hidden="true"></i>
-For an overview, see [Dependency Scanning](https://www.youtube.com/watch?v=TBnfbGk4c4o)
+ For an overview, see [Dependency Scanning](https://www.youtube.com/watch?v=TBnfbGk4c4o)
- <i class="fa fa-youtube-play youtube" aria-hidden="true"></i>
-For an interactive reading and how-to demo of this Dependency Scanning documentation, see [How to use dependency scanning tutorial hands-on GitLab Application Security part 3](https://youtu.be/ii05cMbJ4xQ?feature=shared)
+ For an interactive reading and how-to demo of this Dependency Scanning documentation, see [How to use dependency scanning tutorial hands-on GitLab Application Security part 3](https://youtu.be/ii05cMbJ4xQ?feature=shared)
- <i class="fa fa-youtube-play youtube" aria-hidden="true"></i>
-For other interactive reading and how-to demos, see [Get Started With GitLab Application Security Playlist](https://www.youtube.com/playlist?list=PL05JrBw4t0KrUrjDoefSkgZLx5aJYFaF9)
+ For other interactive reading and how-to demos, see [Get Started With GitLab Application Security Playlist](https://www.youtube.com/playlist?list=PL05JrBw4t0KrUrjDoefSkgZLx5aJYFaF9)
## Supported languages and package managers
diff --git a/doc/user/application_security/policies/scan-result-policies.md b/doc/user/application_security/policies/scan-result-policies.md
index e666457fd9b..33db2695732 100644
--- a/doc/user/application_security/policies/scan-result-policies.md
+++ b/doc/user/application_security/policies/scan-result-policies.md
@@ -45,10 +45,10 @@ A project can have multiple pipeline types configured. A single commit can initi
pipelines, each of which may contain a security scan.
- In GitLab 16.3 and later, the results of all completed pipelines for the latest commit in
-the merge request's source and target branch are evaluated and used to enforce the scan result policy.
-Parent-child pipelines and on-demand DAST pipelines are not considered.
+ the merge request's source and target branch are evaluated and used to enforce the scan result policy.
+ Parent-child pipelines and on-demand DAST pipelines are not considered.
- In GitLab 16.2 and earlier, only the results of the latest completed pipeline were evaluated
-when enforcing scan result policies.
+ when enforcing scan result policies.
## Scan result policy editor
diff --git a/doc/user/application_security/sast/index.md b/doc/user/application_security/sast/index.md
index 669822f3f0f..f309b0f11fb 100644
--- a/doc/user/application_security/sast/index.md
+++ b/doc/user/application_security/sast/index.md
@@ -76,14 +76,17 @@ For more information about our plans for language support in SAST, see the [cate
| Ruby | [brakeman](https://gitlab.com/gitlab-org/security-products/analyzers/brakeman) | 13.9 |
| Ruby on Rails | [brakeman](https://gitlab.com/gitlab-org/security-products/analyzers/brakeman) | 10.3 |
| Scala (any build system) | [Semgrep](https://gitlab.com/gitlab-org/security-products/analyzers/semgrep) with [GitLab-managed rules](https://gitlab.com/gitlab-org/security-products/analyzers/semgrep/#sast-rules) | 16.0 |
-| Scala<sup>1</sup> | [SpotBugs](https://gitlab.com/gitlab-org/security-products/analyzers/spotbugs) with the find-sec-bugs plugin | 11.0 (SBT) & 11.9 (Gradle, Maven) |
+| Scala <sup>1</sup> | [SpotBugs](https://gitlab.com/gitlab-org/security-products/analyzers/spotbugs) with the find-sec-bugs plugin | 11.0 (SBT) & 11.9 (Gradle, Maven) |
| Swift (iOS) | [MobSF (beta)](https://gitlab.com/gitlab-org/security-products/analyzers/mobsf) | 13.5 |
| TypeScript | [Semgrep](https://gitlab.com/gitlab-org/security-products/analyzers/semgrep) with [GitLab-managed rules](https://gitlab.com/gitlab-org/security-products/analyzers/semgrep/#sast-rules) | 13.10 |
-1. The SpotBugs-based analyzer supports [Gradle](https://gradle.org/), [Maven](https://maven.apache.org/), and [SBT](https://www.scala-sbt.org/). It can also be used with variants like the
-[Gradle wrapper](https://docs.gradle.org/current/userguide/gradle_wrapper.html),
-[Grails](https://grails.org/),
-and the [Maven wrapper](https://github.com/takari/maven-wrapper). However, SpotBugs has [limitations](https://gitlab.com/gitlab-org/gitlab/-/issues/350801) when used against [Ant](https://ant.apache.org/)-based projects. We recommend using the Semgrep-based analyzer for Ant-based Java or Scala projects.
+<html>
+<small>Footnotes:
+ <ol>
+ <li>The SpotBugs-based analyzer supports [Gradle](https://gradle.org/), [Maven](https://maven.apache.org/), and [SBT](https://www.scala-sbt.org/). It can also be used with variants like the [Gradle wrapper](https://docs.gradle.org/current/userguide/gradle_wrapper.html), [Grails](https://grails.org/), and the [Maven wrapper](https://github.com/takari/maven-wrapper). However, SpotBugs has [limitations](https://gitlab.com/gitlab-org/gitlab/-/issues/350801) when used against [Ant](https://ant.apache.org/)-based projects. We recommend using the Semgrep-based analyzer for Ant-based Java or Scala projects.</li>
+ </ol>
+</small>
+</html>
## End of supported analyzers
diff --git a/doc/user/application_security/secret_detection/index.md b/doc/user/application_security/secret_detection/index.md
index 6b4d7ebec4c..0eb79bfbe5a 100644
--- a/doc/user/application_security/secret_detection/index.md
+++ b/doc/user/application_security/secret_detection/index.md
@@ -119,8 +119,8 @@ See [Use security scanning tools with merge request pipelines](../index.md#use-s
Prerequisites:
- Linux-based GitLab Runner with the [`docker`](https://docs.gitlab.com/runner/executors/docker.html) or
-[`kubernetes`](https://docs.gitlab.com/runner/install/kubernetes.html) executor. If you're using the
-shared runners on GitLab.com, this is enabled by default.
+ [`kubernetes`](https://docs.gitlab.com/runner/install/kubernetes.html) executor. If you're using the
+ shared runners on GitLab.com, this is enabled by default.
- Windows Runners are not supported.
- CPU architectures other than amd64 are not supported.
- If you use your own runners, make sure the Docker version installed is **not** `19.03.0`. See
diff --git a/doc/user/group/access_and_permissions.md b/doc/user/group/access_and_permissions.md
index e08cfea7095..0a35d0b6b8f 100644
--- a/doc/user/group/access_and_permissions.md
+++ b/doc/user/group/access_and_permissions.md
@@ -63,8 +63,8 @@ address. This top-level group setting applies to:
- The GitLab UI, including subgroups, projects, and issues. It does not apply to GitLab Pages.
- [In GitLab 12.3 and later](https://gitlab.com/gitlab-org/gitlab/-/issues/12874), the API.
- In self-managed installations of GitLab 15.1 and later, you can also configure
-[globally-allowed IP address ranges](../../administration/settings/visibility_and_access_controls.md#configure-globally-allowed-ip-address-ranges)
-at the group level.
+ [globally-allowed IP address ranges](../../administration/settings/visibility_and_access_controls.md#configure-globally-allowed-ip-address-ranges)
+ at the group level.
Administrators can combine restricted access by IP address with
[globally-allowed IP addresses](../../administration/settings/visibility_and_access_controls.md#configure-globally-allowed-ip-address-ranges).
diff --git a/doc/user/group/devops_adoption/index.md b/doc/user/group/devops_adoption/index.md
index cdb11bb0548..978c893a0ec 100644
--- a/doc/user/group/devops_adoption/index.md
+++ b/doc/user/group/devops_adoption/index.md
@@ -21,9 +21,9 @@ DevOps Adoption shows you how groups in your organization adopt and use the most
You can use Group DevOps Adoption to:
- Identify specific subgroups that are lagging in their adoption of GitLab features, so you can guide them on
-their DevOps journey.
+ their DevOps journey.
- Find subgroups that have adopted certain features, and provide guidance to other subgroups on
-how to use those features.
+ how to use those features.
- Verify if you are getting the return on investment that you expected from GitLab.
![DevOps Adoption](img/group_devops_adoption_v14_2.png)
@@ -43,11 +43,11 @@ To view DevOps Adoption:
DevOps Adoption shows feature adoption for development, security, and operations.
-| Category | Feature |
-| --- | --- |
-| Development | Approvals<br>Code owners<br>Issues<br>Merge requests |
-| Security | DAST<br>Dependency Scanning<br>Fuzz Testing<br>SAST |
-| Operations | Deployments<br>Pipelines<br>Runners |
+| Category | Feature |
+|-------------|---------|
+| Development | Approvals<br>Code owners<br>Issues<br>Merge requests |
+| Security | DAST<br>Dependency Scanning<br>Fuzz Testing<br>SAST |
+| Operations | Deployments<br>Pipelines<br>Runners |
## Feature adoption
diff --git a/doc/user/group/insights/index.md b/doc/user/group/insights/index.md
index 6ca37cb9a2c..ad93e783b88 100644
--- a/doc/user/group/insights/index.md
+++ b/doc/user/group/insights/index.md
@@ -74,7 +74,7 @@ GitLab reads insights from the
To configure group insights:
1. Create a new file [`.gitlab/insights.yml`](../../project/insights/index.md#configure-project-insights)
-in a project that belongs to your group.
+ in a project that belongs to your group.
1. On the left sidebar, select **Search or go to** and find your group.
1. Select **Settings > General**.
1. Expand **Analytics** and find the **Insights** section.
diff --git a/doc/user/group/subgroups/index.md b/doc/user/group/subgroups/index.md
index baa8f11240f..a43de3ef73b 100644
--- a/doc/user/group/subgroups/index.md
+++ b/doc/user/group/subgroups/index.md
@@ -52,7 +52,7 @@ graph TD
Prerequisites:
- To view private nested subgroups, you must be a direct or inherited member of
-the private subgroup.
+ the private subgroup.
To view the subgroups of a group:
diff --git a/doc/user/group/value_stream_analytics/index.md b/doc/user/group/value_stream_analytics/index.md
index 0fdd572ed7c..af391de7bcb 100644
--- a/doc/user/group/value_stream_analytics/index.md
+++ b/doc/user/group/value_stream_analytics/index.md
@@ -305,7 +305,7 @@ In GitLab 13.8 and earlier, deployment frequency metrics are calculated based on
Prerequisites:
- To view deployment metrics, you must have a
-[production environment configured](#how-value-stream-analytics-identifies-the-production-environment).
+ [production environment configured](#how-value-stream-analytics-identifies-the-production-environment).
To view lifecycle metrics:
@@ -442,11 +442,11 @@ After you create a value stream, you can customize it to suit your purposes. To
1. In the upper-right corner, select the dropdown list, then select a value stream.
1. Next to the value stream dropdown list, select **Edit**.
1. Optional:
- - Rename the value stream.
- - Hide or re-order default stages.
- - Remove existing custom stages.
- - To add new stages, select **Add another stage**.
- - Select the start and end events for the stage.
+ - Rename the value stream.
+ - Hide or re-order default stages.
+ - Remove existing custom stages.
+ - To add new stages, select **Add another stage**.
+ - Select the start and end events for the stage.
1. Optional. To undo any modifications, select **Restore value stream defaults**.
1. Select **Save Value Stream**.
diff --git a/doc/user/product_analytics/index.md b/doc/user/product_analytics/index.md
index 1a628cf9d15..54120ff2330 100644
--- a/doc/user/product_analytics/index.md
+++ b/doc/user/product_analytics/index.md
@@ -106,7 +106,7 @@ To onboard a project:
1. Select **Analyze > Analytics dashboards**.
1. Under **Product analytics**, select **Set up**.
1. Select **Set up product analytics**.
-Your instance is being created, and the project onboarded.
+ Your instance is being created, and the project onboarded.
## Instrument your application
diff --git a/doc/user/product_analytics/instrumentation/browser_sdk.md b/doc/user/product_analytics/instrumentation/browser_sdk.md
index 6bc9a9ef234..b9cfbc5b2df 100644
--- a/doc/user/product_analytics/instrumentation/browser_sdk.md
+++ b/doc/user/product_analytics/instrumentation/browser_sdk.md
@@ -92,7 +92,7 @@ interface GitLabClientSDKOptions {
### Plugins
- `Client Hints`: An alternative to tracking the User Agent, which is particularly useful in browsers that are freezing the User Agent string.
-Enabling this plugin will automatically capture the following context:
+ Enabling this plugin will automatically capture the following context:
For example,
[iglu:org.ietf/http_client_hints/jsonschema/1-0-0](https://github.com/snowplow/iglu-central/blob/master/schemas/org.ietf/http_client_hints/jsonschema/1-0-0)
@@ -163,12 +163,12 @@ glClient.page(eventAttributes);
The `eventAttributes` object supports the following optional properties:
-| Property | Type | Description |
-| :--------------- | :-------------------------- | :---------------------------------------------------------------------------- |
-| `title` | `String` | Override the default page title. |
-| `contextCallback` | `Function` | A callback that fires on the page view. |
-| `context` | `Object` | Add context (additional information) on the page view. |
-| `timestamp` | `timestamp` | Set the true timestamp or overwrite the device-sent timestamp on an event. |
+| Property | Type | Description |
+|:------------------|:------------|:------------|
+| `title` | `String` | Override the default page title. |
+| `contextCallback` | `Function` | A callback that fires on the page view. |
+| `context` | `Object` | Add context (additional information) on the page view. |
+| `timestamp` | `timestamp` | Set the true timestamp or overwrite the device-sent timestamp on an event. |
### `track`
diff --git a/doc/user/project/members/share_project_with_groups.md b/doc/user/project/members/share_project_with_groups.md
index 22881a14704..bf8a7468199 100644
--- a/doc/user/project/members/share_project_with_groups.md
+++ b/doc/user/project/members/share_project_with_groups.md
@@ -16,7 +16,7 @@ For a project that was created by `Group 1`:
- The members of `Group 1` have access to the project.
- The owner of `Group 1` can invite `Group 2` to the project.
-This way, members of both `Group 1` and `Group 2` have access to the shared project.
+ This way, members of both `Group 1` and `Group 2` have access to the shared project.
## Prerequisites
@@ -31,7 +31,7 @@ In addition:
- You must be a member of the group or the subgroup being invited.
- The [visibility level](../../public_access.md) of the group you're inviting
-must be at least as restrictive as that of the project. For example, you can invite:
+ must be at least as restrictive as that of the project. For example, you can invite:
- A _private_ group to a _private_ project
- A _private_ group to an _internal_ project.
- A _private_ group to a _public_ project.
@@ -46,12 +46,9 @@ must be at least as restrictive as that of the project. For example, you can inv
## Share a project with a group
-> - [Changed](https://gitlab.com/gitlab-org/gitlab/-/issues/247208) in GitLab 13.11 from a form to a modal
- window [with a flag](../../feature_flags.md). Disabled by default.
-> - Modal window [enabled on GitLab.com and self-managed](https://gitlab.com/gitlab-org/gitlab/-/issues/247208)
- in GitLab 14.8.
-> - [Generally available](https://gitlab.com/gitlab-org/gitlab/-/issues/352526) in GitLab 14.9.
- [Feature flag `invite_members_group_modal`](https://gitlab.com/gitlab-org/gitlab/-/issues/352526) removed.
+> - [Changed](https://gitlab.com/gitlab-org/gitlab/-/issues/247208) in GitLab 13.11 from a form to a modal window [with a flag](../../feature_flags.md). Disabled by default.
+> - Modal window [enabled on GitLab.com and self-managed](https://gitlab.com/gitlab-org/gitlab/-/issues/247208) in GitLab 14.8.
+> - [Generally available](https://gitlab.com/gitlab-org/gitlab/-/issues/352526) in GitLab 14.9. [Feature flag `invite_members_group_modal`](https://gitlab.com/gitlab-org/gitlab/-/issues/352526) removed.
Similar to how you [share a group with another group](../../group/manage.md#share-a-group-with-another-group),
you can share a project with a group by inviting that group to the project.
diff --git a/doc/user/project/service_desk/configure.md b/doc/user/project/service_desk/configure.md
index d002bde65bf..721508acb24 100644
--- a/doc/user/project/service_desk/configure.md
+++ b/doc/user/project/service_desk/configure.md
@@ -63,14 +63,14 @@ For example, you can format the emails to include a header and footer in accorda
organization's brand guidelines. You can also include the following placeholders to display dynamic
content specific to the Service Desk ticket or your GitLab instance.
-| Placeholder | `thank_you.md` | `new_note.md` | Description
-| ---------------------- | ---------------------- | ---------------------- | -----------
-| `%{ISSUE_ID}` | **{check-circle}** Yes | **{check-circle}** Yes | Ticket IID.
-| `%{ISSUE_PATH}` | **{check-circle}** Yes | **{check-circle}** Yes | Project path appended with the ticket IID.
-| `%{ISSUE_URL}` | **{check-circle}** Yes | **{check-circle}** Yes | URL of the ticket. External participants can only view the ticket if the project is public and ticket is not confidential (Service Desk tickets are confidential by default).
-| `%{ISSUE_DESCRIPTION}` | **{check-circle}** Yes | **{check-circle}** Yes | Ticket description. If a user has edited the description, it may contain sensitive information that is not intended to be delivered to external participants. Use this placeholder with care and ideally only if you never modify descriptions or your team is aware of the template design.
-| `%{UNSUBSCRIBE_URL}` | **{check-circle}** Yes | **{check-circle}** Yes | Unsubscribe URL.
-| `%{NOTE_TEXT}` | **{dotted-circle}** No | **{check-circle}** Yes | The new comment added to the ticket by a user. Take care to include this placeholder in `new_note.md`. Otherwise, the requesters may never see the updates on their Service Desk ticket.
+| Placeholder | `thank_you.md` | `new_note.md` | Description |
+|------------------------|------------------------|------------------------|-------------|
+| `%{ISSUE_ID}` | **{check-circle}** Yes | **{check-circle}** Yes | Ticket IID. |
+| `%{ISSUE_PATH}` | **{check-circle}** Yes | **{check-circle}** Yes | Project path appended with the ticket IID. |
+| `%{ISSUE_URL}` | **{check-circle}** Yes | **{check-circle}** Yes | URL of the ticket. External participants can only view the ticket if the project is public and ticket is not confidential (Service Desk tickets are confidential by default). |
+| `%{ISSUE_DESCRIPTION}` | **{check-circle}** Yes | **{check-circle}** Yes | Ticket description. If a user has edited the description, it may contain sensitive information that is not intended to be delivered to external participants. Use this placeholder with care and ideally only if you never modify descriptions or your team is aware of the template design. |
+| `%{UNSUBSCRIBE_URL}` | **{check-circle}** Yes | **{check-circle}** Yes | Unsubscribe URL. |
+| `%{NOTE_TEXT}` | **{dotted-circle}** No | **{check-circle}** Yes | The new comment added to the ticket by a user. Take care to include this placeholder in `new_note.md`. Otherwise, the requesters may never see the updates on their Service Desk ticket. |
### Thank you email
@@ -947,7 +947,7 @@ or completely separately.
::EndTabs
1. GitLab offers two methods to transport emails from `mail_room` to the GitLab
-application. You can configure the `delivery_method` for each email setting individually:
+ application. You can configure the `delivery_method` for each email setting individually:
1. Recommended: `webhook` (default in GitLab 15.3 and later) sends the email payload via an API POST request to your GitLab
application. It uses a shared token to authenticate. If you choose this method,
make sure the `mail_room` process can access the API endpoint and distribute the shared
diff --git a/doc/user/project/use_project_as_go_package.md b/doc/user/project/use_project_as_go_package.md
index 54e9eac7756..bf11cd784cb 100644
--- a/doc/user/project/use_project_as_go_package.md
+++ b/doc/user/project/use_project_as_go_package.md
@@ -10,7 +10,7 @@ Prerequisites:
- Contact your administrator to enable the [GitLab Go Proxy](../packages/go_proxy/index.md).
- To use a private project in a subgroup as a Go package, you must [authenticate Go requests](#authenticate-go-requests-to-private-projects). Go requests that are not authenticated cause
-`go get` to fail. You don't need to authenticate Go requests for projects that are not in subgroups.
+ `go get` to fail. You don't need to authenticate Go requests for projects that are not in subgroups.
To use a project as a Go package, use the `go get` and `godoc.org` discovery requests. You can use the meta tags:
diff --git a/doc/user/project/working_with_projects.md b/doc/user/project/working_with_projects.md
index c8572be96ab..7d8305519e4 100644
--- a/doc/user/project/working_with_projects.md
+++ b/doc/user/project/working_with_projects.md
@@ -309,7 +309,7 @@ Prerequisites:
1. [Create a group](../group/index.md#create-a-group) to track membership of your project.
1. [Set up LDAP synchronization](../../administration/auth/ldap/ldap_synchronization.md) for that group.
1. To use LDAP groups to manage access to a project,
-[add the LDAP-synchronized group as a member](../group/manage.md) to the project.
+ [add the LDAP-synchronized group as a member](../group/manage.md) to the project.
## Troubleshooting
diff --git a/gems/gitlab-database-load_balancing/Gemfile.lock b/gems/gitlab-database-load_balancing/Gemfile.lock
index b2d66b9a386..a6148494c1b 100644
--- a/gems/gitlab-database-load_balancing/Gemfile.lock
+++ b/gems/gitlab-database-load_balancing/Gemfile.lock
@@ -25,7 +25,7 @@ PATH
actionview (>= 6.1.7.2)
activesupport (>= 6.1.7.2)
addressable (~> 2.8)
- nokogiri (~> 1.15.2)
+ nokogiri (>= 1.15)
rake (~> 13.0)
PATH
diff --git a/gems/gitlab-http/Gemfile.lock b/gems/gitlab-http/Gemfile.lock
index 5fb1963d8f3..3f48e2157e5 100644
--- a/gems/gitlab-http/Gemfile.lock
+++ b/gems/gitlab-http/Gemfile.lock
@@ -13,7 +13,7 @@ PATH
actionview (>= 6.1.7.2)
activesupport (>= 6.1.7.2)
addressable (~> 2.8)
- nokogiri (~> 1.15.2)
+ nokogiri (>= 1.15)
rake (~> 13.0)
PATH
diff --git a/gems/gitlab-utils/Gemfile.lock b/gems/gitlab-utils/Gemfile.lock
index ef7c2d57c7a..16e7854c817 100644
--- a/gems/gitlab-utils/Gemfile.lock
+++ b/gems/gitlab-utils/Gemfile.lock
@@ -13,7 +13,7 @@ PATH
actionview (>= 6.1.7.2)
activesupport (>= 6.1.7.2)
addressable (~> 2.8)
- nokogiri (~> 1.15.2)
+ nokogiri (>= 1.15)
rake (~> 13.0)
GEM
@@ -77,7 +77,7 @@ GEM
method_source (1.0.0)
mini_portile2 (2.8.2)
minitest (5.18.1)
- nokogiri (1.15.2)
+ nokogiri (1.16.0)
mini_portile2 (~> 2.8.2)
racc (~> 1.4)
parallel (1.23.0)
diff --git a/gems/gitlab-utils/gitlab-utils.gemspec b/gems/gitlab-utils/gitlab-utils.gemspec
index d5f6deb7fe6..f0767cb7053 100644
--- a/gems/gitlab-utils/gitlab-utils.gemspec
+++ b/gems/gitlab-utils/gitlab-utils.gemspec
@@ -21,7 +21,7 @@ Gem::Specification.new do |spec|
spec.add_runtime_dependency "actionview", ">= 6.1.7.2"
spec.add_runtime_dependency "activesupport", ">= 6.1.7.2"
spec.add_runtime_dependency "addressable", "~> 2.8"
- spec.add_runtime_dependency "nokogiri", "~> 1.15.2"
+ spec.add_runtime_dependency "nokogiri", ">= 1.15"
spec.add_runtime_dependency "rake", "~> 13.0"
spec.add_development_dependency "factory_bot_rails", "~> 6.2.0"
diff --git a/lib/gitlab/background_migration/backfill_vs_code_settings_version.rb b/lib/gitlab/background_migration/backfill_vs_code_settings_version.rb
new file mode 100644
index 00000000000..83dbf5f3852
--- /dev/null
+++ b/lib/gitlab/background_migration/backfill_vs_code_settings_version.rb
@@ -0,0 +1,38 @@
+# frozen_string_literal: true
+
+module Gitlab
+ module BackgroundMigration
+ class BackfillVsCodeSettingsVersion < BatchedMigrationJob
+ feature_category :web_ide
+ operation_name :backfill_vs_code_settings_version
+ scope_to ->(relation) { relation.where(version: [nil, 0]) }
+
+ class VsCodeSetting < ApplicationRecord
+ DEFAULT_SETTING_VERSIONS = {
+ 'settings' => 2,
+ 'extensions' => 6,
+ 'globalState' => 1,
+ 'keybindings' => 2,
+ 'snippets' => 1,
+ 'machines' => 1,
+ 'tasks' => 1,
+ 'profiles' => 2
+ }.freeze
+
+ self.table_name = 'vs_code_settings'
+ end
+
+ def perform
+ each_sub_batch do |sub_batch|
+ vs_code_settings = sub_batch.map do |vs_code_setting|
+ version = VsCodeSetting::DEFAULT_SETTING_VERSIONS[vs_code_setting.setting_type]
+
+ vs_code_setting.attributes.merge(version: version)
+ end
+
+ VsCodeSetting.upsert_all(vs_code_settings)
+ end
+ end
+ end
+ end
+end
diff --git a/lib/gitlab/database/migrations/squasher.rb b/lib/gitlab/database/migrations/squasher.rb
index 98fdf873aa5..3bec9eabbe2 100644
--- a/lib/gitlab/database/migrations/squasher.rb
+++ b/lib/gitlab/database/migrations/squasher.rb
@@ -1,6 +1,6 @@
# frozen_string_literal: true
-require 'set'
+require 'set' # rubocop:disable Lint/RedundantRequireStatement -- Ruby 3.1 and earlier needs this. Drop this line after Ruby 3.2+ is only supported.
module Gitlab
module Database
diff --git a/lib/gitlab/diff/rendered/notebook/diff_file_helper.rb b/lib/gitlab/diff/rendered/notebook/diff_file_helper.rb
index ad709a79f30..b4b7d572901 100644
--- a/lib/gitlab/diff/rendered/notebook/diff_file_helper.rb
+++ b/lib/gitlab/diff/rendered/notebook/diff_file_helper.rb
@@ -4,7 +4,7 @@ module Gitlab
module Rendered
module Notebook
module DiffFileHelper
- require 'set'
+ require 'set' # rubocop:disable Lint/RedundantRequireStatement -- Ruby 3.1 and earlier needs this. Drop this line after Ruby 3.2+ is only supported.
EMBEDDED_IMAGE_PATTERN = ' ![](data:image'
diff --git a/lib/gitlab/error_tracking/processor/sidekiq_processor.rb b/lib/gitlab/error_tracking/processor/sidekiq_processor.rb
index cc8cfd827f1..a0b6318e066 100644
--- a/lib/gitlab/error_tracking/processor/sidekiq_processor.rb
+++ b/lib/gitlab/error_tracking/processor/sidekiq_processor.rb
@@ -1,6 +1,6 @@
# frozen_string_literal: true
-require 'set'
+require 'set' # rubocop:disable Lint/RedundantRequireStatement -- Ruby 3.1 and earlier needs this. Drop this line after Ruby 3.2+ is only supported.
module Gitlab
module ErrorTracking
diff --git a/lib/gitlab/file_detector.rb b/lib/gitlab/file_detector.rb
index b586c4b5892..0a56cde8cad 100644
--- a/lib/gitlab/file_detector.rb
+++ b/lib/gitlab/file_detector.rb
@@ -1,6 +1,6 @@
# frozen_string_literal: true
-require 'set'
+require 'set' # rubocop:disable Lint/RedundantRequireStatement -- Ruby 3.1 and earlier needs this. Drop this line after Ruby 3.2+ is only supported.
module Gitlab
# Module that can be used to detect if a path points to a special file such as
diff --git a/lib/gitlab/instrumentation/redis.rb b/lib/gitlab/instrumentation/redis.rb
index 590153ad9cd..3bc9f09d977 100644
--- a/lib/gitlab/instrumentation/redis.rb
+++ b/lib/gitlab/instrumentation/redis.rb
@@ -33,6 +33,10 @@ module Gitlab
super.merge(*STORAGES.flat_map(&:payload))
end
+ def storage_hash
+ @storage_hash ||= STORAGES.index_by { |k| k.name.demodulize }
+ end
+
def detail_store
STORAGES.flat_map do |storage|
storage.detail_store.map { |details| details.merge(storage: storage.name.demodulize) }
diff --git a/lib/gitlab/instrumentation/redis_client_middleware.rb b/lib/gitlab/instrumentation/redis_client_middleware.rb
new file mode 100644
index 00000000000..a49d8370d4c
--- /dev/null
+++ b/lib/gitlab/instrumentation/redis_client_middleware.rb
@@ -0,0 +1,46 @@
+# frozen_string_literal: true
+
+# This module references https://github.com/redis-rb/redis-client#instrumentation-and-middlewares
+# implementing `call`, and `call_pipelined`.
+module Gitlab
+ module Instrumentation
+ module RedisClientMiddleware
+ include RedisHelper
+
+ def call(command, redis_config)
+ instrumentation = instrumentation_class(redis_config)
+
+ result = instrument_call([command], instrumentation) do
+ super
+ end
+
+ measure_io(command, result, instrumentation) if ::RequestStore.active?
+
+ result
+ end
+
+ def call_pipelined(commands, redis_config)
+ instrumentation = instrumentation_class(redis_config)
+
+ result = instrument_call(commands, instrumentation, true) do
+ super
+ end
+
+ measure_io(commands, result, instrumentation) if ::RequestStore.active?
+
+ result
+ end
+
+ private
+
+ def measure_io(command, result, instrumentation)
+ measure_write_size(command, instrumentation)
+ measure_read_size(result, instrumentation)
+ end
+
+ def instrumentation_class(config)
+ Gitlab::Instrumentation::Redis.storage_hash[config.custom[:instrumentation_class]]
+ end
+ end
+ end
+end
diff --git a/lib/gitlab/instrumentation/redis_helper.rb b/lib/gitlab/instrumentation/redis_helper.rb
index ba1c8132250..392a7ebe852 100644
--- a/lib/gitlab/instrumentation/redis_helper.rb
+++ b/lib/gitlab/instrumentation/redis_helper.rb
@@ -15,7 +15,7 @@ module Gitlab
end
yield
- rescue ::Redis::BaseError => ex
+ rescue ::Redis::BaseError, ::RedisClient::Error => ex
if ex.message.start_with?('MOVED', 'ASK')
instrumentation_class.instance_count_cluster_redirection(ex)
else
diff --git a/lib/gitlab/memory/watchdog/handlers/sidekiq_handler.rb b/lib/gitlab/memory/watchdog/handlers/sidekiq_handler.rb
index 47ed608c576..9da662d5f1b 100644
--- a/lib/gitlab/memory/watchdog/handlers/sidekiq_handler.rb
+++ b/lib/gitlab/memory/watchdog/handlers/sidekiq_handler.rb
@@ -18,8 +18,8 @@ module Gitlab
return true unless @alive
# Tell sidekiq to restart itself
- # Keep extra safe to wait `Sidekiq[:timeout] + 2` seconds before SIGKILL
- send_signal(:TERM, $$, 'gracefully shut down', Sidekiq[:timeout] + 2)
+ # Keep extra safe to wait `Sidekiq.default_configuration[:timeout] + 2` seconds before SIGKILL
+ send_signal(:TERM, $$, 'gracefully shut down', Sidekiq.default_configuration[:timeout] + 2)
return true unless @alive
# Ideally we should never reach this condition
diff --git a/lib/gitlab/patch/sidekiq_cron_poller.rb b/lib/gitlab/patch/sidekiq_cron_poller.rb
index 3f962c47ae9..8c2d1181611 100644
--- a/lib/gitlab/patch/sidekiq_cron_poller.rb
+++ b/lib/gitlab/patch/sidekiq_cron_poller.rb
@@ -7,7 +7,7 @@
require 'sidekiq/version'
require 'sidekiq/cron/version'
-if Gem::Version.new(Sidekiq::VERSION) != Gem::Version.new('6.5.12')
+if Gem::Version.new(Sidekiq::VERSION) != Gem::Version.new('7.1.6')
raise 'New version of sidekiq detected, please remove or update this patch'
end
diff --git a/lib/gitlab/rack_attack/user_allowlist.rb b/lib/gitlab/rack_attack/user_allowlist.rb
index f3043f44091..c1da1fabef5 100644
--- a/lib/gitlab/rack_attack/user_allowlist.rb
+++ b/lib/gitlab/rack_attack/user_allowlist.rb
@@ -1,6 +1,6 @@
# frozen_string_literal: true
-require 'set'
+require 'set' # rubocop:disable Lint/RedundantRequireStatement -- Ruby 3.1 and earlier needs this. Drop this line after Ruby 3.2+ is only supported.
module Gitlab
module RackAttack
diff --git a/lib/gitlab/redis/wrapper.rb b/lib/gitlab/redis/wrapper.rb
index b2494c7d43c..1f5d8ab7c9b 100644
--- a/lib/gitlab/redis/wrapper.rb
+++ b/lib/gitlab/redis/wrapper.rb
@@ -19,7 +19,7 @@ module Gitlab
InvalidPathError = Class.new(StandardError)
class << self
- delegate :params, :url, :store, :encrypted_secrets, to: :new
+ delegate :params, :url, :store, :encrypted_secrets, :redis_client_params, to: :new
def with
pool.with { |redis| yield redis }
@@ -101,6 +101,33 @@ module Gitlab
redis_store_options
end
+ # redis_client_params modifies redis_store_options to be compatible with redis-client
+ # TODO: when redis-rb is updated to v5, there is no need to support 2 types of config format
+ def redis_client_params
+ options = redis_store_options
+
+ # avoid passing classes into options as Sidekiq scrubs the options with Marshal.dump + Marshal.load
+ # ref https://github.com/sidekiq/sidekiq/blob/v7.1.6/lib/sidekiq/redis_connection.rb#L37
+ #
+ # this does not play well with spring enabled as the forked process references the old constant
+ # we use strings to look up Gitlab::Instrumentation::Redis.storage_hash as a bypass
+ options[:custom] = { instrumentation_class: self.class.store_name }
+
+ # TODO: add support for cluster when upgrading to redis-rb v5.y.z. We do not need cluster support
+ # as the Sidekiq workload should not and does not run in a Redis Cluster;
+ # support is to be added in https://gitlab.com/gitlab-org/gitlab/-/merge_requests/134862
+ if options[:sentinels]
+ # name is required in RedisClient::SentinelConfig
+ # https://github.com/redis-rb/redis-client/blob/1ab081c1d0e47df5d55e011c9390c70b2eef6731/lib/redis_client/sentinel_config.rb#L17
+ options[:name] = options[:host]
+ options.except(:scheme, :instrumentation_class, :host, :port)
+ else
+ # remove disallowed keys as seen in
+ # https://github.com/redis-rb/redis-client/blob/1ab081c1d0e47df5d55e011c9390c70b2eef6731/lib/redis_client/config.rb#L21
+ options.except(:scheme, :instrumentation_class)
+ end
+ end
+
def url
raw_config_hash[:url]
end
@@ -188,6 +215,7 @@ module Gitlab
config
else
redis_hash = ::Redis::Store::Factory.extract_host_options_from_uri(redis_url)
+ redis_hash[:ssl] = true if redis_hash[:scheme] == 'rediss'
# order is important here, sentinels must be after the connection keys.
# {url: ..., port: ..., sentinels: [...]}
redis_hash.merge(config)
diff --git a/lib/gitlab/runtime.rb b/lib/gitlab/runtime.rb
index 269fb74ceca..e560db7ace8 100644
--- a/lib/gitlab/runtime.rb
+++ b/lib/gitlab/runtime.rb
@@ -89,12 +89,11 @@ module Gitlab
if puma? && ::Puma.respond_to?(:cli_config)
threads += ::Puma.cli_config.options[:max_threads]
elsif sidekiq?
- # 2 extra threads for the pollers in Sidekiq and Sidekiq Cron:
- # https://github.com/ondrejbartas/sidekiq-cron#under-the-hood
+ # Sidekiq has an internal connection pool to handle heartbeat, scheduled polls,
+ # cron polls and housekeeping. max_threads can match the Sidekiq process's concurrency.
#
- # These threads execute Sidekiq client middleware when jobs
- # are enqueued and those can access DB / Redis.
- threads += Sidekiq[:concurrency] + 2
+ # The Sidekiq main thread does not perform GitLab-related logic, so we can ignore it.
+ threads = Sidekiq.default_configuration[:concurrency]
end
if puma?
diff --git a/lib/gitlab/sidekiq_config.rb b/lib/gitlab/sidekiq_config.rb
index b2ff80b2357..f4dabb7498f 100644
--- a/lib/gitlab/sidekiq_config.rb
+++ b/lib/gitlab/sidekiq_config.rb
@@ -1,6 +1,7 @@
# frozen_string_literal: true
require 'yaml'
+require 'sidekiq/capsule'
module Gitlab
module SidekiqConfig
@@ -161,7 +162,7 @@ module Gitlab
# the current Sidekiq process
def current_worker_queue_mappings
worker_queue_mappings
- .select { |worker, queue| Sidekiq[:queues].include?(queue) }
+ .select { |worker, queue| Sidekiq.default_configuration.queues.include?(queue) }
.to_h
end
diff --git a/lib/gitlab/sidekiq_config/cli_methods.rb b/lib/gitlab/sidekiq_config/cli_methods.rb
index c49180a6c1c..5c69a87f366 100644
--- a/lib/gitlab/sidekiq_config/cli_methods.rb
+++ b/lib/gitlab/sidekiq_config/cli_methods.rb
@@ -1,7 +1,7 @@
# frozen_string_literal: true
require 'yaml'
-require 'set'
+require 'set' # rubocop:disable Lint/RedundantRequireStatement -- Ruby 3.1 and earlier needs this. Drop this line after Ruby 3.2+ is only supported.
# These methods are called by `sidekiq-cluster`, which runs outside of
# the bundler/Rails context, so we cannot use any gem or Rails methods.
diff --git a/lib/gitlab/sidekiq_logging/structured_logger.rb b/lib/gitlab/sidekiq_logging/structured_logger.rb
index c65d9c5ddd5..4754417639f 100644
--- a/lib/gitlab/sidekiq_logging/structured_logger.rb
+++ b/lib/gitlab/sidekiq_logging/structured_logger.rb
@@ -16,11 +16,11 @@ module Gitlab
ActiveRecord::LogSubscriber.reset_runtime
- Sidekiq.logger.info log_job_start(job, base_payload)
+ @logger.info log_job_start(job, base_payload)
yield
- Sidekiq.logger.info log_job_done(job, started_time, base_payload)
+ @logger.info log_job_done(job, started_time, base_payload)
rescue Sidekiq::JobRetry::Handled => job_exception
# Sidekiq::JobRetry::Handled is raised by the internal Sidekiq
# processor. It is a wrapper around real exception indicating an
@@ -29,11 +29,11 @@ module Gitlab
#
# For more information:
# https://github.com/mperham/sidekiq/blob/v5.2.7/lib/sidekiq/processor.rb#L173
- Sidekiq.logger.warn log_job_done(job, started_time, base_payload, job_exception.cause || job_exception)
+ @logger.warn log_job_done(job, started_time, base_payload, job_exception.cause || job_exception)
raise
rescue StandardError => job_exception
- Sidekiq.logger.warn log_job_done(job, started_time, base_payload, job_exception)
+ @logger.warn log_job_done(job, started_time, base_payload, job_exception)
raise
end
diff --git a/lib/gitlab/sidekiq_middleware/server_metrics.rb b/lib/gitlab/sidekiq_middleware/server_metrics.rb
index 37a9ed37891..e65761fc1b6 100644
--- a/lib/gitlab/sidekiq_middleware/server_metrics.rb
+++ b/lib/gitlab/sidekiq_middleware/server_metrics.rb
@@ -64,7 +64,7 @@ module Gitlab
def initialize_process_metrics
metrics = self.metrics
- metrics[:sidekiq_concurrency].set({}, Sidekiq[:concurrency].to_i)
+ metrics[:sidekiq_concurrency].set({}, Sidekiq.default_configuration[:concurrency].to_i)
return unless ::Feature.enabled?(:sidekiq_job_completion_metric_initialize)
diff --git a/lib/gitlab/sidekiq_migrate_jobs.rb b/lib/gitlab/sidekiq_migrate_jobs.rb
index 2467dd7ca43..cf4893b8745 100644
--- a/lib/gitlab/sidekiq_migrate_jobs.rb
+++ b/lib/gitlab/sidekiq_migrate_jobs.rb
@@ -16,17 +16,14 @@ module Gitlab
# Migrate jobs in SortedSets, i.e. scheduled and retry sets.
def migrate_set(sidekiq_set)
source_queues_regex = Regexp.union(mappings.keys)
- cursor = 0
scanned = 0
migrated = 0
estimated_size = Sidekiq.redis { |c| c.zcard(sidekiq_set) }
logger&.info("Processing #{sidekiq_set} set. Estimated size: #{estimated_size}.")
- begin
- cursor, jobs = Sidekiq.redis { |c| c.zscan(sidekiq_set, cursor) }
-
- jobs.each do |(job, score)|
+ Sidekiq.redis do |c|
+ c.zscan(sidekiq_set) do |job, score|
if scanned > 0 && scanned % LOG_FREQUENCY == 0
logger&.info("In progress. Scanned records: #{scanned}. Migrated records: #{migrated}.")
end
@@ -45,7 +42,7 @@ module Gitlab
migrated += migrate_job_in_set(sidekiq_set, job, score, job_hash)
end
- end while cursor.to_i != 0
+ end
logger&.info("Done. Scanned records: #{scanned}. Migrated records: #{migrated}.")
@@ -61,7 +58,7 @@ module Gitlab
logger&.info("List of queues based on routing rules: #{routing_rules_queues}")
Sidekiq.redis do |conn|
# Redis 6 supports conn.scan_each(match: "queue:*", type: 'list')
- conn.scan_each(match: "queue:*") do |key|
+ conn.scan("MATCH", "queue:*") do |key|
# Redis 5 compatibility
next unless conn.type(key) == 'list'
@@ -101,13 +98,9 @@ module Gitlab
Sidekiq.redis do |connection|
removed = connection.zrem(sidekiq_set, job)
- if removed
- connection.zadd(sidekiq_set, score, Gitlab::Json.dump(job_hash))
+ connection.zadd(sidekiq_set, score, Gitlab::Json.dump(job_hash)) if removed > 0
- 1
- else
- 0
- end
+ removed
end
end
diff --git a/lib/gitlab/sidekiq_status.rb b/lib/gitlab/sidekiq_status.rb
index 496ed9de828..c25e4e776cd 100644
--- a/lib/gitlab/sidekiq_status.rb
+++ b/lib/gitlab/sidekiq_status.rb
@@ -37,7 +37,7 @@ module Gitlab
return unless expire
with_redis do |redis|
- redis.set(key_for(jid), 1, ex: expire)
+ redis.set(key_for(jid), 1, ex: expire.to_i)
end
end
@@ -56,7 +56,7 @@ module Gitlab
# expire - The expiration time of the Redis key.
def self.expire(jid, expire = DEFAULT_EXPIRATION)
with_redis do |redis|
- redis.expire(key_for(jid), expire)
+ redis.expire(key_for(jid), expire.to_i)
end
end
diff --git a/lib/tasks/gitlab/cleanup.rake b/lib/tasks/gitlab/cleanup.rake
index 808c88f3599..ecf5b10690a 100644
--- a/lib/tasks/gitlab/cleanup.rake
+++ b/lib/tasks/gitlab/cleanup.rake
@@ -1,7 +1,7 @@
# frozen_string_literal: true
namespace :gitlab do
- require 'set'
+ require 'set' # rubocop:disable Lint/RedundantRequireStatement -- Ruby 3.1 and earlier needs this. Drop this line after Ruby 3.2+ is only supported.
namespace :cleanup do
desc "GitLab | Cleanup | Block users that have been removed in LDAP"
diff --git a/qa/Gemfile.lock b/qa/Gemfile.lock
index 8f9f3a7ccda..a1372ed4292 100644
--- a/qa/Gemfile.lock
+++ b/qa/Gemfile.lock
@@ -5,7 +5,7 @@ PATH
actionview (>= 6.1.7.2)
activesupport (>= 6.1.7.2)
addressable (~> 2.8)
- nokogiri (~> 1.15.2)
+ nokogiri (>= 1.15)
rake (~> 13.0)
GEM
@@ -273,7 +273,7 @@ GEM
rspec-expectations (3.12.0)
diff-lcs (>= 1.2.0, < 2.0)
rspec-support (~> 3.12.0)
- rspec-mocks (3.12.0)
+ rspec-mocks (3.12.6)
diff-lcs (>= 1.2.0, < 2.0)
rspec-support (~> 3.12.0)
rspec-parameterized (1.0.0)
diff --git a/rubocop/feature_categories.rb b/rubocop/feature_categories.rb
index 5e02d974e7b..b74b1c5254e 100644
--- a/rubocop/feature_categories.rb
+++ b/rubocop/feature_categories.rb
@@ -1,6 +1,6 @@
# frozen_string_literal: true
-require 'set'
+require 'set' # rubocop:disable Lint/RedundantRequireStatement -- Ruby 3.1 and earlier needs this. Drop this line after Ruby 3.2+ is only supported.
require 'yaml'
require 'digest/sha2'
require 'did_you_mean'
diff --git a/rubocop/formatter/todo_formatter.rb b/rubocop/formatter/todo_formatter.rb
index 9e20a95ba85..dbfdec09662 100644
--- a/rubocop/formatter/todo_formatter.rb
+++ b/rubocop/formatter/todo_formatter.rb
@@ -1,6 +1,6 @@
# frozen_string_literal: true
-require 'set'
+require 'set' # rubocop:disable Lint/RedundantRequireStatement -- Ruby 3.1 and earlier needs this. Drop this line after Ruby 3.2+ is only supported.
require 'rubocop'
require 'yaml'
diff --git a/scripts/failed_tests.rb b/scripts/failed_tests.rb
index f828155f6f0..beb0a21637a 100755
--- a/scripts/failed_tests.rb
+++ b/scripts/failed_tests.rb
@@ -5,7 +5,7 @@ require 'optparse'
require 'fileutils'
require 'uri'
require 'json'
-require 'set'
+require 'set' # rubocop:disable Lint/RedundantRequireStatement -- Ruby 3.1 and earlier needs this. Drop this line after Ruby 3.2+ is only supported.
class FailedTests
DEFAULT_OPTIONS = {
diff --git a/scripts/feature_flags/used-feature-flags b/scripts/feature_flags/used-feature-flags
index 7bfe4a89634..b3005c3cba5 100755
--- a/scripts/feature_flags/used-feature-flags
+++ b/scripts/feature_flags/used-feature-flags
@@ -1,7 +1,7 @@
#!/usr/bin/env ruby
# frozen_string_literal: true
-require 'set'
+require 'set' # rubocop:disable Lint/RedundantRequireStatement -- Ruby 3.1 and earlier needs this. Drop this line after Ruby 3.2+ is only supported.
require 'fileutils'
require_relative '../../lib/gitlab_edition'
diff --git a/scripts/merge-auto-explain-logs b/scripts/merge-auto-explain-logs
index 114afc580d0..9379189a9d3 100755
--- a/scripts/merge-auto-explain-logs
+++ b/scripts/merge-auto-explain-logs
@@ -2,7 +2,7 @@
# frozen_string_literal: true
require 'json'
-require 'set'
+require 'set' # rubocop:disable Lint/RedundantRequireStatement -- Ruby 3.1 and earlier needs this. Drop this line after Ruby 3.2+ is only supported.
require 'zlib'
# Load query analyzers
diff --git a/scripts/setup/generate-as-if-foss-env.rb b/scripts/setup/generate-as-if-foss-env.rb
index ee688e4f1d0..9e93b470558 100755
--- a/scripts/setup/generate-as-if-foss-env.rb
+++ b/scripts/setup/generate-as-if-foss-env.rb
@@ -3,7 +3,7 @@
# In spec/scripts/setup/generate_as_if_foss_env_spec.rb we completely stub it
require 'gitlab' unless Object.const_defined?(:Gitlab)
-require 'set'
+require 'set' # rubocop:disable Lint/RedundantRequireStatement -- Ruby 3.1 and earlier needs this. Drop this line after Ruby 3.2+ is only supported.
class GenerateAsIfFossEnv
def initialize
diff --git a/scripts/verify-tff-mapping b/scripts/verify-tff-mapping
index 8771b112021..abbf30c7e2d 100755
--- a/scripts/verify-tff-mapping
+++ b/scripts/verify-tff-mapping
@@ -1,7 +1,7 @@
#!/usr/bin/env ruby
# frozen_string_literal: true
-require 'set'
+require 'set' # rubocop:disable Lint/RedundantRequireStatement -- Ruby 3.1 and earlier needs this. Drop this line after Ruby 3.2+ is only supported.
require 'test_file_finder'
# These tests run a sanity check on the mapping file `tests.yml`
diff --git a/spec/factories/ci/runners.rb b/spec/factories/ci/runners.rb
index 2d67a4c0e80..63e8cec82e6 100644
--- a/spec/factories/ci/runners.rb
+++ b/spec/factories/ci/runners.rb
@@ -14,6 +14,7 @@ FactoryBot.define do
groups { [] }
projects { [] }
token_expires_at { nil }
+ creator { nil }
end
after(:build) do |runner, evaluator|
@@ -24,6 +25,8 @@ FactoryBot.define do
evaluator.groups.each do |group|
runner.runner_namespaces << build(:ci_runner_namespace, runner: runner, namespace: group)
end
+
+ runner.creator = evaluator.creator if evaluator.creator
end
after(:create) do |runner, evaluator|
diff --git a/spec/factories/users.rb b/spec/factories/users.rb
index 03dbe04b337..7ade859dcf2 100644
--- a/spec/factories/users.rb
+++ b/spec/factories/users.rb
@@ -20,8 +20,6 @@ FactoryBot.define do
true
end
- assign_ns &&= Feature.enabled?(:create_personal_ns_outside_model, Feature.current_request)
-
user.assign_personal_namespace if assign_ns
end
diff --git a/spec/frontend/editor/schema/ci/ci_schema_spec.js b/spec/frontend/editor/schema/ci/ci_schema_spec.js
index 7986509074e..949cf1367ff 100644
--- a/spec/frontend/editor/schema/ci/ci_schema_spec.js
+++ b/spec/frontend/editor/schema/ci/ci_schema_spec.js
@@ -38,8 +38,8 @@ import SecretsYaml from './yaml_tests/positive_tests/secrets.yml';
import ServicesYaml from './yaml_tests/positive_tests/services.yml';
import NeedsParallelMatrixYaml from './yaml_tests/positive_tests/needs_parallel_matrix.yml';
import ScriptYaml from './yaml_tests/positive_tests/script.yml';
-import AutoCancelPipelineOnJobFailureAllYaml from './yaml_tests/positive_tests/auto_cancel_pipeline/on_job_failure/all.yml';
-import AutoCancelPipelineOnJobFailureNoneYaml from './yaml_tests/positive_tests/auto_cancel_pipeline/on_job_failure/none.yml';
+import WorkflowAutoCancelOnJobFailureYaml from './yaml_tests/positive_tests/workflow/auto_cancel/on_job_failure.yml';
+import WorkflowAutoCancelOnNewCommitYaml from './yaml_tests/positive_tests/workflow/auto_cancel/on_new_commit.yml';
// YAML NEGATIVE TEST
import ArtifactsNegativeYaml from './yaml_tests/negative_tests/artifacts.yml';
@@ -66,7 +66,8 @@ import NeedsParallelMatrixNumericYaml from './yaml_tests/negative_tests/needs/pa
import NeedsParallelMatrixWrongParallelValueYaml from './yaml_tests/negative_tests/needs/parallel_matrix/wrong_parallel_value.yml';
import NeedsParallelMatrixWrongMatrixValueYaml from './yaml_tests/negative_tests/needs/parallel_matrix/wrong_matrix_value.yml';
import ScriptNegativeYaml from './yaml_tests/negative_tests/script.yml';
-import AutoCancelPipelineNegativeYaml from './yaml_tests/negative_tests/auto_cancel_pipeline.yml';
+import WorkflowAutoCancelOnJobFailureNegativeYaml from './yaml_tests/negative_tests/workflow/auto_cancel/on_job_failure.yml';
+import WorkflowAutoCancelOnNewCommitNegativeYaml from './yaml_tests/negative_tests/workflow/auto_cancel/on_new_commit.yml';
const ajv = new Ajv({
strictTypes: false,
@@ -110,8 +111,8 @@ describe('positive tests', () => {
SecretsYaml,
NeedsParallelMatrixYaml,
ScriptYaml,
- AutoCancelPipelineOnJobFailureAllYaml,
- AutoCancelPipelineOnJobFailureNoneYaml,
+ WorkflowAutoCancelOnJobFailureYaml,
+ WorkflowAutoCancelOnNewCommitYaml,
}),
)('schema validates %s', (_, input) => {
// We construct a new "JSON" from each main key that is inside a
@@ -157,7 +158,8 @@ describe('negative tests', () => {
NeedsParallelMatrixWrongParallelValueYaml,
NeedsParallelMatrixWrongMatrixValueYaml,
ScriptNegativeYaml,
- AutoCancelPipelineNegativeYaml,
+ WorkflowAutoCancelOnJobFailureNegativeYaml,
+ WorkflowAutoCancelOnNewCommitNegativeYaml,
}),
)('schema validates %s', (_, input) => {
// We construct a new "JSON" from each main key that is inside a
diff --git a/spec/frontend/editor/schema/ci/yaml_tests/negative_tests/auto_cancel_pipeline.yml b/spec/frontend/editor/schema/ci/yaml_tests/negative_tests/workflow/auto_cancel/on_job_failure.yml
index 0ba3e5632e3..2bf9effe1be 100644
--- a/spec/frontend/editor/schema/ci/yaml_tests/negative_tests/auto_cancel_pipeline.yml
+++ b/spec/frontend/editor/schema/ci/yaml_tests/negative_tests/workflow/auto_cancel/on_job_failure.yml
@@ -1,4 +1,3 @@
-# invalid workflow:auto-cancel:on-job-failure
workflow:
auto_cancel:
on_job_failure: unexpected_value
diff --git a/spec/frontend/editor/schema/ci/yaml_tests/negative_tests/workflow/auto_cancel/on_new_commit.yml b/spec/frontend/editor/schema/ci/yaml_tests/negative_tests/workflow/auto_cancel/on_new_commit.yml
new file mode 100644
index 00000000000..371662efd24
--- /dev/null
+++ b/spec/frontend/editor/schema/ci/yaml_tests/negative_tests/workflow/auto_cancel/on_new_commit.yml
@@ -0,0 +1,3 @@
+workflow:
+ auto_cancel:
+ on_new_commit: unexpected_value
diff --git a/spec/frontend/editor/schema/ci/yaml_tests/positive_tests/auto_cancel_pipeline/on_job_failure/none.yml b/spec/frontend/editor/schema/ci/yaml_tests/positive_tests/auto_cancel_pipeline/on_job_failure/none.yml
deleted file mode 100644
index b99eb50e962..00000000000
--- a/spec/frontend/editor/schema/ci/yaml_tests/positive_tests/auto_cancel_pipeline/on_job_failure/none.yml
+++ /dev/null
@@ -1,4 +0,0 @@
-# valid workflow:auto-cancel:on-job-failure
-workflow:
- auto_cancel:
- on_job_failure: none
diff --git a/spec/frontend/editor/schema/ci/yaml_tests/positive_tests/auto_cancel_pipeline/on_job_failure/all.yml b/spec/frontend/editor/schema/ci/yaml_tests/positive_tests/workflow/auto_cancel/on_job_failure.yml
index bf84ff16f42..79d18f40721 100644
--- a/spec/frontend/editor/schema/ci/yaml_tests/positive_tests/auto_cancel_pipeline/on_job_failure/all.yml
+++ b/spec/frontend/editor/schema/ci/yaml_tests/positive_tests/workflow/auto_cancel/on_job_failure.yml
@@ -1,4 +1,3 @@
-# valid workflow:auto-cancel:on-job-failure
workflow:
auto_cancel:
on_job_failure: all
diff --git a/spec/frontend/editor/schema/ci/yaml_tests/positive_tests/workflow/auto_cancel/on_new_commit.yml b/spec/frontend/editor/schema/ci/yaml_tests/positive_tests/workflow/auto_cancel/on_new_commit.yml
new file mode 100644
index 00000000000..a1641878e4d
--- /dev/null
+++ b/spec/frontend/editor/schema/ci/yaml_tests/positive_tests/workflow/auto_cancel/on_new_commit.yml
@@ -0,0 +1,3 @@
+workflow:
+ auto_cancel:
+ on_new_commit: conservative
diff --git a/spec/graphql/resolvers/ci/group_runners_resolver_spec.rb b/spec/graphql/resolvers/ci/group_runners_resolver_spec.rb
index d1eec0baeea..d1726c8da6c 100644
--- a/spec/graphql/resolvers/ci/group_runners_resolver_spec.rb
+++ b/spec/graphql/resolvers/ci/group_runners_resolver_spec.rb
@@ -87,7 +87,7 @@ RSpec.describe Resolvers::Ci::GroupRunnersResolver, feature_category: :fleet_vis
status_status: 'active',
type_type: :group_type,
tag_name: ['active_runner'],
- preload: false,
+ preload: {},
search: 'abc',
sort: 'contacted_asc',
membership: :descendants,
diff --git a/spec/graphql/resolvers/ci/project_runners_resolver_spec.rb b/spec/graphql/resolvers/ci/project_runners_resolver_spec.rb
index 85b55521174..59ba7d4200c 100644
--- a/spec/graphql/resolvers/ci/project_runners_resolver_spec.rb
+++ b/spec/graphql/resolvers/ci/project_runners_resolver_spec.rb
@@ -74,7 +74,7 @@ RSpec.describe Resolvers::Ci::ProjectRunnersResolver, feature_category: :fleet_v
status_status: 'active',
type_type: :group_type,
tag_name: ['active_runner'],
- preload: false,
+ preload: {},
search: 'abc',
sort: 'contacted_asc',
project: project
diff --git a/spec/graphql/resolvers/ci/runners_resolver_spec.rb b/spec/graphql/resolvers/ci/runners_resolver_spec.rb
index 85a90924384..a0239a6ff34 100644
--- a/spec/graphql/resolvers/ci/runners_resolver_spec.rb
+++ b/spec/graphql/resolvers/ci/runners_resolver_spec.rb
@@ -98,7 +98,7 @@ RSpec.describe Resolvers::Ci::RunnersResolver, feature_category: :fleet_visibili
upgrade_status: 'recommended',
type_type: :instance_type,
tag_name: ['active_runner'],
- preload: false,
+ preload: {},
search: 'abc',
sort: 'contacted_asc',
creator_id: '1',
@@ -125,7 +125,7 @@ RSpec.describe Resolvers::Ci::RunnersResolver, feature_category: :fleet_visibili
let(:expected_params) do
{
active: false,
- preload: false
+ preload: {}
}
end
@@ -145,7 +145,7 @@ RSpec.describe Resolvers::Ci::RunnersResolver, feature_category: :fleet_visibili
let(:expected_params) do
{
active: false,
- preload: false
+ preload: {}
}
end
@@ -163,7 +163,7 @@ RSpec.describe Resolvers::Ci::RunnersResolver, feature_category: :fleet_visibili
end
let(:expected_params) do
- { preload: false }
+ { preload: {} }
end
it 'calls RunnersFinder with expected arguments' do
@@ -181,7 +181,7 @@ RSpec.describe Resolvers::Ci::RunnersResolver, feature_category: :fleet_visibili
let(:expected_params) do
{
- preload: false,
+ preload: {},
version_prefix: 'a.b'
}
end
diff --git a/spec/lib/gitlab/background_migration/backfill_vs_code_settings_version_spec.rb b/spec/lib/gitlab/background_migration/backfill_vs_code_settings_version_spec.rb
new file mode 100644
index 00000000000..725cd7f4bca
--- /dev/null
+++ b/spec/lib/gitlab/background_migration/backfill_vs_code_settings_version_spec.rb
@@ -0,0 +1,84 @@
+# frozen_string_literal: true
+
+require 'spec_helper'
+
+RSpec.describe Gitlab::BackgroundMigration::BackfillVsCodeSettingsVersion, schema: 20231212135235, feature_category: :web_ide do
+ let(:vs_code_settings) { table(:vs_code_settings) }
+
+ let(:users) { table(:users) }
+
+ let(:user) do
+ users.create!(
+ email: "test1@example.com",
+ username: "test1",
+ notification_email: "test@example.com",
+ name: "test",
+ state: "active",
+ projects_limit: 10)
+ end
+
+ let(:persistent_settings) { VsCode::Settings::SETTINGS_TYPES.filter { |type| type != 'machines' } }
+
+ subject(:migration) do
+ described_class.new(
+ start_id: vs_code_settings.first.id,
+ end_id: vs_code_settings.last.id,
+ batch_table: :vs_code_settings,
+ batch_column: :id,
+ sub_batch_size: 100,
+ pause_ms: 0,
+ connection: ActiveRecord::Base.connection
+ )
+ end
+
+ describe "#perform" do
+ context 'when it finds vs_code_setting rows with version that is nil or zero' do
+ let(:settings) do
+ persistent_settings.each_with_index.map do |type, index|
+ vs_code_settings.create!(user_id: user.id,
+ setting_type: type,
+ content: '{}',
+ uuid: SecureRandom.uuid,
+ version: index.odd? ? nil : 0)
+ end
+ end
+
+ it 'sets version field with default value for setting type' do
+ settings.each do |setting|
+ expect(setting.version).to eq(nil).or eq(0)
+ end
+
+ migration.perform
+
+ settings.each do |setting|
+ expect(setting.reload.version)
+ .to eq(described_class::VsCodeSetting::DEFAULT_SETTING_VERSIONS[setting.setting_type])
+ end
+ end
+ end
+
+ context 'when it finds vs_code_setting rows with version that is not nil or zero' do
+ let(:settings) do
+ persistent_settings.map do |type|
+ vs_code_settings.create!(user_id: user.id,
+ setting_type: type,
+ content: '{}',
+ uuid: SecureRandom.uuid,
+ version: 1)
+ end
+ end
+
+ it 'does not set version field' do
+ settings.each do |setting|
+ expect(setting.version).to eq(1)
+ end
+
+ migration.perform
+
+ settings.each do |setting|
+ expect(setting.reload.version).to eq(1)
+ end
+ end
+ end
+ end
+end
diff --git a/spec/lib/gitlab/ci/config/entry/auto_cancel_spec.rb b/spec/lib/gitlab/ci/config/entry/auto_cancel_spec.rb
index bdd66cc00a1..764908ee040 100644
--- a/spec/lib/gitlab/ci/config/entry/auto_cancel_spec.rb
+++ b/spec/lib/gitlab/ci/config/entry/auto_cancel_spec.rb
@@ -25,7 +25,7 @@ RSpec.describe Gitlab::Ci::Config::Entry::AutoCancel, feature_category: :pipelin
it 'returns errors' do
expect(config.errors)
- .to include('auto cancel on new commit must be one of: conservative, interruptible, disabled')
+ .to include('auto cancel on new commit must be one of: conservative, interruptible, none')
end
end
end
diff --git a/spec/lib/gitlab/diff/rendered/notebook/diff_file_helper_spec.rb b/spec/lib/gitlab/diff/rendered/notebook/diff_file_helper_spec.rb
index ad92d90e253..4dd29e1fb15 100644
--- a/spec/lib/gitlab/diff/rendered/notebook/diff_file_helper_spec.rb
+++ b/spec/lib/gitlab/diff/rendered/notebook/diff_file_helper_spec.rb
@@ -2,7 +2,7 @@
require 'fast_spec_helper'
require 'rspec-parameterized'
-require 'set'
+require 'set' # rubocop:disable Lint/RedundantRequireStatement -- Ruby 3.1 and earlier needs this. Drop this line after Ruby 3.2+ is only supported.
MOCK_LINE = Struct.new(:text, :type, :index, :old_pos, :new_pos)
diff --git a/spec/lib/gitlab/instrumentation/redis_client_middleware_spec.rb b/spec/lib/gitlab/instrumentation/redis_client_middleware_spec.rb
new file mode 100644
index 00000000000..eca75d93c80
--- /dev/null
+++ b/spec/lib/gitlab/instrumentation/redis_client_middleware_spec.rb
@@ -0,0 +1,224 @@
+# frozen_string_literal: true
+
+require 'spec_helper'
+require 'rspec-parameterized'
+require 'support/helpers/rails_helpers'
+
+RSpec.describe Gitlab::Instrumentation::RedisClientMiddleware, :request_store, feature_category: :scalability do
+ using RSpec::Parameterized::TableSyntax
+ include RedisHelpers
+
+ let_it_be(:redis_store_class) { define_helper_redis_store_class }
+ let_it_be(:redis_client) { RedisClient.new(redis_store_class.redis_client_params) }
+
+ before do
+ redis_client.call("flushdb")
+ end
+
+ describe 'read and write' do
+ where(:setup, :command, :expect_write, :expect_read) do
+ # The response is 'OK', the request size is the combined size of array
+ # elements. Exercise counting of a status reply.
+ [] | [:set, 'foo', 'bar'] | (3 + 3 + 3) | 2
+
+ # The response is 1001, so 4 bytes. Exercise counting an integer reply.
+ [[:set, 'foobar', 1000]] | [:incr, 'foobar'] | (4 + 6) | 4
+
+ # Exercise counting empty multi bulk reply. Returns an empty hash `{}`
+ [] | [:hgetall, 'foobar'] | (7 + 6) | 2
+
+ # Hgetall response length is combined length of keys and values in the
+ # hash. Exercises counting of a multi bulk reply
+ # Returns `{"field"=>"hello world"}`, 5 for field, 11 for hello world, 8 for {, }, 4 "s, =, >
+ [[:hset, 'myhash', 'field', 'hello world']] | [:hgetall, 'myhash'] | (7 + 6) | (5 + 11 + 8)
+
+ # Exercise counting of a bulk reply
+ [[:set, 'foo', 'bar' * 100]] | [:get, 'foo'] | (3 + 3) | (3 * 100)
+
+ # Nested array response: [['foo', 0.0], ['bar', 1.0]]. Returns scores as float.
+ [[:zadd, 'myset', 0, 'foo'],
+ [:zadd, 'myset', 1, 'bar']] | [:zrange, 'myset', 0, -1, 'withscores'] | (6 + 5 + 1 + 2 + 10) | (3 + 3 + 3 + 3)
+ end
+
+ with_them do
+ it 'counts bytes read and written' do
+ setup.each { |cmd| redis_client.call(*cmd) }
+ RequestStore.clear!
+ redis_client.call(*command)
+
+ expect(Gitlab::Instrumentation::Redis.read_bytes).to eq(expect_read)
+ expect(Gitlab::Instrumentation::Redis.write_bytes).to eq(expect_write)
+ end
+ end
+ end
+
+ describe 'counting' do
+ let(:instrumentation_class) { redis_store_class.instrumentation_class }
+
+ it 'counts successful requests' do
+ expect(instrumentation_class).to receive(:instance_count_request).with(1).and_call_original
+
+ redis_client.call(:get, 'foobar')
+ end
+
+ it 'counts successful pipelined requests' do
+ expect(instrumentation_class).to receive(:instance_count_request).with(2).and_call_original
+ expect(instrumentation_class).to receive(:instance_count_pipelined_request).with(2).and_call_original
+
+ redis_client.pipelined do |pipeline|
+ pipeline.call(:get, '{foobar}buz')
+ pipeline.call(:get, '{foobar}baz')
+ end
+ end
+
+ context 'when encountering exceptions' do
+ before do
+ allow(redis_client.instance_variable_get(:@raw_connection)).to receive(:call).and_raise(
+ RedisClient::ConnectionError, 'Connection was closed or lost')
+ end
+
+ it 'counts exception' do
+ expect(instrumentation_class).to receive(:instance_count_exception)
+ .with(instance_of(RedisClient::ConnectionError)).and_call_original
+ expect(instrumentation_class).to receive(:log_exception)
+ .with(instance_of(RedisClient::ConnectionError)).and_call_original
+ expect(instrumentation_class).to receive(:instance_count_request).and_call_original
+
+ expect do
+ redis_client.call(:auth, 'foo', 'bar')
+ end.to raise_error(RedisClient::Error)
+ end
+ end
+
+ context 'in production environment' do
+ before do
+ stub_rails_env('production') # to avoid raising CrossSlotError
+ end
+
+ it 'counts disallowed cross-slot requests' do
+ expect(instrumentation_class).to receive(:increment_cross_slot_request_count).and_call_original
+ expect(instrumentation_class).not_to receive(:increment_allowed_cross_slot_request_count).and_call_original
+
+ redis_client.call(:mget, 'foo', 'bar')
+ end
+
+ it 'does not count allowed cross-slot requests' do
+ expect(instrumentation_class).not_to receive(:increment_cross_slot_request_count).and_call_original
+ expect(instrumentation_class).to receive(:increment_allowed_cross_slot_request_count).and_call_original
+
+ Gitlab::Instrumentation::RedisClusterValidator.allow_cross_slot_commands do
+ redis_client.call(:mget, 'foo', 'bar')
+ end
+ end
+
+ it 'does not count allowed non-cross-slot requests' do
+ expect(instrumentation_class).not_to receive(:increment_cross_slot_request_count).and_call_original
+ expect(instrumentation_class).not_to receive(:increment_allowed_cross_slot_request_count).and_call_original
+
+ Gitlab::Instrumentation::RedisClusterValidator.allow_cross_slot_commands do
+ redis_client.call(:mget, 'bar')
+ end
+ end
+
+ it 'skips count for non-cross-slot requests' do
+ expect(instrumentation_class).not_to receive(:increment_cross_slot_request_count).and_call_original
+ expect(instrumentation_class).not_to receive(:increment_allowed_cross_slot_request_count).and_call_original
+
+ redis_client.call(:mget, '{foo}bar', '{foo}baz')
+ end
+ end
+
+ context 'without active RequestStore' do
+ before do
+ ::RequestStore.end!
+ end
+
+ it 'still runs cross-slot validation' do
+ expect do
+ redis_client.call('mget', 'foo', 'bar')
+ end.to raise_error(instance_of(Gitlab::Instrumentation::RedisClusterValidator::CrossSlotError))
+ end
+ end
+ end
+
+ describe 'latency' do
+ let(:instrumentation_class) { redis_store_class.instrumentation_class }
+
+ describe 'commands in the apdex' do
+ where(:command) do
+ [
+ [[:get, 'foobar']],
+ [%w[GET foobar]]
+ ]
+ end
+
+ with_them do
+ it 'measures requests we want in the apdex' do
+ expect(instrumentation_class).to receive(:instance_observe_duration).with(a_value > 0)
+ .and_call_original
+
+ redis_client.call(*command)
+ end
+ end
+
+ context 'with pipelined commands' do
+ it 'measures requests that do not have blocking commands' do
+ expect(instrumentation_class).to receive(:instance_observe_duration).twice.with(a_value > 0)
+ .and_call_original
+
+ redis_client.pipelined do |pipeline|
+ pipeline.call(:get, '{foobar}buz')
+ pipeline.call(:get, '{foobar}baz')
+ end
+ end
+
+ it 'raises error when keys are not from the same slot' do
+ expect do
+ redis_client.pipelined do |pipeline|
+ pipeline.call(:get, 'foo')
+ pipeline.call(:get, 'bar')
+ end
+ end.to raise_error(instance_of(Gitlab::Instrumentation::RedisClusterValidator::CrossSlotError))
+ end
+ end
+ end
+
+ describe 'commands not in the apdex' do
+ where(:setup, :command) do
+ [['rpush', 'foobar', 1]] | ['brpop', 'foobar', 0]
+ [['rpush', 'foobar', 1]] | ['blpop', 'foobar', 0]
+ [['rpush', '{abc}foobar', 1]] | ['brpoplpush', '{abc}foobar', '{abc}bazqux', 0]
+ [['rpush', '{abc}foobar', 1]] | ['brpoplpush', '{abc}foobar', '{abc}bazqux', 0]
+ [['zadd', 'foobar', 1, 'a']] | ['bzpopmin', 'foobar', 0]
+ [['zadd', 'foobar', 1, 'a']] | ['bzpopmax', 'foobar', 0]
+ [['xadd', 'mystream', 1, 'myfield', 'mydata']] | ['xread', 'block', 1, 'streams', 'mystream', '0-0']
+ [['xadd', 'foobar', 1, 'myfield', 'mydata'],
+ ['xgroup', 'create', 'foobar', 'mygroup',
+ 0]] | ['xreadgroup', 'group', 'mygroup', 'myconsumer', 'block', 1, 'streams', 'foobar', '0-0']
+ [] | ['command']
+ end
+
+ with_them do
+ it 'skips requests we do not want in the apdex' do
+ setup.each { |cmd| redis_client.call(*cmd) }
+
+ expect(instrumentation_class).not_to receive(:instance_observe_duration)
+
+ redis_client.call(*command)
+ end
+ end
+
+ context 'with pipelined commands' do
+ it 'skips requests that have blocking commands' do
+ expect(instrumentation_class).not_to receive(:instance_observe_duration)
+
+ redis_client.pipelined do |pipeline|
+ pipeline.call(:get, '{foobar}buz')
+ pipeline.call(:rpush, '{foobar}baz', 1)
+ pipeline.call(:brpop, '{foobar}baz', 0)
+ end
+ end
+ end
+ end
+ end
+end
diff --git a/spec/lib/gitlab/memory/watchdog/handlers/sidekiq_handler_spec.rb b/spec/lib/gitlab/memory/watchdog/handlers/sidekiq_handler_spec.rb
index 68dd784fb7e..1c62f5679d0 100644
--- a/spec/lib/gitlab/memory/watchdog/handlers/sidekiq_handler_spec.rb
+++ b/spec/lib/gitlab/memory/watchdog/handlers/sidekiq_handler_spec.rb
@@ -12,7 +12,7 @@ RSpec.describe Gitlab::Memory::Watchdog::Handlers::SidekiqHandler, feature_categ
before do
allow(Gitlab::Metrics::System).to receive(:monotonic_time)
- .and_return(0, 1, shutdown_timeout_seconds, 0, 1, Sidekiq[:timeout] + 2)
+ .and_return(0, 1, shutdown_timeout_seconds, 0, 1, Sidekiq.default_configuration[:timeout] + 2)
allow(Process).to receive(:kill)
allow(::Sidekiq).to receive(:logger).and_return(logger)
allow(logger).to receive(:warn)
@@ -81,7 +81,7 @@ RSpec.describe Gitlab::Memory::Watchdog::Handlers::SidekiqHandler, feature_categ
let(:signal_params) do
[
[:TSTP, pid, 'stop fetching new jobs', shutdown_timeout_seconds],
- [:TERM, pid, 'gracefully shut down', Sidekiq[:timeout] + 2]
+ [:TERM, pid, 'gracefully shut down', Sidekiq.default_configuration[:timeout] + 2]
]
end
@@ -95,7 +95,7 @@ RSpec.describe Gitlab::Memory::Watchdog::Handlers::SidekiqHandler, feature_categ
let(:signal_params) do
[
[:TSTP, pid, 'stop fetching new jobs', shutdown_timeout_seconds],
- [:TERM, pid, 'gracefully shut down', Sidekiq[:timeout] + 2],
+ [:TERM, pid, 'gracefully shut down', Sidekiq.default_configuration[:timeout] + 2],
[:KILL, kill_pid, 'hard shut down', nil]
]
end
diff --git a/spec/lib/gitlab/runtime_spec.rb b/spec/lib/gitlab/runtime_spec.rb
index 05bcdf2fc96..bd5914c9df8 100644
--- a/spec/lib/gitlab/runtime_spec.rb
+++ b/spec/lib/gitlab/runtime_spec.rb
@@ -127,10 +127,10 @@ RSpec.describe Gitlab::Runtime, feature_category: :cloud_connector do
before do
stub_const('::Sidekiq', sidekiq_type)
allow(sidekiq_type).to receive(:server?).and_return(true)
- allow(sidekiq_type).to receive(:[]).with(:concurrency).and_return(2)
+ allow(sidekiq_type).to receive(:default_configuration).and_return({ concurrency: 2 })
end
- it_behaves_like "valid runtime", :sidekiq, 5
+ it_behaves_like "valid runtime", :sidekiq, 2
it 'identifies as an application runtime' do
expect(described_class.application?).to be true
diff --git a/spec/lib/gitlab/sidekiq_config_spec.rb b/spec/lib/gitlab/sidekiq_config_spec.rb
index 5885151ecb5..f741fd8fae9 100644
--- a/spec/lib/gitlab/sidekiq_config_spec.rb
+++ b/spec/lib/gitlab/sidekiq_config_spec.rb
@@ -186,7 +186,8 @@ RSpec.describe Gitlab::SidekiqConfig do
allow(::Gitlab::SidekiqConfig::WorkerRouter)
.to receive(:global).and_return(::Gitlab::SidekiqConfig::WorkerRouter.new(test_routes))
- allow(Sidekiq).to receive(:[]).with(:queues).and_return(%w[default background_migration])
+ allow(Sidekiq).to receive_message_chain(:default_configuration, :queues)
+ .and_return(%w[default background_migration])
mappings = described_class.current_worker_queue_mappings
diff --git a/spec/lib/gitlab/sidekiq_logging/structured_logger_spec.rb b/spec/lib/gitlab/sidekiq_logging/structured_logger_spec.rb
index 2e07fa100e8..b1a8a9f4da3 100644
--- a/spec/lib/gitlab/sidekiq_logging/structured_logger_spec.rb
+++ b/spec/lib/gitlab/sidekiq_logging/structured_logger_spec.rb
@@ -492,7 +492,7 @@ RSpec.describe Gitlab::SidekiqLogging::StructuredLogger do
'completed_at' => current_utc_time.to_i }
end
- subject { described_class.new }
+ subject { described_class.new(Sidekiq.logger) }
it 'update payload correctly' do
travel_to(current_utc_time) do
diff --git a/spec/lib/gitlab/sidekiq_middleware/server_metrics_spec.rb b/spec/lib/gitlab/sidekiq_middleware/server_metrics_spec.rb
index 9cf9901007c..e1662903fa4 100644
--- a/spec/lib/gitlab/sidekiq_middleware/server_metrics_spec.rb
+++ b/spec/lib/gitlab/sidekiq_middleware/server_metrics_spec.rb
@@ -10,7 +10,7 @@ RSpec.describe Gitlab::SidekiqMiddleware::ServerMetrics, feature_category: :shar
describe '.initialize_process_metrics' do
it 'sets concurrency metrics' do
- expect(concurrency_metric).to receive(:set).with({}, Sidekiq[:concurrency].to_i)
+ expect(concurrency_metric).to receive(:set).with({}, Sidekiq.default_configuration[:concurrency].to_i)
described_class.initialize_process_metrics
end
@@ -122,7 +122,7 @@ RSpec.describe Gitlab::SidekiqMiddleware::ServerMetrics, feature_category: :shar
end
it 'sets the concurrency metric' do
- expect(concurrency_metric).to receive(:set).with({}, Sidekiq[:concurrency].to_i)
+ expect(concurrency_metric).to receive(:set).with({}, Sidekiq.default_configuration[:concurrency].to_i)
described_class.initialize_process_metrics
end
diff --git a/spec/lib/gitlab/sidekiq_migrate_jobs_spec.rb b/spec/lib/gitlab/sidekiq_migrate_jobs_spec.rb
index bf379d9cb0d..96d4042b1e6 100644
--- a/spec/lib/gitlab/sidekiq_migrate_jobs_spec.rb
+++ b/spec/lib/gitlab/sidekiq_migrate_jobs_spec.rb
@@ -22,7 +22,7 @@ RSpec.describe Gitlab::SidekiqMigrateJobs, :clean_gitlab_redis_queues,
let(:migrator) { described_class.new(mappings) }
let(:set_after) do
- Sidekiq.redis { |c| c.zrange(set_name, 0, -1, with_scores: true) }
+ Sidekiq.redis { |c| c.call("ZRANGE", set_name, 0, -1, "WITHSCORES") }
.map { |item, score| [Gitlab::Json.load(item), score] }
end
@@ -226,8 +226,9 @@ RSpec.describe Gitlab::SidekiqMigrateJobs, :clean_gitlab_redis_queues,
let(:logger) { nil }
def list_queues
- queues = Sidekiq.redis do |conn|
- conn.scan_each(match: "queue:*").to_a
+ queues = []
+ Sidekiq.redis do |conn|
+ conn.scan("MATCH", "queue:*") { |key| queues << key }
end
queues.uniq.map { |queue| queue.split(':', 2).last }
end
diff --git a/spec/lib/gitlab/sidekiq_status_spec.rb b/spec/lib/gitlab/sidekiq_status_spec.rb
index 55e3885d257..ecdab2651a2 100644
--- a/spec/lib/gitlab/sidekiq_status_spec.rb
+++ b/spec/lib/gitlab/sidekiq_status_spec.rb
@@ -174,7 +174,7 @@ RSpec.describe Gitlab::SidekiqStatus, :clean_gitlab_redis_queues,
context 'when both multi-store feature flags are off' do
def with_redis(&block)
- Sidekiq.redis(&block)
+ Gitlab::Redis::Queues.with(&block)
end
before do
diff --git a/spec/migrations/20231212135235_queue_backfill_vs_code_settings_version_spec.rb b/spec/migrations/20231212135235_queue_backfill_vs_code_settings_version_spec.rb
new file mode 100644
index 00000000000..e3e08720950
--- /dev/null
+++ b/spec/migrations/20231212135235_queue_backfill_vs_code_settings_version_spec.rb
@@ -0,0 +1,26 @@
+# frozen_string_literal: true
+
+require 'spec_helper'
+require_migration!
+
+RSpec.describe QueueBackfillVsCodeSettingsVersion, feature_category: :web_ide do
+ let!(:batched_migration) { described_class::MIGRATION }
+
+ it 'schedules a new batched migration' do
+ reversible_migration do |migration|
+ migration.before -> {
+ expect(batched_migration).not_to have_scheduled_batched_migration
+ }
+
+ migration.after -> {
+ expect(batched_migration).to have_scheduled_batched_migration(
+ table_name: :vs_code_settings,
+ column_name: :id,
+ interval: described_class::DELAY_INTERVAL,
+ batch_size: described_class::BATCH_SIZE,
+ sub_batch_size: described_class::SUB_BATCH_SIZE
+ )
+ }
+ end
+ end
+end
diff --git a/spec/models/ci/pipeline_metadata_spec.rb b/spec/models/ci/pipeline_metadata_spec.rb
index 1a426118063..c114c0e945e 100644
--- a/spec/models/ci/pipeline_metadata_spec.rb
+++ b/spec/models/ci/pipeline_metadata_spec.rb
@@ -15,7 +15,7 @@ RSpec.describe Ci::PipelineMetadata, feature_category: :pipeline_composition do
is_expected.to define_enum_for(
:auto_cancel_on_new_commit
).with_values(
- conservative: 0, interruptible: 1, disabled: 2
+ conservative: 0, interruptible: 1, none: 2
).with_prefix
end
diff --git a/spec/models/ci/pipeline_spec.rb b/spec/models/ci/pipeline_spec.rb
index e075ea232d3..52c3792ac93 100644
--- a/spec/models/ci/pipeline_spec.rb
+++ b/spec/models/ci/pipeline_spec.rb
@@ -5727,4 +5727,36 @@ RSpec.describe Ci::Pipeline, :mailer, factory_default: :keep, feature_category:
end
end
end
+
+ describe '#auto_cancel_on_new_commit' do
+ let_it_be_with_reload(:pipeline) { create(:ci_pipeline, project: project) }
+
+ subject(:auto_cancel_on_new_commit) { pipeline.auto_cancel_on_new_commit }
+
+ context 'when pipeline_metadata is not present' do
+ it { is_expected.to eq('conservative') }
+ end
+
+ context 'when pipeline_metadata is present' do
+ before_all do
+ create(:ci_pipeline_metadata, project: pipeline.project, pipeline: pipeline)
+ end
+
+ context 'when auto_cancel_on_new_commit is nil' do
+ before do
+ pipeline.pipeline_metadata.auto_cancel_on_new_commit = nil
+ end
+
+ it { is_expected.to eq('conservative') }
+ end
+
+ context 'when auto_cancel_on_new_commit is a valid value' do
+ before do
+ pipeline.pipeline_metadata.auto_cancel_on_new_commit = 'interruptible'
+ end
+
+ it { is_expected.to eq('interruptible') }
+ end
+ end
+ end
end
diff --git a/spec/models/ci/runner_spec.rb b/spec/models/ci/runner_spec.rb
index 8c4d5fef24d..d4f7db3bddd 100644
--- a/spec/models/ci/runner_spec.rb
+++ b/spec/models/ci/runner_spec.rb
@@ -532,7 +532,7 @@ RSpec.describe Ci::Runner, type: :model, feature_category: :runner do
let_it_be(:runner3) { create(:ci_runner, creator_id: 1) }
let_it_be(:runner4) { create(:ci_runner, creator_id: nil) }
- it 'returns runners with creator_id \'1\'' do
+ it "returns runners with creator_id '1'" do
is_expected.to contain_exactly(runner2, runner3)
end
end
diff --git a/spec/models/project_authorizations/changes_spec.rb b/spec/models/project_authorizations/changes_spec.rb
index d6ccfccbcbe..714144841fb 100644
--- a/spec/models/project_authorizations/changes_spec.rb
+++ b/spec/models/project_authorizations/changes_spec.rb
@@ -28,36 +28,49 @@ RSpec.describe ProjectAuthorizations::Changes, feature_category: :groups_and_pro
end
shared_examples_for 'publishes AuthorizationsChangedEvent' do
- it 'publishes a AuthorizationsChangedEvent event with project id' do
- project_ids.each do |project_id|
- project_data = { project_id: project_id }
- project_event = instance_double('::ProjectAuthorizations::AuthorizationsChangedEvent', data: project_data)
+ it 'does not publish an AuthorizationsChangedEvent event' do
+ expect(::Gitlab::EventStore).not_to receive(:publish)
+ .with(an_instance_of(::ProjectAuthorizations::AuthorizationsChangedEvent))
- allow(::ProjectAuthorizations::AuthorizationsChangedEvent).to receive(:new)
- .with(data: project_data)
- .and_return(project_event)
+ apply_project_authorization_changes
+ end
- allow(::Gitlab::EventStore).to receive(:publish)
- expect(::Gitlab::EventStore).to receive(:publish).with(project_event)
+ context 'when feature flag "add_policy_approvers_to_rules" is disabled' do
+ before do
+ stub_feature_flags(add_policy_approvers_to_rules: false)
end
- apply_project_authorization_changes
+ it 'publishes an AuthorizationsChangedEvent event with project id' do
+ allow(::Gitlab::EventStore).to receive(:publish)
+ project_ids.each do |project_id|
+ project_data = { project_id: project_id }
+ project_event = instance_double('::ProjectAuthorizations::AuthorizationsChangedEvent', data: project_data)
+
+ allow(::ProjectAuthorizations::AuthorizationsChangedEvent).to receive(:new)
+ .with(data: project_data)
+ .and_return(project_event)
+
+ expect(::Gitlab::EventStore).to receive(:publish).with(project_event)
+ end
+
+ apply_project_authorization_changes
+ end
end
end
shared_examples_for 'publishes AuthorizationsRemovedEvent' do
it 'publishes a AuthorizationsRemovedEvent event with project id' do
- project_ids.each do |project_id|
+ allow(::Gitlab::EventStore).to receive(:publish_group)
+ project_events = project_ids.map do |project_id|
project_data = { project_id: project_id, user_ids: user_ids }
project_event = instance_double('::ProjectAuthorizations::AuthorizationsRemovedEvent', data: project_data)
allow(::ProjectAuthorizations::AuthorizationsRemovedEvent).to receive(:new)
.with(data: project_data)
.and_return(project_event)
-
- allow(::Gitlab::EventStore).to receive(:publish)
- expect(::Gitlab::EventStore).to receive(:publish).with(project_event)
+ project_event
end
+ expect(::Gitlab::EventStore).to receive(:publish_group).with(project_events)
apply_project_authorization_changes
end
@@ -69,7 +82,43 @@ RSpec.describe ProjectAuthorizations::Changes, feature_category: :groups_and_pro
it 'does not publish a AuthorizationsRemovedEvent event' do
expect(::Gitlab::EventStore).not_to(
- receive(:publish).with(an_instance_of(::ProjectAuthorizations::AuthorizationsRemovedEvent))
+ receive(:publish_group).with(
+ array_including(an_instance_of(::ProjectAuthorizations::AuthorizationsRemovedEvent))
+ )
+ )
+
+ apply_project_authorization_changes
+ end
+ end
+ end
+
+ shared_examples_for 'publishes AuthorizationsAddedEvent' do
+ it 'publishes an AuthorizationsAddedEvent event with project id' do
+ allow(::Gitlab::EventStore).to receive(:publish_group)
+ project_events = project_ids.map do |project_id|
+ project_data = { project_id: project_id, user_ids: user_ids }
+ project_event = instance_double('::ProjectAuthorizations::AuthorizationsAddedEvent', data: project_data)
+
+ allow(::ProjectAuthorizations::AuthorizationsAddedEvent).to receive(:new)
+ .with(data: project_data)
+ .and_return(project_event)
+ project_event
+ end
+ expect(::Gitlab::EventStore).to receive(:publish_group).with(project_events)
+
+ apply_project_authorization_changes
+ end
+
+ context 'when feature flag "add_policy_approvers_to_rules" is disabled' do
+ before do
+ stub_feature_flags(add_policy_approvers_to_rules: false)
+ end
+
+ it 'does not publish an AuthorizationsAddedEvent event' do
+ expect(::Gitlab::EventStore).not_to(
+ receive(:publish_group).with(array_including(
+ an_instance_of(::ProjectAuthorizations::AuthorizationsAddedEvent))
+ )
)
apply_project_authorization_changes
@@ -88,8 +137,23 @@ RSpec.describe ProjectAuthorizations::Changes, feature_category: :groups_and_pro
shared_examples_for 'does not publish AuthorizationsRemovedEvent' do
it 'does not publish a AuthorizationsRemovedEvent event' do
- expect(::Gitlab::EventStore).not_to receive(:publish)
- .with(an_instance_of(::ProjectAuthorizations::AuthorizationsRemovedEvent))
+ expect(::Gitlab::EventStore).not_to(
+ receive(:publish_group).with(
+ array_including(an_instance_of(::ProjectAuthorizations::AuthorizationsRemovedEvent))
+ )
+ )
+
+ apply_project_authorization_changes
+ end
+ end
+
+ shared_examples_for 'does not publish AuthorizationsAddedEvent' do
+ it 'does not publish an AuthorizationsAddedEvent event' do
+ expect(::Gitlab::EventStore).not_to(
+ receive(:publish_group).with(
+ array_including(an_instance_of(::ProjectAuthorizations::AuthorizationsAddedEvent))
+ )
+ )
apply_project_authorization_changes
end
@@ -101,6 +165,7 @@ RSpec.describe ProjectAuthorizations::Changes, feature_category: :groups_and_pro
let_it_be(:project_2) { create(:project) }
let_it_be(:project_3) { create(:project) }
let(:project_ids) { [project_1.id, project_2.id, project_3.id] }
+ let(:user_ids) { [user.id] }
let(:authorizations_to_add) do
[
@@ -155,6 +220,7 @@ RSpec.describe ProjectAuthorizations::Changes, feature_category: :groups_and_pro
it_behaves_like 'logs the detail', batch_size: 2
it_behaves_like 'publishes AuthorizationsChangedEvent'
+ it_behaves_like 'publishes AuthorizationsAddedEvent'
it_behaves_like 'does not publish AuthorizationsRemovedEvent'
context 'when the GitLab installation does not have a replica database configured' do
@@ -166,6 +232,7 @@ RSpec.describe ProjectAuthorizations::Changes, feature_category: :groups_and_pro
it_behaves_like 'inserts the rows in batches, as per the `per_batch` size, without a delay between batches'
it_behaves_like 'does not log any detail'
it_behaves_like 'publishes AuthorizationsChangedEvent'
+ it_behaves_like 'publishes AuthorizationsAddedEvent'
it_behaves_like 'does not publish AuthorizationsRemovedEvent'
end
end
@@ -178,6 +245,7 @@ RSpec.describe ProjectAuthorizations::Changes, feature_category: :groups_and_pro
it_behaves_like 'inserts the rows in batches, as per the `per_batch` size, without a delay between batches'
it_behaves_like 'does not log any detail'
it_behaves_like 'publishes AuthorizationsChangedEvent'
+ it_behaves_like 'publishes AuthorizationsAddedEvent'
it_behaves_like 'does not publish AuthorizationsRemovedEvent'
end
end
@@ -242,6 +310,7 @@ RSpec.describe ProjectAuthorizations::Changes, feature_category: :groups_and_pro
it_behaves_like 'logs the detail', batch_size: 2
it_behaves_like 'publishes AuthorizationsChangedEvent'
it_behaves_like 'publishes AuthorizationsRemovedEvent'
+ it_behaves_like 'does not publish AuthorizationsAddedEvent'
context 'when the GitLab installation does not have a replica database configured' do
before do
@@ -253,6 +322,7 @@ RSpec.describe ProjectAuthorizations::Changes, feature_category: :groups_and_pro
it_behaves_like 'does not log any detail'
it_behaves_like 'publishes AuthorizationsChangedEvent'
it_behaves_like 'publishes AuthorizationsRemovedEvent'
+ it_behaves_like 'does not publish AuthorizationsAddedEvent'
end
end
@@ -265,6 +335,7 @@ RSpec.describe ProjectAuthorizations::Changes, feature_category: :groups_and_pro
it_behaves_like 'does not log any detail'
it_behaves_like 'publishes AuthorizationsChangedEvent'
it_behaves_like 'publishes AuthorizationsRemovedEvent'
+ it_behaves_like 'does not publish AuthorizationsAddedEvent'
end
context 'when the user_ids list is empty' do
@@ -273,6 +344,7 @@ RSpec.describe ProjectAuthorizations::Changes, feature_category: :groups_and_pro
it_behaves_like 'does not removes project authorizations of the users in the current project'
it_behaves_like 'does not publish AuthorizationsChangedEvent'
it_behaves_like 'does not publish AuthorizationsRemovedEvent'
+ it_behaves_like 'does not publish AuthorizationsAddedEvent'
end
context 'when the user_ids list is nil' do
@@ -281,6 +353,7 @@ RSpec.describe ProjectAuthorizations::Changes, feature_category: :groups_and_pro
it_behaves_like 'does not removes project authorizations of the users in the current project'
it_behaves_like 'does not publish AuthorizationsChangedEvent'
it_behaves_like 'does not publish AuthorizationsRemovedEvent'
+ it_behaves_like 'does not publish AuthorizationsAddedEvent'
end
end
@@ -344,6 +417,7 @@ RSpec.describe ProjectAuthorizations::Changes, feature_category: :groups_and_pro
it_behaves_like 'logs the detail', batch_size: 2
it_behaves_like 'publishes AuthorizationsChangedEvent'
it_behaves_like 'publishes AuthorizationsRemovedEvent'
+ it_behaves_like 'does not publish AuthorizationsAddedEvent'
context 'when the GitLab installation does not have a replica database configured' do
before do
@@ -355,6 +429,7 @@ RSpec.describe ProjectAuthorizations::Changes, feature_category: :groups_and_pro
it_behaves_like 'does not log any detail'
it_behaves_like 'publishes AuthorizationsChangedEvent'
it_behaves_like 'publishes AuthorizationsRemovedEvent'
+ it_behaves_like 'does not publish AuthorizationsAddedEvent'
end
end
@@ -367,6 +442,7 @@ RSpec.describe ProjectAuthorizations::Changes, feature_category: :groups_and_pro
it_behaves_like 'does not log any detail'
it_behaves_like 'publishes AuthorizationsChangedEvent'
it_behaves_like 'publishes AuthorizationsRemovedEvent'
+ it_behaves_like 'does not publish AuthorizationsAddedEvent'
end
context 'when the project_ids list is empty' do
@@ -375,6 +451,7 @@ RSpec.describe ProjectAuthorizations::Changes, feature_category: :groups_and_pro
it_behaves_like 'does not removes any project authorizations from the current user'
it_behaves_like 'does not publish AuthorizationsChangedEvent'
it_behaves_like 'does not publish AuthorizationsRemovedEvent'
+ it_behaves_like 'does not publish AuthorizationsAddedEvent'
end
context 'when the user_ids list is nil' do
@@ -383,6 +460,7 @@ RSpec.describe ProjectAuthorizations::Changes, feature_category: :groups_and_pro
it_behaves_like 'does not removes any project authorizations from the current user'
it_behaves_like 'does not publish AuthorizationsChangedEvent'
it_behaves_like 'does not publish AuthorizationsRemovedEvent'
+ it_behaves_like 'does not publish AuthorizationsAddedEvent'
end
end
end
diff --git a/spec/models/user_spec.rb b/spec/models/user_spec.rb
index 68fdbeba84d..6d3e9058f91 100644
--- a/spec/models/user_spec.rb
+++ b/spec/models/user_spec.rb
@@ -5798,27 +5798,6 @@ RSpec.describe User, feature_category: :user_profile do
expect(user.namespace).to be_nil
end
-
- context 'when create_personal_ns_outside_model feature flag is disabled' do
- before do
- stub_feature_flags(create_personal_ns_outside_model: false)
- end
-
- it 'creates the namespace' do
- expect(user.namespace).to be_nil
-
- user.save!
-
- expect(user.namespace).to be_present
- expect(user.namespace).to be_kind_of(Namespaces::UserNamespace)
- end
-
- it 'creates the namespace setting' do
- user.save!
-
- expect(user.namespace.namespace_settings).to be_persisted
- end
- end
end
context 'for an existing user' do
diff --git a/spec/requests/api/graphql/ci/runner_spec.rb b/spec/requests/api/graphql/ci/runner_spec.rb
index 8262640b283..1b6948d0380 100644
--- a/spec/requests/api/graphql/ci/runner_spec.rb
+++ b/spec/requests/api/graphql/ci/runner_spec.rb
@@ -876,107 +876,95 @@ RSpec.describe 'Query.runner(id)', :freeze_time, feature_category: :fleet_visibi
end
describe 'Query limits' do
- def runner_query(runner)
- <<~SINGLE
- runner(id: "#{runner.to_global_id}") {
- #{all_graphql_fields_for('CiRunner', excluded: excluded_fields)}
- createdBy {
- id
- username
- webPath
- webUrl
- }
- groups {
- nodes {
- id
- path
- fullPath
- webUrl
- }
- }
- projects {
- nodes {
- id
- path
- fullPath
- webUrl
- }
- }
- ownerProject {
- id
- path
- fullPath
- webUrl
- }
+ let_it_be(:user2) { another_admin }
+ let_it_be(:user3) { create(:user) }
+ let_it_be(:tag_list) { %w[n_plus_1_test some_tag] }
+ let_it_be(:args) do
+ { current_user: user, token: { personal_access_token: create(:personal_access_token, user: user) } }
+ end
+
+ let_it_be(:runner1) { create(:ci_runner, tag_list: tag_list, creator: user) }
+ let_it_be(:runner2) do
+ create(:ci_runner, :group, groups: [group], tag_list: tag_list, creator: user)
+ end
+
+ let_it_be(:runner3) do
+ create(:ci_runner, :project, projects: [project1], tag_list: tag_list, creator: user)
+ end
+
+ let(:single_discrete_runners_query) do
+ multiple_discrete_runners_query([])
+ end
+
+ let(:runner_fragment) do
+ <<~QUERY
+ #{all_graphql_fields_for('CiRunner', excluded: excluded_fields)}
+ createdBy {
+ id
+ username
+ webPath
+ webUrl
}
- SINGLE
+ QUERY
end
- let(:active_project_runner2) { create(:ci_runner, :project) }
- let(:active_group_runner2) { create(:ci_runner, :group) }
+ # Exclude fields that are already hardcoded above (or tested separately),
+ # and also some fields from deeper objects which are problematic:
+ # - createdBy: Known N+1 issues, but only on exotic fields which we don't normally use
+ # - ownerProject.pipeline: Needs arguments (iid or sha)
+ # - project.productAnalyticsState: Can be requested only for 1 Project(s) at a time.
+ let(:excluded_fields) { %w[createdBy jobs pipeline productAnalyticsState] }
+
+ it 'avoids N+1 queries', :use_sql_query_cache do
+ discrete_runners_control = ActiveRecord::QueryRecorder.new(skip_cached: false) do
+ post_graphql(single_discrete_runners_query, **args)
+ end
+
+ additional_runners = setup_additional_records
+
+ expect do
+ post_graphql(multiple_discrete_runners_query(additional_runners), **args)
- # Exclude fields that are already hardcoded above
- let(:excluded_fields) { %w[createdBy jobs groups projects ownerProject] }
+ raise StandardError, flattened_errors if graphql_errors # Ensure any error in query causes test to fail
+ end.not_to exceed_query_limit(discrete_runners_control)
+ end
- let(:single_query) do
+ def runner_query(runner, nr)
<<~QUERY
- {
- instance_runner1: #{runner_query(active_instance_runner)}
- group_runner1: #{runner_query(active_group_runner)}
- project_runner1: #{runner_query(active_project_runner)}
+ runner#{nr}: runner(id: "#{runner.to_global_id}") {
+ #{runner_fragment}
}
QUERY
end
- let(:double_query) do
+ def multiple_discrete_runners_query(additional_runners)
<<~QUERY
{
- instance_runner1: #{runner_query(active_instance_runner)}
- instance_runner2: #{runner_query(inactive_instance_runner)}
- group_runner1: #{runner_query(active_group_runner)}
- group_runner2: #{runner_query(active_group_runner2)}
- project_runner1: #{runner_query(active_project_runner)}
- project_runner2: #{runner_query(active_project_runner2)}
+ #{runner_query(runner1, 1)}
+ #{runner_query(runner2, 2)}
+ #{runner_query(runner3, 3)}
+ #{additional_runners.each_with_index.map { |r, i| runner_query(r, 4 + i) }.join("\n")}
}
QUERY
end
- it 'does not execute more queries per runner', :aggregate_failures, quarantine: "https://gitlab.com/gitlab-org/gitlab/-/issues/391442" do
- # warm-up license cache and so on:
- personal_access_token = create(:personal_access_token, user: user)
- args = { current_user: user, token: { personal_access_token: personal_access_token } }
- post_graphql(double_query, **args)
-
- control = ActiveRecord::QueryRecorder.new { post_graphql(single_query, **args) }
-
- personal_access_token = create(:personal_access_token, user: another_admin)
- args = { current_user: another_admin, token: { personal_access_token: personal_access_token } }
- expect { post_graphql(double_query, **args) }.not_to exceed_query_limit(control)
-
- expect(graphql_data.count).to eq 6
- expect(graphql_data).to match(
- a_hash_including(
- 'instance_runner1' => a_graphql_entity_for(active_instance_runner),
- 'instance_runner2' => a_graphql_entity_for(inactive_instance_runner),
- 'group_runner1' => a_graphql_entity_for(
- active_group_runner,
- groups: { 'nodes' => contain_exactly(a_graphql_entity_for(group)) }
- ),
- 'group_runner2' => a_graphql_entity_for(
- active_group_runner2,
- groups: { 'nodes' => active_group_runner2.groups.map { |g| a_graphql_entity_for(g) } }
- ),
- 'project_runner1' => a_graphql_entity_for(
- active_project_runner,
- projects: { 'nodes' => active_project_runner.projects.map { |p| a_graphql_entity_for(p) } },
- owner_project: a_graphql_entity_for(active_project_runner.projects[0])
- ),
- 'project_runner2' => a_graphql_entity_for(
- active_project_runner2,
- projects: { 'nodes' => active_project_runner2.projects.map { |p| a_graphql_entity_for(p) } },
- owner_project: a_graphql_entity_for(active_project_runner2.projects[0])
- )
- ))
+ def setup_additional_records
+ # Add more runners (including owned by other users)
+ runner4 = create(:ci_runner, tag_list: tag_list + %w[tag1 tag2], creator: user2)
+ runner5 = create(:ci_runner, :group, groups: [create(:group)], tag_list: tag_list + %w[tag2 tag3], creator: user3)
+ # Add one more project to runner
+ runner3.assign_to(create(:project))
+
+ # Add more runner managers (including to existing runners)
+ runner_manager1 = create(:ci_runner_machine, runner: runner1)
+ create(:ci_runner_machine, runner: runner1)
+ create(:ci_runner_machine, runner: runner2, system_xid: runner_manager1.system_xid)
+ create(:ci_runner_machine, runner: runner3)
+ create(:ci_runner_machine, runner: runner4, version: '16.4.1')
+ create(:ci_runner_machine, runner: runner5, version: '16.4.0', system_xid: runner_manager1.system_xid)
+ create(:ci_runner_machine, runner: runner3)
+
+ [runner4, runner5]
end
end
diff --git a/spec/requests/api/graphql/ci/runners_spec.rb b/spec/requests/api/graphql/ci/runners_spec.rb
index 0fe14bef778..189106fae7b 100644
--- a/spec/requests/api/graphql/ci/runners_spec.rb
+++ b/spec/requests/api/graphql/ci/runners_spec.rb
@@ -18,22 +18,34 @@ RSpec.describe 'Query.runners', feature_category: :fleet_visibility do
let(:fields) do
<<~QUERY
nodes {
- #{all_graphql_fields_for('CiRunner', excluded: %w[createdBy ownerProject])}
- createdBy {
- username
- webPath
- webUrl
- }
- ownerProject {
- id
- path
- fullPath
- webUrl
- }
+ #{all_graphql_fields_for('CiRunner', excluded: excluded_fields)}
}
QUERY
end
+ let(:query) do
+ %(
+ query {
+ runners {
+ #{fields}
+ }
+ }
+ )
+ end
+
+ # Exclude fields from deeper objects which are problematic:
+ # - ownerProject.pipeline: Needs arguments (iid or sha)
+ # - project.productAnalyticsState: Can be requested only for 1 Project(s) at a time.
+ let(:excluded_fields) { %w[pipeline productAnalyticsState] }
+
+ it 'returns expected runners' do
+ post_graphql(query, current_user: current_user)
+
+ expect(runners_graphql_data['nodes']).to contain_exactly(
+ *Ci::Runner.all.map { |expected_runner| a_graphql_entity_for(expected_runner) }
+ )
+ end
+
context 'with filters' do
shared_examples 'a working graphql query returning expected runners' do
it_behaves_like 'a working graphql query' do
@@ -49,31 +61,6 @@ RSpec.describe 'Query.runners', feature_category: :fleet_visibility do
*Array(expected_runners).map { |expected_runner| a_graphql_entity_for(expected_runner) }
)
end
-
- it 'does not execute more queries per runner', :aggregate_failures do
- # warm-up license cache and so on:
- personal_access_token = create(:personal_access_token, user: current_user)
- args = { current_user: current_user, token: { personal_access_token: personal_access_token } }
- post_graphql(query, **args)
- expect(graphql_data_at(:runners, :nodes)).not_to be_empty
-
- admin2 = create(:admin)
- personal_access_token = create(:personal_access_token, user: admin2)
- args = { current_user: admin2, token: { personal_access_token: personal_access_token } }
- control = ActiveRecord::QueryRecorder.new { post_graphql(query, **args) }
-
- runner2 = create(:ci_runner, :instance, version: '14.0.0', tag_list: %w[tag5 tag6], creator: admin2)
- runner3 = create(:ci_runner, :project, version: '14.0.1', projects: [project], tag_list: %w[tag3 tag8],
- creator: current_user)
-
- create(:ci_build, :failed, runner: runner2)
- create(:ci_runner_machine, runner: runner2, version: '16.4.1')
-
- create(:ci_build, :failed, runner: runner3)
- create(:ci_runner_machine, runner: runner3, version: '16.4.0')
-
- expect { post_graphql(query, **args) }.not_to exceed_query_limit(control)
- end
end
context 'when filtered on type and status' do
@@ -179,52 +166,88 @@ RSpec.describe 'Query.runners', feature_category: :fleet_visibility do
end
end
end
+ end
- context 'without filters' do
- context 'with managers requested for multiple runners' do
- let(:fields) do
- <<~QUERY
- nodes {
- managers {
- nodes {
- #{all_graphql_fields_for('CiRunnerManager', max_depth: 1)}
- }
- }
- }
- QUERY
- end
+ describe 'Runner query limits' do
+ let_it_be(:user) { create(:user, :admin) }
+ let_it_be(:user2) { create(:user) }
+ let_it_be(:user3) { create(:user) }
+ let_it_be(:group) { create(:group) }
+ let_it_be(:project) { create(:project) }
+ let_it_be(:tag_list) { %w[n_plus_1_test some_tag] }
+ let_it_be(:args) do
+ { current_user: user, token: { personal_access_token: create(:personal_access_token, user: user) } }
+ end
- let(:query) do
- %(
- query {
- runners {
- #{fields}
- }
- }
- )
- end
+ let_it_be(:runner1) { create(:ci_runner, tag_list: tag_list, creator: user) }
+ let_it_be(:runner2) do
+ create(:ci_runner, :group, groups: [group], tag_list: tag_list, creator: user)
+ end
- it 'does not execute more queries per runner', :aggregate_failures do
- # warm-up license cache and so on:
- personal_access_token = create(:personal_access_token, user: current_user)
- args = { current_user: current_user, token: { personal_access_token: personal_access_token } }
- post_graphql(query, **args)
- expect(graphql_data_at(:runners, :nodes)).not_to be_empty
-
- admin2 = create(:admin)
- personal_access_token = create(:personal_access_token, user: admin2)
- args = { current_user: admin2, token: { personal_access_token: personal_access_token } }
- control = ActiveRecord::QueryRecorder.new { post_graphql(query, **args) }
-
- create(:ci_runner, :instance, :with_runner_manager, version: '14.0.0', tag_list: %w[tag5 tag6],
- creator: admin2)
- create(:ci_runner, :project, :with_runner_manager, version: '14.0.1', projects: [project],
- tag_list: %w[tag3 tag8],
- creator: current_user)
-
- expect { post_graphql(query, **args) }.not_to exceed_query_limit(control)
- end
- end
+ let_it_be(:runner3) do
+ create(:ci_runner, :project, projects: [project], tag_list: tag_list, creator: user)
+ end
+
+ let(:runner_fragment) do
+ <<~QUERY
+ #{all_graphql_fields_for('CiRunner', excluded: excluded_fields)}
+ createdBy {
+ id
+ username
+ webPath
+ webUrl
+ }
+ QUERY
+ end
+
+ # Exclude fields that are already hardcoded above (or tested separately),
+ # and also some fields from deeper objects which are problematic:
+ # - createdBy: Known N+1 issues, but only on exotic fields which we don't normally use
+ # - ownerProject.pipeline: Needs arguments (iid or sha)
+ # - project.productAnalyticsState: Can be requested only for 1 Project(s) at a time.
+ let(:excluded_fields) { %w[createdBy jobs pipeline productAnalyticsState] }
+
+ let(:runners_query) do
+ <<~QUERY
+ {
+ runners {
+ nodes { #{runner_fragment} }
+ }
+ }
+ QUERY
+ end
+
+ it 'avoids N+1 queries', :use_sql_query_cache do
+ personal_access_token = create(:personal_access_token, user: user)
+ args = { current_user: user, token: { personal_access_token: personal_access_token } }
+
+ runners_control = ActiveRecord::QueryRecorder.new(skip_cached: false) { post_graphql(runners_query, **args) }
+
+ setup_additional_records
+
+ expect { post_graphql(runners_query, **args) }.not_to exceed_query_limit(runners_control)
+ end
+
+ def setup_additional_records
+ # Add more runners (including owned by other users)
+ runner4 = create(:ci_runner, tag_list: tag_list + %w[tag1 tag2], creator: user2)
+ runner5 = create(:ci_runner, :group, groups: [create(:group)], tag_list: tag_list + %w[tag2 tag3], creator: user3)
+ # Add one more project to runner
+ runner3.assign_to(create(:project))
+
+ # Add more runner managers (including to existing runners)
+ runner_manager1 = create(:ci_runner_machine, runner: runner1)
+ create(:ci_runner_machine, runner: runner1)
+ create(:ci_runner_machine, runner: runner2, system_xid: runner_manager1.system_xid)
+ create(:ci_runner_machine, runner: runner3)
+ create(:ci_runner_machine, runner: runner4, version: '16.4.1')
+ create(:ci_runner_machine, runner: runner5, version: '16.4.0', system_xid: runner_manager1.system_xid)
+ create(:ci_runner_machine, runner: runner3)
+
+ create(:ci_build, :failed, runner: runner4)
+ create(:ci_build, :failed, runner: runner5)
+
+ [runner4, runner5]
end
end
diff --git a/spec/services/ci/cancel_pipeline_service_spec.rb b/spec/services/ci/cancel_pipeline_service_spec.rb
index 256d2db1ed2..6051485c4df 100644
--- a/spec/services/ci/cancel_pipeline_service_spec.rb
+++ b/spec/services/ci/cancel_pipeline_service_spec.rb
@@ -13,12 +13,14 @@ RSpec.describe Ci::CancelPipelineService, :aggregate_failures, feature_category:
current_user: current_user,
cascade_to_children: cascade_to_children,
auto_canceled_by_pipeline: auto_canceled_by_pipeline,
- execute_async: execute_async)
+ execute_async: execute_async,
+ safe_cancellation: safe_cancellation)
end
let(:cascade_to_children) { true }
let(:auto_canceled_by_pipeline) { nil }
let(:execute_async) { true }
+ let(:safe_cancellation) { false }
shared_examples 'force_execute' do
context 'when pipeline is not cancelable' do
@@ -30,9 +32,14 @@ RSpec.describe Ci::CancelPipelineService, :aggregate_failures, feature_category:
context 'when pipeline is cancelable' do
before do
- create(:ci_build, :running, pipeline: pipeline)
- create(:ci_build, :created, pipeline: pipeline)
- create(:ci_build, :success, pipeline: pipeline)
+ create(:ci_build, :running, pipeline: pipeline, name: 'build1')
+ create(:ci_build, :created, pipeline: pipeline, name: 'build2')
+ create(:ci_build, :success, pipeline: pipeline, name: 'build3')
+ create(:ci_build, :pending, :interruptible, pipeline: pipeline, name: 'build4')
+
+ create(:ci_bridge, :running, pipeline: pipeline, name: 'bridge1')
+ create(:ci_bridge, :running, :interruptible, pipeline: pipeline, name: 'bridge2')
+ create(:ci_bridge, :success, :interruptible, pipeline: pipeline, name: 'bridge3')
end
it 'logs the event' do
@@ -55,7 +62,15 @@ RSpec.describe Ci::CancelPipelineService, :aggregate_failures, feature_category:
it 'cancels all cancelable jobs' do
expect(response).to be_success
- expect(pipeline.all_jobs.pluck(:status)).to match_array(%w[canceled canceled success])
+ expect(pipeline.all_jobs.pluck(:name, :status)).to match_array([
+ %w[build1 canceled],
+ %w[build2 canceled],
+ %w[build3 success],
+ %w[build4 canceled],
+ %w[bridge1 canceled],
+ %w[bridge2 canceled],
+ %w[bridge3 success]
+ ])
end
context 'when auto_canceled_by_pipeline is provided' do
@@ -74,6 +89,28 @@ RSpec.describe Ci::CancelPipelineService, :aggregate_failures, feature_category:
end
end
+ context 'when cascade_to_children: false and safe_cancellation: true' do
+ # We are testing the `safe_cancellation: true`` case with only `cascade_to_children: false`
+ # because `safe_cancellation` is passed as `true` only when `cascade_to_children` is `false`
+ # from `CancelRedundantPipelinesService`.
+
+ let(:cascade_to_children) { false }
+ let(:safe_cancellation) { true }
+
+ it 'cancels only interruptible jobs' do
+ expect(response).to be_success
+ expect(pipeline.all_jobs.pluck(:name, :status)).to match_array([
+ %w[build1 running],
+ %w[build2 created],
+ %w[build3 success],
+ %w[build4 canceled],
+ %w[bridge1 running],
+ %w[bridge2 canceled],
+ %w[bridge3 success]
+ ])
+ end
+ end
+
context 'when pipeline has child pipelines' do
let(:child_pipeline) { create(:ci_pipeline, child_of: pipeline) }
let!(:child_job) { create(:ci_build, :running, pipeline: child_pipeline) }
@@ -81,8 +118,8 @@ RSpec.describe Ci::CancelPipelineService, :aggregate_failures, feature_category:
let!(:grandchild_job) { create(:ci_build, :running, pipeline: grandchild_pipeline) }
before do
- child_pipeline.source_bridge.update!(status: :running)
- grandchild_pipeline.source_bridge.update!(status: :running)
+ child_pipeline.source_bridge.update!(name: 'child_pipeline_bridge', status: :running)
+ grandchild_pipeline.source_bridge.update!(name: 'grandchild_pipeline_bridge', status: :running)
end
context 'when execute_async: false' do
@@ -91,8 +128,15 @@ RSpec.describe Ci::CancelPipelineService, :aggregate_failures, feature_category:
it 'cancels the bridge jobs and child jobs' do
expect(response).to be_success
- expect(pipeline.bridges.pluck(:status)).to be_all('canceled')
- expect(child_pipeline.bridges.pluck(:status)).to be_all('canceled')
+ expect(pipeline.bridges.pluck(:name, :status)).to match_array([
+ %w[bridge1 canceled],
+ %w[bridge2 canceled],
+ %w[bridge3 success],
+ %w[child_pipeline_bridge canceled]
+ ])
+ expect(child_pipeline.bridges.pluck(:name, :status)).to match_array([
+ %w[grandchild_pipeline_bridge canceled]
+ ])
expect(child_job.reload).to be_canceled
expect(grandchild_job.reload).to be_canceled
end
@@ -110,7 +154,12 @@ RSpec.describe Ci::CancelPipelineService, :aggregate_failures, feature_category:
expect(response).to be_success
- expect(pipeline.bridges.pluck(:status)).to be_all('canceled')
+ expect(pipeline.bridges.pluck(:name, :status)).to match_array([
+ %w[bridge1 canceled],
+ %w[bridge2 canceled],
+ %w[bridge3 success],
+ %w[child_pipeline_bridge canceled]
+ ])
end
end
@@ -124,7 +173,12 @@ RSpec.describe Ci::CancelPipelineService, :aggregate_failures, feature_category:
expect(response).to be_success
- expect(pipeline.bridges.pluck(:status)).to be_all('canceled')
+ expect(pipeline.bridges.pluck(:name, :status)).to match_array([
+ %w[bridge1 canceled],
+ %w[bridge2 canceled],
+ %w[bridge3 success],
+ %w[child_pipeline_bridge canceled]
+ ])
expect(child_job.reload).to be_running
end
end
diff --git a/spec/services/ci/create_pipeline_service/workflow_auto_cancel_spec.rb b/spec/services/ci/create_pipeline_service/workflow_auto_cancel_spec.rb
index 851c6f8fbea..3ad6164bd01 100644
--- a/spec/services/ci/create_pipeline_service/workflow_auto_cancel_spec.rb
+++ b/spec/services/ci/create_pipeline_service/workflow_auto_cancel_spec.rb
@@ -57,7 +57,7 @@ RSpec.describe Ci::CreatePipelineService, :yaml_processor_feature_flag_corectnes
it 'creates a pipeline with errors' do
expect(pipeline).to be_persisted
expect(pipeline.errors.full_messages).to include(
- 'workflow:auto_cancel on new commit must be one of: conservative, interruptible, disabled')
+ 'workflow:auto_cancel on new commit must be one of: conservative, interruptible, none')
end
end
end
diff --git a/spec/services/ci/pipeline_creation/cancel_redundant_pipelines_service_spec.rb b/spec/services/ci/pipeline_creation/cancel_redundant_pipelines_service_spec.rb
index 0d83187f9e4..7b5eef92f53 100644
--- a/spec/services/ci/pipeline_creation/cancel_redundant_pipelines_service_spec.rb
+++ b/spec/services/ci/pipeline_creation/cancel_redundant_pipelines_service_spec.rb
@@ -53,7 +53,7 @@ RSpec.describe Ci::PipelineCreation::CancelRedundantPipelinesService, feature_ca
project.update!(auto_cancel_pending_pipelines: 'enabled')
end
- it 'cancels only previous interruptible builds' do
+ it 'cancels only previous non started builds' do
execute
expect(build_statuses(prev_pipeline)).to contain_exactly('canceled', 'success', 'canceled')
@@ -153,6 +153,36 @@ RSpec.describe Ci::PipelineCreation::CancelRedundantPipelinesService, feature_ca
expect(build_statuses(child_pipeline)).to contain_exactly('running', 'success')
end
+
+ context 'when the child pipeline auto_cancel_on_new_commit is `interruptible`' do
+ before do
+ child_pipeline.create_pipeline_metadata!(
+ project: child_pipeline.project, auto_cancel_on_new_commit: 'interruptible'
+ )
+ end
+
+ it 'cancels interruptible child pipeline builds' do
+ expect(build_statuses(child_pipeline)).to contain_exactly('running', 'success')
+
+ execute
+
+ expect(build_statuses(child_pipeline)).to contain_exactly('canceled', 'success')
+ end
+
+ context 'when the FF ci_workflow_auto_cancel_on_new_commit is disabled' do
+ before do
+ stub_feature_flags(ci_workflow_auto_cancel_on_new_commit: false)
+ end
+
+ it 'does not cancel any child pipeline builds' do
+ expect(build_statuses(child_pipeline)).to contain_exactly('running', 'success')
+
+ execute
+
+ expect(build_statuses(child_pipeline)).to contain_exactly('running', 'success')
+ end
+ end
+ end
end
context 'when the child pipeline has non-interruptible non-started job' do
@@ -227,6 +257,37 @@ RSpec.describe Ci::PipelineCreation::CancelRedundantPipelinesService, feature_ca
end
end
+ context 'when there are non-interruptible completed jobs in the pipeline' do
+ before do
+ create(:ci_build, :failed, pipeline: prev_pipeline)
+ create(:ci_build, :success, pipeline: prev_pipeline)
+ end
+
+ it 'does not cancel any job' do
+ execute
+
+ expect(job_statuses(prev_pipeline)).to contain_exactly(
+ 'running', 'success', 'created', 'failed', 'success'
+ )
+ expect(job_statuses(pipeline)).to contain_exactly('pending')
+ end
+
+ context 'when the FF ci_workflow_auto_cancel_on_new_commit is disabled' do
+ before do
+ stub_feature_flags(ci_workflow_auto_cancel_on_new_commit: false)
+ end
+
+ it 'does not cancel any job' do
+ execute
+
+ expect(job_statuses(prev_pipeline)).to contain_exactly(
+ 'running', 'success', 'created', 'failed', 'success'
+ )
+ expect(job_statuses(pipeline)).to contain_exactly('pending')
+ end
+ end
+ end
+
context 'when there are trigger jobs' do
before do
create(:ci_bridge, :created, pipeline: prev_pipeline)
@@ -246,6 +307,152 @@ RSpec.describe Ci::PipelineCreation::CancelRedundantPipelinesService, feature_ca
end
end
+ context 'when auto_cancel_on_new_commit is `interruptible`' do
+ before do
+ prev_pipeline.create_pipeline_metadata!(
+ project: prev_pipeline.project, auto_cancel_on_new_commit: 'interruptible'
+ )
+ end
+
+ it 'cancels only interruptible jobs' do
+ execute
+
+ expect(job_statuses(prev_pipeline)).to contain_exactly('canceled', 'success', 'created')
+ expect(job_statuses(pipeline)).to contain_exactly('pending')
+ end
+
+ context 'when the FF ci_workflow_auto_cancel_on_new_commit is disabled' do
+ before do
+ stub_feature_flags(ci_workflow_auto_cancel_on_new_commit: false)
+ end
+
+ it 'cancels non started builds' do
+ execute
+
+ expect(build_statuses(prev_pipeline)).to contain_exactly('canceled', 'success', 'canceled')
+ expect(build_statuses(pipeline)).to contain_exactly('pending')
+ end
+ end
+
+ context 'when there are non-interruptible completed jobs in the pipeline' do
+ before do
+ create(:ci_build, :failed, pipeline: prev_pipeline)
+ create(:ci_build, :success, pipeline: prev_pipeline)
+ end
+
+ it 'still cancels only interruptible jobs' do
+ execute
+
+ expect(job_statuses(prev_pipeline)).to contain_exactly(
+ 'canceled', 'success', 'created', 'failed', 'success'
+ )
+ expect(job_statuses(pipeline)).to contain_exactly('pending')
+ end
+
+ context 'when the FF ci_workflow_auto_cancel_on_new_commit is disabled' do
+ before do
+ stub_feature_flags(ci_workflow_auto_cancel_on_new_commit: false)
+ end
+
+ it 'does not cancel any job' do
+ execute
+
+ expect(build_statuses(prev_pipeline)).to contain_exactly(
+ 'created', 'success', 'running', 'failed', 'success'
+ )
+ expect(build_statuses(pipeline)).to contain_exactly('pending')
+ end
+ end
+ end
+ end
+
+ context 'when auto_cancel_on_new_commit is `none`' do
+ before do
+ prev_pipeline.create_pipeline_metadata!(
+ project: prev_pipeline.project, auto_cancel_on_new_commit: 'none'
+ )
+ end
+
+ it 'does not cancel any job' do
+ execute
+
+ expect(job_statuses(prev_pipeline)).to contain_exactly('running', 'success', 'created')
+ expect(job_statuses(pipeline)).to contain_exactly('pending')
+ end
+ end
+
+ context 'when auto_cancel_on_new_commit is `conservative`' do
+ before do
+ prev_pipeline.create_pipeline_metadata!(
+ project: prev_pipeline.project, auto_cancel_on_new_commit: 'conservative'
+ )
+ end
+
+ it 'cancels only previous non started builds' do
+ execute
+
+ expect(build_statuses(prev_pipeline)).to contain_exactly('canceled', 'success', 'canceled')
+ expect(build_statuses(pipeline)).to contain_exactly('pending')
+ end
+
+ context 'when the FF ci_workflow_auto_cancel_on_new_commit is disabled' do
+ before do
+ stub_feature_flags(ci_workflow_auto_cancel_on_new_commit: false)
+ end
+
+ it 'cancels only previous non started builds' do
+ execute
+
+ expect(build_statuses(prev_pipeline)).to contain_exactly('canceled', 'success', 'canceled')
+ expect(build_statuses(pipeline)).to contain_exactly('pending')
+ end
+ end
+
+ context 'when there are non-interruptible completed jobs in the pipeline' do
+ before do
+ create(:ci_build, :failed, pipeline: prev_pipeline)
+ create(:ci_build, :success, pipeline: prev_pipeline)
+ end
+
+ it 'does not cancel any job' do
+ execute
+
+ expect(job_statuses(prev_pipeline)).to contain_exactly(
+ 'running', 'success', 'created', 'failed', 'success'
+ )
+ expect(job_statuses(pipeline)).to contain_exactly('pending')
+ end
+
+ context 'when the FF ci_workflow_auto_cancel_on_new_commit is disabled' do
+ before do
+ stub_feature_flags(ci_workflow_auto_cancel_on_new_commit: false)
+ end
+
+ it 'does not cancel any job' do
+ execute
+
+ expect(job_statuses(prev_pipeline)).to contain_exactly(
+ 'running', 'success', 'created', 'failed', 'success'
+ )
+ expect(job_statuses(pipeline)).to contain_exactly('pending')
+ end
+ end
+ end
+ end
+
+ context 'when auto_cancel_on_new_commit is an invalid value' do
+ before do
+ allow(prev_pipeline).to receive(:auto_cancel_on_new_commit).and_return('invalid')
+ relation = Ci::Pipeline.id_in(prev_pipeline.id)
+ allow(relation).to receive(:each).and_yield(prev_pipeline)
+ allow(Ci::Pipeline).to receive(:id_in).and_return(relation)
+ end
+
+ it 'raises an error' do
+ expect { execute }.to raise_error(ArgumentError, 'Unknown auto_cancel_on_new_commit value: invalid')
+ end
+ end
+
it 'does not cancel future pipelines' do
expect(prev_pipeline.id).to be < pipeline.id
expect(build_statuses(pipeline)).to contain_exactly('pending')
diff --git a/spec/services/members/create_service_spec.rb b/spec/services/members/create_service_spec.rb
index af151f93dc7..c08b40e9528 100644
--- a/spec/services/members/create_service_spec.rb
+++ b/spec/services/members/create_service_spec.rb
@@ -119,14 +119,34 @@ RSpec.describe Members::CreateService, :aggregate_failures, :clean_gitlab_redis_
before do
# validations will fail because we try to invite them to the project as a guest
source.group.add_developer(member)
+ allow(Gitlab::EventStore).to receive(:publish)
end
- it 'triggers the members added and authorizations changed events' do
+ it 'triggers the authorizations changed events' do
expect(Gitlab::EventStore)
- .to receive(:publish)
- .with(an_instance_of(ProjectAuthorizations::AuthorizationsChangedEvent))
+ .to receive(:publish_group)
+ .with(array_including(an_instance_of(ProjectAuthorizations::AuthorizationsAddedEvent)))
.and_call_original
+ execute_service
+ end
+
+ context 'when feature flag "add_policy_approvers_to_rules" is disabled' do
+ before do
+ stub_feature_flags(add_policy_approvers_to_rules: false)
+ end
+
+ it 'triggers the authorizations changed event' do
+ expect(Gitlab::EventStore)
+ .to receive(:publish)
+ .with(an_instance_of(ProjectAuthorizations::AuthorizationsChangedEvent))
+ .and_call_original
+
+ execute_service
+ end
+ end
+
+ it 'triggers the members added event' do
expect(Gitlab::EventStore)
.to receive(:publish)
.with(an_instance_of(Members::MembersAddedEvent))
diff --git a/spec/support/finder_collection.rb b/spec/support/finder_collection.rb
index 494dd4bdca1..93363943449 100644
--- a/spec/support/finder_collection.rb
+++ b/spec/support/finder_collection.rb
@@ -1,6 +1,6 @@
# frozen_string_literal: true
-require 'set'
+require 'set' # rubocop:disable Lint/RedundantRequireStatement -- Ruby 3.1 and earlier needs this. Drop this line after Ruby 3.2+ is only supported.
module Support
# Ensure that finders' `execute` method always returns
diff --git a/spec/support/helpers/dns_helpers.rb b/spec/support/helpers/dns_helpers.rb
index be26c80d217..e673e36adbd 100644
--- a/spec/support/helpers/dns_helpers.rb
+++ b/spec/support/helpers/dns_helpers.rb
@@ -6,6 +6,7 @@ module DnsHelpers
stub_invalid_dns!
permit_local_dns!
permit_postgresql!
+ permit_redis!
end
def permit_dns!
@@ -53,6 +54,18 @@ module DnsHelpers
ActiveRecord::Base.configurations.configs_for(env_name: Rails.env).map(&:host).compact.uniq
end
+ def permit_redis!
+ # https://github.com/redis-rb/redis-client/blob/v0.11.2/lib/redis_client/ruby_connection.rb#L51 uses Socket.tcp that
+ # calls Addrinfo.getaddrinfo internally.
+ hosts = Gitlab::Redis::ALL_CLASSES.map do |redis_instance|
+ redis_instance.redis_client_params[:host]
+ end.uniq.compact
+
+ hosts.each do |host|
+ allow(Addrinfo).to receive(:getaddrinfo).with(host, anything, nil, :STREAM, anything, anything, any_args).and_call_original
+ end
+ end
+
def stub_resolver(stubbed_lookups = {})
resolver = instance_double('Resolv::DNS')
allow(resolver).to receive(:timeouts=)
diff --git a/spec/support/shared_contexts/lib/gitlab/sidekiq_logging/structured_logger_shared_context.rb b/spec/support/shared_contexts/lib/gitlab/sidekiq_logging/structured_logger_shared_context.rb
index 69c20a00c5a..060976eba2d 100644
--- a/spec/support/shared_contexts/lib/gitlab/sidekiq_logging/structured_logger_shared_context.rb
+++ b/spec/support/shared_contexts/lib/gitlab/sidekiq_logging/structured_logger_shared_context.rb
@@ -93,8 +93,6 @@ RSpec.shared_context 'structured_logger' do
end
before do
- allow(Sidekiq).to receive(:logger).and_return(logger)
-
allow(subject).to receive(:current_time).and_return(timestamp.to_f)
allow(Process).to receive(:clock_gettime).with(Process::CLOCK_REALTIME, :float_second)
@@ -103,7 +101,7 @@ RSpec.shared_context 'structured_logger' do
.and_return(clock_thread_cputime_start, clock_thread_cputime_end)
end
- subject { described_class.new }
+ subject { described_class.new(logger) }
def call_subject(job, queue)
# This structured logger strongly depends on execution of `InstrumentationLogger`
diff --git a/spec/support/shared_contexts/lib/gitlab/sidekiq_middleware/server_metrics_shared_context.rb b/spec/support/shared_contexts/lib/gitlab/sidekiq_middleware/server_metrics_shared_context.rb
index 85ee3ed4183..d541dee438e 100644
--- a/spec/support/shared_contexts/lib/gitlab/sidekiq_middleware/server_metrics_shared_context.rb
+++ b/spec/support/shared_contexts/lib/gitlab/sidekiq_middleware/server_metrics_shared_context.rb
@@ -55,6 +55,7 @@ RSpec.shared_context 'server metrics with mocked prometheus' do
allow(Gitlab::Metrics).to receive(:gauge).with(:sidekiq_mem_total_bytes, anything, {}, :all).and_return(sidekiq_mem_total_bytes)
allow(concurrency_metric).to receive(:set)
+ allow(completion_seconds_metric).to receive(:get)
end
end
diff --git a/spec/support/shared_examples/redis/redis_shared_examples.rb b/spec/support/shared_examples/redis/redis_shared_examples.rb
index 4929a753829..1f7834a4d7c 100644
--- a/spec/support/shared_examples/redis/redis_shared_examples.rb
+++ b/spec/support/shared_examples/redis/redis_shared_examples.rb
@@ -86,6 +86,67 @@ RSpec.shared_examples "redis_shared_examples" do
end
end
+ describe '.redis_client_params' do
+ # .redis_client_params wraps over `.redis_store_options` by modifying its outputs
+ # to be compatible with `RedisClient`. We test for compatibility in this block while
+ # the contents of redis_store_options are tested in the `.params` block.
+
+ subject { described_class.new(rails_env).redis_client_params }
+
+ let(:rails_env) { 'development' }
+ let(:config_file_name) { config_old_format_socket }
+
+ shared_examples 'instrumentation_class in custom key' do
+ it 'moves instrumentation class into custom' do
+ expect(subject[:custom][:instrumentation_class]).to eq(described_class.store_name)
+ expect(subject[:instrumentation_class]).to be_nil
+ end
+ end
+
+ context 'when url is host based' do
+ context 'with old format' do
+ let(:config_file_name) { config_old_format_host }
+
+ it 'does not raise ArgumentError for invalid keywords' do
+ expect { RedisClient.config(**subject) }.not_to raise_error
+ end
+
+ it_behaves_like 'instrumentation_class in custom key'
+ end
+
+ context 'with new format' do
+ let(:config_file_name) { config_new_format_host }
+
+ where(:rails_env, :host) do
+ [
+ %w[development development-host],
+ %w[test test-host],
+ %w[production production-host]
+ ]
+ end
+
+ with_them do
+ it 'does not raise ArgumentError for invalid keywords in SentinelConfig' do
+ expect(subject[:name]).to eq(host)
+ expect { RedisClient.sentinel(**subject) }.not_to raise_error
+ end
+
+ it_behaves_like 'instrumentation_class in custom key'
+ end
+ end
+ end
+
+ context 'when url contains unix socket reference' do
+ let(:config_file_name) { config_old_format_socket }
+
+ it 'does not raise ArgumentError for invalid keywords' do
+ expect { RedisClient.config(**subject) }.not_to raise_error
+ end
+
+ it_behaves_like 'instrumentation_class in custom key'
+ end
+ end
+
describe '.params' do
subject { described_class.new(rails_env).params }
diff --git a/spec/support/sidekiq.rb b/spec/support/sidekiq.rb
index b25f39c5e74..6c354c780b2 100644
--- a/spec/support/sidekiq.rb
+++ b/spec/support/sidekiq.rb
@@ -1,13 +1,19 @@
# frozen_string_literal: true
RSpec.configure do |config|
- def gitlab_sidekiq_inline(&block)
+ def gitlab_sidekiq_inline
# We need to cleanup the queues before running jobs in specs because the
# middleware might have written to redis
redis_queues_cleanup!
redis_queues_metadata_cleanup!
- Sidekiq::Testing.inline!(&block)
+
+ # Scoped inline! is thread-safe which breaks capybara specs
+ # see https://github.com/sidekiq/sidekiq/issues/6069
+ Sidekiq::Testing.inline!
+
+ yield
ensure
+ Sidekiq::Testing.fake! # fake is the default so we reset it to that
redis_queues_cleanup!
redis_queues_metadata_cleanup!
end
diff --git a/spec/support/sidekiq_middleware.rb b/spec/support/sidekiq_middleware.rb
index f4d90ff5151..cbd6163d46b 100644
--- a/spec/support/sidekiq_middleware.rb
+++ b/spec/support/sidekiq_middleware.rb
@@ -6,15 +6,6 @@ require 'sidekiq/testing'
module SidekiqMiddleware
def with_sidekiq_server_middleware(&block)
Sidekiq::Testing.server_middleware.clear
-
- if Gem::Version.new(Sidekiq::VERSION) != Gem::Version.new('6.5.12')
- raise 'New version of sidekiq detected, please remove this line'
- end
-
- # This line is a workaround for a Sidekiq bug that is already fixed in v7.0.0
- # https://github.com/mperham/sidekiq/commit/1b83a152786ed382f07fff12d2608534f1e3c922
- Sidekiq::Testing.server_middleware.instance_variable_set(:@config, Sidekiq)
-
Sidekiq::Testing.server_middleware(&block)
ensure
Sidekiq::Testing.server_middleware.clear
diff --git a/tooling/lib/tooling/crystalball/coverage_lines_strategy.rb b/tooling/lib/tooling/crystalball/coverage_lines_strategy.rb
index ebcaab0b8d8..224fa3b9a79 100644
--- a/tooling/lib/tooling/crystalball/coverage_lines_strategy.rb
+++ b/tooling/lib/tooling/crystalball/coverage_lines_strategy.rb
@@ -10,13 +10,15 @@ module Tooling
# modified to use Coverage.start(lines: true)
# This maintains compatibility with SimpleCov on Ruby >= 2.5 with start arguments
# and SimpleCov.start uses Coverage.start(lines: true) by default
+ # See https://github.com/simplecov-ruby/simplecov/blob/v0.22.0/lib/simplecov/configuration.rb#L381
class CoverageLinesStrategy < ::Crystalball::MapGenerator::CoverageStrategy
def initialize(execution_detector = CoverageLinesExecutionDetector)
super(execution_detector)
end
def after_register
- Coverage.start(lines: true)
+ # We might have started SimpleCov already
+ Coverage.start(lines: true) unless SimpleCov.running
end
end
end
diff --git a/tooling/lib/tooling/test_map_generator.rb b/tooling/lib/tooling/test_map_generator.rb
index 88b4353b232..6a3e7337e58 100644
--- a/tooling/lib/tooling/test_map_generator.rb
+++ b/tooling/lib/tooling/test_map_generator.rb
@@ -1,6 +1,6 @@
# frozen_string_literal: true
-require 'set'
+require 'set' # rubocop:disable Lint/RedundantRequireStatement -- Ruby 3.1 and earlier needs this. Drop this line after Ruby 3.2+ is only supported.
require 'yaml'
module Tooling
diff --git a/vendor/gems/bundler-checksum/lib/bundler_checksum/command/lint.rb b/vendor/gems/bundler-checksum/lib/bundler_checksum/command/lint.rb
index 0f1249dcf71..a515a6d31ea 100644
--- a/vendor/gems/bundler-checksum/lib/bundler_checksum/command/lint.rb
+++ b/vendor/gems/bundler-checksum/lib/bundler_checksum/command/lint.rb
@@ -1,6 +1,6 @@
# frozen_string_literal: true
-require 'set'
+require 'set' # rubocop:disable Lint/RedundantRequireStatement -- Ruby 3.1 and earlier needs this. Drop this line after Ruby 3.2+ is only supported.
module BundlerChecksum::Command
module Lint
diff --git a/vendor/gems/sidekiq-reliable-fetch/Gemfile b/vendor/gems/sidekiq-reliable-fetch/Gemfile
index 3bed294f56f..8f86b2fe0b6 100644
--- a/vendor/gems/sidekiq-reliable-fetch/Gemfile
+++ b/vendor/gems/sidekiq-reliable-fetch/Gemfile
@@ -11,4 +11,5 @@ group :test do
gem "pry"
gem 'simplecov', require: false
gem 'stub_env', '~> 1.0'
+ gem 'redis', '~> 4.8'
end
diff --git a/vendor/gems/sidekiq-reliable-fetch/Gemfile.lock b/vendor/gems/sidekiq-reliable-fetch/Gemfile.lock
index aeb163db018..484370fdfcc 100644
--- a/vendor/gems/sidekiq-reliable-fetch/Gemfile.lock
+++ b/vendor/gems/sidekiq-reliable-fetch/Gemfile.lock
@@ -1,46 +1,51 @@
PATH
remote: .
specs:
- gitlab-sidekiq-fetcher (0.10.0)
+ gitlab-sidekiq-fetcher (0.11.0)
json (>= 2.5)
- sidekiq (~> 6.1)
+ sidekiq (~> 7.0)
GEM
remote: https://rubygems.org/
specs:
- coderay (1.1.2)
- connection_pool (2.4.0)
- diff-lcs (1.3)
- docile (1.3.1)
- json (2.5.1)
- method_source (0.9.0)
- pry (0.11.3)
- coderay (~> 1.1.0)
- method_source (~> 0.9.0)
- rack (2.2.6.4)
+ coderay (1.1.3)
+ concurrent-ruby (1.2.2)
+ connection_pool (2.4.1)
+ diff-lcs (1.5.0)
+ docile (1.4.0)
+ json (2.6.3)
+ method_source (1.0.0)
+ pry (0.14.2)
+ coderay (~> 1.1)
+ method_source (~> 1.0)
+ rack (3.0.8)
redis (4.8.1)
- rspec (3.8.0)
- rspec-core (~> 3.8.0)
- rspec-expectations (~> 3.8.0)
- rspec-mocks (~> 3.8.0)
- rspec-core (3.8.0)
- rspec-support (~> 3.8.0)
- rspec-expectations (3.8.1)
+ redis-client (0.18.0)
+ connection_pool
+ rspec (3.12.0)
+ rspec-core (~> 3.12.0)
+ rspec-expectations (~> 3.12.0)
+ rspec-mocks (~> 3.12.0)
+ rspec-core (3.12.2)
+ rspec-support (~> 3.12.0)
+ rspec-expectations (3.12.3)
diff-lcs (>= 1.2.0, < 2.0)
- rspec-support (~> 3.8.0)
- rspec-mocks (3.8.0)
+ rspec-support (~> 3.12.0)
+ rspec-mocks (3.12.6)
diff-lcs (>= 1.2.0, < 2.0)
- rspec-support (~> 3.8.0)
- rspec-support (3.8.0)
- sidekiq (6.5.8)
- connection_pool (>= 2.2.5, < 3)
- rack (~> 2.0)
- redis (>= 4.5.0, < 5)
- simplecov (0.16.1)
+ rspec-support (~> 3.12.0)
+ rspec-support (3.12.1)
+ sidekiq (7.2.0)
+ concurrent-ruby (< 2)
+ connection_pool (>= 2.3.0)
+ rack (>= 2.2.4)
+ redis-client (>= 0.14.0)
+ simplecov (0.22.0)
docile (~> 1.1)
- json (>= 1.8, < 3)
- simplecov-html (~> 0.10.0)
- simplecov-html (0.10.2)
+ simplecov-html (~> 0.11)
+ simplecov_json_formatter (~> 0.1)
+ simplecov-html (0.12.3)
+ simplecov_json_formatter (0.1.4)
stub_env (1.0.4)
rspec (>= 2.0, < 4.0)
@@ -50,6 +55,7 @@ PLATFORMS
DEPENDENCIES
gitlab-sidekiq-fetcher!
pry
+ redis (~> 4.8)
rspec (~> 3)
simplecov
stub_env (~> 1.0)
diff --git a/vendor/gems/sidekiq-reliable-fetch/README.md b/vendor/gems/sidekiq-reliable-fetch/README.md
index 4c7029e3955..5e218a76cd5 100644
--- a/vendor/gems/sidekiq-reliable-fetch/README.md
+++ b/vendor/gems/sidekiq-reliable-fetch/README.md
@@ -6,7 +6,7 @@ fetches from Redis.
It's based on https://github.com/TEA-ebook/sidekiq-reliable-fetch.
-**IMPORTANT NOTE:** Since version `0.7.0` this gem works only with `sidekiq >= 6.1` (which introduced Fetch API breaking changes). Please use version `~> 0.5` if you use older version of the `sidekiq` .
+**IMPORTANT NOTE:** Since version `0.11.0` this gem works only with `sidekiq >= 7` (which introduced Fetch API breaking changes). Please use version `~> 0.10` if you use older version of the `sidekiq` .
**UPGRADE NOTE:** If upgrading from 0.7.0, strongly consider a full deployed step on 0.7.1 before 0.8.0; that fixes a bug in the queue name validation that will hit if sidekiq nodes running 0.7.0 see working queues named by 0.8.0. See https://gitlab.com/gitlab-org/sidekiq-reliable-fetch/-/merge_requests/22
diff --git a/vendor/gems/sidekiq-reliable-fetch/gitlab-sidekiq-fetcher.gemspec b/vendor/gems/sidekiq-reliable-fetch/gitlab-sidekiq-fetcher.gemspec
index b656267003a..df89abca4ac 100644
--- a/vendor/gems/sidekiq-reliable-fetch/gitlab-sidekiq-fetcher.gemspec
+++ b/vendor/gems/sidekiq-reliable-fetch/gitlab-sidekiq-fetcher.gemspec
@@ -1,6 +1,6 @@
Gem::Specification.new do |s|
s.name = 'gitlab-sidekiq-fetcher'
- s.version = '0.10.0'
+ s.version = '0.11.0'
s.authors = ['TEA', 'GitLab']
s.email = 'valery@gitlab.com'
s.license = 'LGPL-3.0'
@@ -10,6 +10,6 @@ Gem::Specification.new do |s|
s.require_paths = ['lib']
s.files = Dir.glob('lib/**/*.*')
s.test_files = Dir.glob('{spec,tests}/**/*.*')
- s.add_dependency 'sidekiq', '~> 6.1'
+ s.add_dependency 'sidekiq', '~> 7.0'
s.add_runtime_dependency 'json', '>= 2.5'
end
diff --git a/vendor/gems/sidekiq-reliable-fetch/lib/sidekiq/base_reliable_fetch.rb b/vendor/gems/sidekiq-reliable-fetch/lib/sidekiq/base_reliable_fetch.rb
index e8ee6d7df45..68268dc6ff4 100644
--- a/vendor/gems/sidekiq-reliable-fetch/lib/sidekiq/base_reliable_fetch.rb
+++ b/vendor/gems/sidekiq-reliable-fetch/lib/sidekiq/base_reliable_fetch.rb
@@ -53,7 +53,7 @@ module Sidekiq
Sidekiq::ReliableFetch
end
- config[:fetch] = fetch_strategy.new(config)
+ config[:fetch_class] = fetch_strategy
Sidekiq.logger.info('GitLab reliable fetch activated!')
@@ -115,18 +115,18 @@ module Sidekiq
attr_reader :cleanup_interval, :last_try_to_take_lease_at, :lease_interval,
:queues, :use_semi_reliable_fetch,
- :strictly_ordered_queues
+ :strictly_ordered_queues, :config
- def initialize(options)
- raise ArgumentError, 'missing queue list' unless options[:queues]
+ def initialize(capsule)
+ raise ArgumentError, 'missing queue list' unless capsule.config.queues
- @config = options
+ @config = capsule.config
@interrupted_set = Sidekiq::InterruptedSet.new
- @cleanup_interval = options.fetch(:cleanup_interval, DEFAULT_CLEANUP_INTERVAL)
- @lease_interval = options.fetch(:lease_interval, DEFAULT_LEASE_INTERVAL)
+ @cleanup_interval = config.fetch(:cleanup_interval, DEFAULT_CLEANUP_INTERVAL)
+ @lease_interval = config.fetch(:lease_interval, DEFAULT_LEASE_INTERVAL)
@last_try_to_take_lease_at = 0
- @strictly_ordered_queues = !!options[:strict]
- @queues = options[:queues].map { |q| "queue:#{q}" }
+ @strictly_ordered_queues = !!config[:strict]
+ @queues = config.queues.map { |q| "queue:#{q}" }
end
def retrieve_work
@@ -140,7 +140,7 @@ module Sidekiq
"#{self.class} does not implement #{__method__}"
end
- def bulk_requeue(inprogress, _options)
+ def bulk_requeue(inprogress)
return if inprogress.empty?
Sidekiq.redis do |conn|
@@ -203,7 +203,7 @@ module Sidekiq
Sidekiq.logger.info('Cleaning working queues')
Sidekiq.redis do |conn|
- conn.scan_each(match: "#{WORKING_QUEUE_PREFIX}:queue:*", count: SCAN_COUNT) do |key|
+ conn.scan(match: "#{WORKING_QUEUE_PREFIX}:queue:*", count: SCAN_COUNT) do |key|
original_queue, identity = extract_queue_and_identity(key)
next if original_queue.nil? || identity.nil?
@@ -235,7 +235,7 @@ module Sidekiq
rescue NameError
end
- max_retries_after_interruption ||= @config[:max_retries_after_interruption]
+ max_retries_after_interruption ||= config[:max_retries_after_interruption]
max_retries_after_interruption ||= DEFAULT_MAX_RETRIES_AFTER_INTERRUPTION
max_retries_after_interruption
end
@@ -264,7 +264,7 @@ module Sidekiq
@last_try_to_take_lease_at = Time.now.to_f
Sidekiq.redis do |conn|
- conn.set(LEASE_KEY, 1, nx: true, ex: cleanup_interval)
+ conn.set(LEASE_KEY, 1, 'nx', 'ex', cleanup_interval)
end
end
diff --git a/vendor/gems/sidekiq-reliable-fetch/lib/sidekiq/interrupted_set.rb b/vendor/gems/sidekiq-reliable-fetch/lib/sidekiq/interrupted_set.rb
index 2fc7a10f9d0..799e744957e 100644
--- a/vendor/gems/sidekiq-reliable-fetch/lib/sidekiq/interrupted_set.rb
+++ b/vendor/gems/sidekiq-reliable-fetch/lib/sidekiq/interrupted_set.rb
@@ -45,7 +45,7 @@ module Sidekiq
end
def self.options
- Sidekiq.respond_to?(:[]) ? Sidekiq : Sidekiq.options
+ Sidekiq.default_configuration
end
end
end
diff --git a/vendor/gems/sidekiq-reliable-fetch/lib/sidekiq/semi_reliable_fetch.rb b/vendor/gems/sidekiq-reliable-fetch/lib/sidekiq/semi_reliable_fetch.rb
index e65d9b6324a..b9855100fb6 100644
--- a/vendor/gems/sidekiq-reliable-fetch/lib/sidekiq/semi_reliable_fetch.rb
+++ b/vendor/gems/sidekiq-reliable-fetch/lib/sidekiq/semi_reliable_fetch.rb
@@ -7,7 +7,7 @@ module Sidekiq
# for semi-reliable fetch.
DEFAULT_SEMI_RELIABLE_FETCH_TIMEOUT = 5 # seconds
- def initialize(options)
+ def initialize(capsule)
super
@queues = @queues.uniq
@@ -16,7 +16,7 @@ module Sidekiq
private
def retrieve_unit_of_work
- work = Sidekiq.redis { |conn| conn.brpop(*queues_cmd, timeout: semi_reliable_fetch_timeout) }
+ work = brpop_with_sidekiq
return unless work
queue, job = work
@@ -29,6 +29,17 @@ module Sidekiq
unit_of_work
end
+ def brpop_with_sidekiq
+ Sidekiq.redis do |conn|
+ conn.blocking_call(
+ conn.read_timeout + semi_reliable_fetch_timeout,
+ "brpop",
+ *queues_cmd,
+ semi_reliable_fetch_timeout
+ )
+ end
+ end
+
def queues_cmd
if strictly_ordered_queues
@queues
diff --git a/vendor/gems/sidekiq-reliable-fetch/spec/base_reliable_fetch_spec.rb b/vendor/gems/sidekiq-reliable-fetch/spec/base_reliable_fetch_spec.rb
index 990d699e2f8..3671a8da39c 100644
--- a/vendor/gems/sidekiq-reliable-fetch/spec/base_reliable_fetch_spec.rb
+++ b/vendor/gems/sidekiq-reliable-fetch/spec/base_reliable_fetch_spec.rb
@@ -3,14 +3,20 @@ require 'fetch_shared_examples'
require 'sidekiq/base_reliable_fetch'
require 'sidekiq/reliable_fetch'
require 'sidekiq/semi_reliable_fetch'
+require 'sidekiq/capsule'
describe Sidekiq::BaseReliableFetch do
let(:job) { Sidekiq.dump_json(class: 'Bob', args: [1, 2, 'foo'], jid: 55) }
+ let(:queues) { ['foo'] }
+ let(:options) { { queues: queues } }
+ let(:config) { Sidekiq::Config.new(options) }
+ let(:capsule) { Sidekiq::Capsule.new("default", config) }
+ let(:fetcher) { Sidekiq::ReliableFetch.new(capsule) }
before { Sidekiq.redis(&:flushdb) }
describe 'UnitOfWork' do
- let(:fetcher) { Sidekiq::ReliableFetch.new(queues: ['foo']) }
+ before { config.queues = queues }
describe '#requeue' do
it 'requeues job' do
@@ -40,10 +46,12 @@ describe Sidekiq::BaseReliableFetch do
end
describe '#bulk_requeue' do
- let(:options) { { queues: %w[foo bar] } }
+ let(:queues) { %w[foo bar] }
let!(:queue1) { Sidekiq::Queue.new('foo') }
let!(:queue2) { Sidekiq::Queue.new('bar') }
+ before { config.queues = queues }
+
it 'requeues the bulk' do
uow = described_class::UnitOfWork
jobs = [ uow.new('queue:foo', job), uow.new('queue:foo', job), uow.new('queue:bar', job) ]
@@ -57,7 +65,7 @@ describe Sidekiq::BaseReliableFetch do
)
end
- described_class.new(options).bulk_requeue(jobs, nil)
+ described_class.new(capsule).bulk_requeue(jobs)
expect(queue1.size).to eq 2
expect(queue2.size).to eq 1
@@ -67,24 +75,26 @@ describe Sidekiq::BaseReliableFetch do
uow = described_class::UnitOfWork
interrupted_job = Sidekiq.dump_json(class: 'Bob', args: [1, 2, 'foo'], interrupted_count: 3)
jobs = [ uow.new('queue:foo', interrupted_job), uow.new('queue:foo', job), uow.new('queue:bar', job) ]
- described_class.new(options).bulk_requeue(jobs, nil)
+ described_class.new(capsule).bulk_requeue(jobs)
expect(queue1.size).to eq 1
expect(queue2.size).to eq 1
expect(Sidekiq::InterruptedSet.new.size).to eq 1
end
- it 'does not put jobs into interrupted queue if it is disabled' do
- options[:max_retries_after_interruption] = -1
+ context 'when max_retries_after_interruption is disabled' do
+ let(:options) { { queues: queues, max_retries_after_interruption: -1 } }
- uow = described_class::UnitOfWork
- interrupted_job = Sidekiq.dump_json(class: 'Bob', args: [1, 2, 'foo'], interrupted_count: 3)
- jobs = [ uow.new('queue:foo', interrupted_job), uow.new('queue:foo', job), uow.new('queue:bar', job) ]
- described_class.new(options).bulk_requeue(jobs, nil)
+ it 'does not put jobs into interrupted queue' do
+ uow = described_class::UnitOfWork
+ interrupted_job = Sidekiq.dump_json(class: 'Bob', args: [1, 2, 'foo'], interrupted_count: 3)
+ jobs = [ uow.new('queue:foo', interrupted_job), uow.new('queue:foo', job), uow.new('queue:bar', job) ]
+ described_class.new(capsule).bulk_requeue(jobs)
- expect(queue1.size).to eq 2
- expect(queue2.size).to eq 1
- expect(Sidekiq::InterruptedSet.new.size).to eq 0
+ expect(queue1.size).to eq 2
+ expect(queue2.size).to eq 1
+ expect(Sidekiq::InterruptedSet.new.size).to eq 0
+ end
end
it 'does not put jobs into interrupted queue if it is disabled on the worker' do
@@ -93,7 +103,7 @@ describe Sidekiq::BaseReliableFetch do
uow = described_class::UnitOfWork
interrupted_job = Sidekiq.dump_json(class: 'Bob', args: [1, 2, 'foo'], interrupted_count: 3)
jobs = [ uow.new('queue:foo', interrupted_job), uow.new('queue:foo', job), uow.new('queue:bar', job) ]
- described_class.new(options).bulk_requeue(jobs, nil)
+ described_class.new(capsule).bulk_requeue(jobs)
expect(queue1.size).to eq 2
expect(queue2.size).to eq 1
diff --git a/vendor/gems/sidekiq-reliable-fetch/spec/fetch_shared_examples.rb b/vendor/gems/sidekiq-reliable-fetch/spec/fetch_shared_examples.rb
index df7f715f2f9..11489a37b27 100644
--- a/vendor/gems/sidekiq-reliable-fetch/spec/fetch_shared_examples.rb
+++ b/vendor/gems/sidekiq-reliable-fetch/spec/fetch_shared_examples.rb
@@ -1,54 +1,70 @@
shared_examples 'a Sidekiq fetcher' do
let(:queues) { ['assigned'] }
+ let(:options) { { queues: queues } }
+ let(:config) { Sidekiq::Config.new(options) }
+ let(:capsule) { Sidekiq::Capsule.new("default", config) }
- before { Sidekiq.redis(&:flushdb) }
+ before do
+ config.queues = queues
+ Sidekiq.redis(&:flushdb)
+ end
describe '#retrieve_work' do
let(:job) { Sidekiq.dump_json(class: 'Bob', args: [1, 2, 'foo']) }
- let(:fetcher) { described_class.new(queues: queues) }
+ let(:fetcher) { described_class.new(capsule) }
it 'does not clean up orphaned jobs more than once per cleanup interval' do
- Sidekiq.redis = Sidekiq::RedisConnection.create(url: REDIS_URL, size: 10)
-
- expect(fetcher).to receive(:clean_working_queues!).once
+ Sidekiq::Client.via(Sidekiq::RedisConnection.create(url: REDIS_URL, size: 10)) do
+ expect(fetcher).to receive(:clean_working_queues!).once
- threads = 10.times.map do
- Thread.new do
- fetcher.retrieve_work
+ threads = 10.times.map do
+ Thread.new do
+ fetcher.retrieve_work
+ end
end
- end
- threads.map(&:join)
+ threads.map(&:join)
+ end
end
- it 'retrieves by order when strictly order is enabled' do
- fetcher = described_class.new(strict: true, queues: ['first', 'second'])
+ context 'when strictly order is enabled' do
+ let(:queues) { ['first', 'second'] }
+ let(:options) { { strict: true, queues: queues } }
- Sidekiq.redis do |conn|
- conn.rpush('queue:first', ['msg3', 'msg2', 'msg1'])
- conn.rpush('queue:second', 'msg4')
- end
+ it 'retrieves by order' do
+ fetcher = described_class.new(capsule)
+
+ Sidekiq.redis do |conn|
+ conn.rpush('queue:first', ['msg3', 'msg2', 'msg1'])
+ conn.rpush('queue:second', 'msg4')
+ end
- jobs = (1..4).map { fetcher.retrieve_work.job }
+ jobs = (1..4).map { fetcher.retrieve_work.job }
- expect(jobs).to eq ['msg1', 'msg2', 'msg3', 'msg4']
+ expect(jobs).to eq ['msg1', 'msg2', 'msg3', 'msg4']
+ end
end
- it 'does not starve any queue when queues are not strictly ordered' do
- fetcher = described_class.new(queues: ['first', 'second'])
+ context 'when queues are not strictly ordered' do
+ let(:queues) { ['first', 'second'] }
- Sidekiq.redis do |conn|
- conn.rpush('queue:first', (1..200).map { |i| "msg#{i}" })
- conn.rpush('queue:second', 'this_job_should_not_stuck')
- end
+ it 'does not starve any queue' do
+ fetcher = described_class.new(capsule)
- jobs = (1..100).map { fetcher.retrieve_work.job }
+ Sidekiq.redis do |conn|
+ conn.rpush('queue:first', (1..200).map { |i| "msg#{i}" })
+ conn.rpush('queue:second', 'this_job_should_not_stuck')
+ end
+
+ jobs = (1..100).map { fetcher.retrieve_work.job }
- expect(jobs).to include 'this_job_should_not_stuck'
+ expect(jobs).to include 'this_job_should_not_stuck'
+ end
end
shared_examples "basic queue handling" do |queue|
- let (:fetcher) { described_class.new(queues: [queue]) }
+ let(:queues) { [queue] }
+ let(:fetcher) { described_class.new(capsule) }
it 'retrieves the job and puts it to working queue' do
Sidekiq.redis { |conn| conn.rpush("queue:#{queue}", job) }
@@ -150,7 +166,8 @@ shared_examples 'a Sidekiq fetcher' do
context 'with short cleanup interval' do
let(:short_interval) { 1 }
- let(:fetcher) { described_class.new(queues: queues, lease_interval: short_interval, cleanup_interval: short_interval) }
+ let(:options) { { queues: queues, lease_interval: short_interval, cleanup_interval: short_interval } }
+ let(:fetcher) { described_class.new(capsule) }
it 'requeues when there is no heartbeat' do
Sidekiq.redis { |conn| conn.rpush('queue:assigned', job) }
diff --git a/vendor/gems/sidekiq-reliable-fetch/spec/reliable_fetch_spec.rb b/vendor/gems/sidekiq-reliable-fetch/spec/reliable_fetch_spec.rb
index bdef04a021f..b919d610aca 100644
--- a/vendor/gems/sidekiq-reliable-fetch/spec/reliable_fetch_spec.rb
+++ b/vendor/gems/sidekiq-reliable-fetch/spec/reliable_fetch_spec.rb
@@ -2,6 +2,7 @@ require 'spec_helper'
require 'fetch_shared_examples'
require 'sidekiq/base_reliable_fetch'
require 'sidekiq/reliable_fetch'
+require 'sidekiq/capsule'
describe Sidekiq::ReliableFetch do
include_examples 'a Sidekiq fetcher'
diff --git a/vendor/gems/sidekiq-reliable-fetch/spec/semi_reliable_fetch_spec.rb b/vendor/gems/sidekiq-reliable-fetch/spec/semi_reliable_fetch_spec.rb
index 5bd40a80277..754cc5a4ef6 100644
--- a/vendor/gems/sidekiq-reliable-fetch/spec/semi_reliable_fetch_spec.rb
+++ b/vendor/gems/sidekiq-reliable-fetch/spec/semi_reliable_fetch_spec.rb
@@ -2,6 +2,9 @@ require 'spec_helper'
require 'fetch_shared_examples'
require 'sidekiq/base_reliable_fetch'
require 'sidekiq/semi_reliable_fetch'
+require 'sidekiq/capsule'
+require 'sidekiq/config'
+require 'redis'
describe Sidekiq::SemiReliableFetch do
include_examples 'a Sidekiq fetcher'
@@ -9,7 +12,11 @@ describe Sidekiq::SemiReliableFetch do
describe '#retrieve_work' do
let(:queues) { ['stuff_to_do'] }
let(:options) { { queues: queues } }
- let(:fetcher) { described_class.new(options) }
+ let(:config) { Sidekiq::Config.new(options) }
+ let(:capsule) { Sidekiq::Capsule.new("default", config) }
+ let(:fetcher) { described_class.new(capsule) }
+
+ before { config.queues = queues }
context 'timeout config' do
before do
@@ -20,8 +27,9 @@ describe Sidekiq::SemiReliableFetch do
let(:timeout) { nil }
it 'brpops with the default timeout timeout' do
- Sidekiq.redis do |connection|
- expect(connection).to receive(:brpop).with("queue:stuff_to_do", { timeout: 5 }).once.and_call_original
+ Sidekiq.redis do |conn|
+ expect(conn).to receive(:blocking_call)
+ .with(conn.read_timeout + 5, 'brpop', 'queue:stuff_to_do', 5).once.and_call_original
fetcher.retrieve_work
end
@@ -32,8 +40,9 @@ describe Sidekiq::SemiReliableFetch do
let(:timeout) { '6' }
it 'brpops with the default timeout timeout' do
- Sidekiq.redis do |connection|
- expect(connection).to receive(:brpop).with("queue:stuff_to_do", { timeout: 6 }).once.and_call_original
+ Sidekiq.redis do |conn|
+ expect(conn).to receive(:blocking_call)
+ .with(conn.read_timeout + 6, 'brpop', 'queue:stuff_to_do', 6).once.and_call_original
fetcher.retrieve_work
end
diff --git a/vendor/gems/sidekiq-reliable-fetch/spec/spec_helper.rb b/vendor/gems/sidekiq-reliable-fetch/spec/spec_helper.rb
index 45418571579..ab1c5317ff3 100644
--- a/vendor/gems/sidekiq-reliable-fetch/spec/spec_helper.rb
+++ b/vendor/gems/sidekiq-reliable-fetch/spec/spec_helper.rb
@@ -9,7 +9,7 @@ SimpleCov.start
REDIS_URL = ENV['REDIS_URL'] || 'redis://localhost:6379/10'
Sidekiq.configure_client do |config|
- config.redis = { url: REDIS_URL }
+ config.redis = { url: REDIS_URL, read_timeout: 5 }
end
Sidekiq.logger.level = Logger::ERROR
diff --git a/vendor/gems/sidekiq-reliable-fetch/tests/interruption/config.rb b/vendor/gems/sidekiq-reliable-fetch/tests/interruption/config.rb
index f69cca96d80..a8f66a5f041 100644
--- a/vendor/gems/sidekiq-reliable-fetch/tests/interruption/config.rb
+++ b/vendor/gems/sidekiq-reliable-fetch/tests/interruption/config.rb
@@ -14,6 +14,7 @@ Sidekiq.configure_server do |config|
# These will be ignored for :basic
config[:cleanup_interval] = TEST_CLEANUP_INTERVAL
config[:lease_interval] = TEST_LEASE_INTERVAL
+ config[:queues] = ['default']
Sidekiq::ReliableFetch.setup_reliable_fetch!(config)
end
diff --git a/vendor/gems/sidekiq-reliable-fetch/tests/reliability/config.rb b/vendor/gems/sidekiq-reliable-fetch/tests/reliability/config.rb
index 05ffcfca9b5..c516112ccb7 100644
--- a/vendor/gems/sidekiq-reliable-fetch/tests/reliability/config.rb
+++ b/vendor/gems/sidekiq-reliable-fetch/tests/reliability/config.rb
@@ -23,6 +23,7 @@ Sidekiq.configure_server do |config|
# These will be ignored for :basic
config[:cleanup_interval] = TEST_CLEANUP_INTERVAL
config[:lease_interval] = TEST_LEASE_INTERVAL
+ config[:queues] = ['default']
Sidekiq::ReliableFetch.setup_reliable_fetch!(config)
end