gitlab.com/gitlab-org/gitlab-foss.git

-rw-r--r--  Gemfile | 2
-rw-r--r--  Gemfile.lock | 4
-rw-r--r--  app/finders/groups/accepting_group_transfers_finder.rb | 43
-rw-r--r--  app/finders/groups/base.rb | 17
-rw-r--r--  app/finders/groups/user_groups_finder.rb | 12
-rw-r--r--  app/graphql/resolvers/concerns/issue_resolver_arguments.rb | 61
-rw-r--r--  app/graphql/resolvers/concerns/search_arguments.rb | 36
-rw-r--r--  app/graphql/resolvers/work_items_resolver.rb | 35
-rw-r--r--  app/helpers/users/callouts_helper.rb | 8
-rw-r--r--  app/models/user.rb | 1
-rw-r--r--  app/models/users/project_callout.rb | 3
-rw-r--r--  app/workers/object_storage/migrate_uploads_worker.rb | 57
-rw-r--r--  doc/administration/auth/ldap/ldap-troubleshooting.md | 4
-rw-r--r--  doc/administration/auth/oidc.md | 2
-rw-r--r--  doc/administration/geo/disaster_recovery/bring_primary_back.md | 2
-rw-r--r--  doc/administration/geo/replication/container_registry.md | 6
-rw-r--r--  doc/administration/geo/replication/troubleshooting.md | 2
-rw-r--r--  doc/administration/geo/replication/version_specific_upgrades.md | 8
-rw-r--r--  doc/administration/geo/secondary_proxy/index.md | 4
-rw-r--r--  doc/administration/gitaly/praefect.md | 8
-rw-r--r--  doc/administration/monitoring/prometheus/gitlab_metrics.md | 2
-rw-r--r--  doc/administration/object_storage.md | 8
-rw-r--r--  doc/administration/operations/index.md | 2
-rw-r--r--  doc/administration/operations/rails_console.md | 6
-rw-r--r--  doc/administration/operations/ssh_certificates.md | 2
-rw-r--r--  doc/administration/packages/container_registry.md | 4
-rw-r--r--  doc/administration/pages/index.md | 2
-rw-r--r--  doc/administration/raketasks/uploads/migrate.md | 3
-rw-r--r--  doc/administration/redis/replication_and_failover.md | 2
-rw-r--r--  doc/administration/restart_gitlab.md | 2
-rw-r--r--  doc/administration/terraform_state.md | 4
-rw-r--r--  doc/administration/troubleshooting/linux_cheat_sheet.md | 2
-rw-r--r--  doc/administration/troubleshooting/tracing_correlation_id.md | 2
-rw-r--r--  doc/api/graphql/reference/index.md | 17
-rw-r--r--  doc/api/groups.md | 44
-rw-r--r--  doc/api/members.md | 2
-rw-r--r--  doc/api/oauth2.md | 2
-rw-r--r--  doc/api/packages/conan.md | 2
-rw-r--r--  doc/api/repositories.md | 2
-rw-r--r--  doc/architecture/blueprints/ci_data_decay/index.md | 4
-rw-r--r--  doc/architecture/blueprints/ci_scale/index.md | 6
-rw-r--r--  doc/architecture/blueprints/cloud_native_build_logs/index.md | 6
-rw-r--r--  doc/architecture/blueprints/cloud_native_gitlab_pages/index.md | 6
-rw-r--r--  doc/architecture/blueprints/feature_flags_development/index.md | 4
-rw-r--r--  doc/architecture/blueprints/graphql_api/index.md | 4
-rw-r--r--  doc/architecture/blueprints/object_storage/index.md | 12
-rw-r--r--  doc/architecture/blueprints/runner_scaling/index.md | 4
-rw-r--r--  doc/ci/docker/using_docker_build.md | 4
-rw-r--r--  doc/ci/interactive_web_terminal/index.md | 6
-rw-r--r--  doc/ci/pipelines/merge_trains.md | 2
-rw-r--r--  doc/ci/runners/runners_scope.md | 2
-rw-r--r--  doc/development/api_styleguide.md | 6
-rw-r--r--  doc/development/application_slis/index.md | 4
-rw-r--r--  doc/development/auto_devops.md | 2
-rw-r--r--  doc/development/build_test_package.md | 2
-rw-r--r--  doc/development/changelog.md | 2
-rw-r--r--  doc/development/code_intelligence/index.md | 2
-rw-r--r--  doc/development/database/ci_mirrored_tables.md | 4
-rw-r--r--  doc/development/database/client_side_connection_pool.md | 4
-rw-r--r--  doc/development/database/loose_foreign_keys.md | 2
-rw-r--r--  doc/development/database/multiple_databases.md | 2
-rw-r--r--  doc/development/database/strings_and_the_text_data_type.md | 2
-rw-r--r--  doc/development/database/understanding_explain_plans.md | 2
-rw-r--r--  doc/development/distributed_tracing.md | 8
-rw-r--r--  doc/development/ee_features.md | 2
-rw-r--r--  doc/development/elasticsearch.md | 8
-rw-r--r--  doc/development/emails.md | 6
-rw-r--r--  doc/development/fe_guide/graphql.md | 4
-rw-r--r--  doc/development/fe_guide/vuex.md | 2
-rw-r--r--  doc/development/gemfile.md | 4
-rw-r--r--  doc/development/geo/proxying.md | 2
-rw-r--r--  doc/development/git_object_deduplication.md | 4
-rw-r--r--  doc/development/github_importer.md | 2
-rw-r--r--  doc/development/go_guide/dependencies.md | 4
-rw-r--r--  doc/development/go_guide/index.md | 6
-rw-r--r--  doc/development/integrations/secure.md | 2
-rw-r--r--  doc/development/internal_api/index.md | 2
-rw-r--r--  doc/development/lfs.md | 4
-rw-r--r--  doc/development/logging.md | 4
-rw-r--r--  doc/development/merge_request_performance_guidelines.md | 2
-rw-r--r--  doc/development/migration_style_guide.md | 2
-rw-r--r--  doc/development/pipelines.md | 4
-rw-r--r--  doc/development/rails_update.md | 2
-rw-r--r--  doc/development/real_time.md | 2
-rw-r--r--  doc/development/redis/new_redis_instance.md | 2
-rw-r--r--  doc/development/routing.md | 2
-rw-r--r--  doc/development/scalability.md | 18
-rw-r--r--  doc/development/service_ping/metrics_instrumentation.md | 2
-rw-r--r--  doc/development/sidekiq/compatibility_across_updates.md | 6
-rw-r--r--  doc/development/sidekiq/idempotent_jobs.md | 2
-rw-r--r--  doc/development/sql.md | 2
-rw-r--r--  doc/development/testing_guide/end_to_end/feature_flags.md | 2
-rw-r--r--  doc/development/testing_guide/end_to_end/index.md | 12
-rw-r--r--  doc/development/testing_guide/index.md | 2
-rw-r--r--  doc/development/testing_guide/testing_migrations_guide.md | 2
-rw-r--r--  doc/development/workhorse/index.md | 2
-rw-r--r--  doc/install/azure/index.md | 2
-rw-r--r--  doc/install/installation.md | 4
-rw-r--r--  doc/operations/metrics/embed_grafana.md | 2
-rw-r--r--  doc/policy/maintenance.md | 2
-rw-r--r--  doc/raketasks/backup_gitlab.md | 4
-rw-r--r--  doc/raketasks/backup_restore.md | 4
-rw-r--r--  doc/security/information_exclusivity.md | 2
-rw-r--r--  doc/subscriptions/gitlab_com/index.md | 2
-rw-r--r--  doc/subscriptions/index.md | 2
-rw-r--r--  doc/topics/release_your_application.md | 6
-rw-r--r--  doc/update/index.md | 6
-rw-r--r--  doc/update/upgrading_from_source.md | 2
-rw-r--r--  doc/update/zero_downtime.md | 4
-rw-r--r--  doc/user/admin_area/review_abuse_reports.md | 2
-rw-r--r--  doc/user/application_security/dast/checks/16.7.md | 2
-rw-r--r--  doc/user/application_security/dast/checks/209.1.md | 10
-rw-r--r--  doc/user/application_security/policies/index.md | 2
-rw-r--r--  doc/user/clusters/agent/ci_cd_workflow.md | 2
-rw-r--r--  doc/user/clusters/environments.md | 2
-rw-r--r--  doc/user/gitlab_com/index.md | 6
-rw-r--r--  doc/user/group/iterations/index.md | 6
-rw-r--r--  doc/user/infrastructure/clusters/manage/management_project_applications/certmanager.md | 4
-rw-r--r--  doc/user/packages/generic_packages/index.md | 2
-rw-r--r--  doc/user/project/canary_deployments.md | 4
-rw-r--r--  doc/user/project/clusters/add_gke_clusters.md | 2
-rw-r--r--  doc/user/project/deploy_boards.md | 2
-rw-r--r--  doc/user/project/git_attributes.md | 2
-rw-r--r--  doc/user/project/import/clearcase.md | 2
-rw-r--r--  doc/user/project/integrations/pumble.md | 4
-rw-r--r--  doc/user/project/members/share_project_with_groups.md | 2
-rw-r--r--  doc/user/project/pages/getting_started/pages_from_scratch.md | 2
-rw-r--r--  doc/user/project/push_options.md | 2
-rw-r--r--  doc/user/project/repository/reducing_the_repo_size_using_git.md | 2
-rw-r--r--  doc/user/usage_quotas.md | 4
-rw-r--r--  lib/api/groups.rb | 19
-rw-r--r--  lib/gitlab/uploads/migration_helper.rb | 38
-rw-r--r--  lib/tasks/gitlab/uploads/migrate.rake | 22
-rw-r--r--  locale/gitlab.pot | 12
-rw-r--r--  spec/finders/groups/accepting_group_transfers_finder_spec.rb | 93
-rw-r--r--  spec/graphql/resolvers/issues_resolver_spec.rb | 52
-rw-r--r--  spec/graphql/resolvers/work_items_resolver_spec.rb | 52
-rw-r--r--  spec/helpers/commits_helper_spec.rb | 2
-rw-r--r--  spec/helpers/users/callouts_helper_spec.rb | 4
-rw-r--r--  spec/requests/api/graphql/project/issues_spec.rb | 24
-rw-r--r--  spec/requests/api/graphql/project/work_items_spec.rb | 24
-rw-r--r--  spec/requests/api/groups_spec.rb | 75
-rw-r--r--  spec/support/shared_examples/graphql/resolvers/issuable_resolvers_shared_examples.rb | 95
-rw-r--r--  spec/support/shared_examples/requests/api/graphql/issuable_search_shared_examples.rb | 14
-rw-r--r--  spec/support/shared_examples/tasks/gitlab/uploads/migration_shared_examples.rb | 31
-rw-r--r--  spec/tasks/gitlab/uploads/migrate_rake_spec.rb | 150
-rw-r--r--  spec/uploaders/workers/object_storage/migrate_uploads_worker_spec.rb | 162
147 files changed, 890 insertions, 736 deletions
diff --git a/Gemfile b/Gemfile
index 7a08950449d..63f61a40e1f 100644
--- a/Gemfile
+++ b/Gemfile
@@ -187,7 +187,7 @@ gem 'rack', '~> 2.2.4'
gem 'rack-timeout', '~> 0.6.0', require: 'rack/timeout/base'
group :puma do
- gem 'puma', '~> 5.6.4', require: false
+ gem 'puma', '~> 5.6.5', require: false
gem 'puma_worker_killer', '~> 0.3.1', require: false
gem 'sd_notify', '~> 0.1.0', require: false
end
diff --git a/Gemfile.lock b/Gemfile.lock
index a09492a59d7..60e415febe6 100644
--- a/Gemfile.lock
+++ b/Gemfile.lock
@@ -1019,7 +1019,7 @@ GEM
tty-markdown
tty-prompt
public_suffix (4.0.7)
- puma (5.6.4)
+ puma (5.6.5)
nio4r (~> 2.0)
puma_worker_killer (0.3.1)
get_process_mem (~> 0.2)
@@ -1685,7 +1685,7 @@ DEPENDENCIES
pry-byebug
pry-rails (~> 0.3.9)
pry-shell (~> 0.5.1)
- puma (~> 5.6.4)
+ puma (~> 5.6.5)
puma_worker_killer (~> 0.3.1)
rack (~> 2.2.4)
rack-attack (~> 6.6.0)
diff --git a/app/finders/groups/accepting_group_transfers_finder.rb b/app/finders/groups/accepting_group_transfers_finder.rb
new file mode 100644
index 00000000000..03f5e492d27
--- /dev/null
+++ b/app/finders/groups/accepting_group_transfers_finder.rb
@@ -0,0 +1,43 @@
+# frozen_string_literal: true
+
+module Groups
+ class AcceptingGroupTransfersFinder < Base
+ def initialize(current_user, group_to_be_transferred, params = {})
+ @current_user = current_user
+ @group_to_be_transferred = group_to_be_transferred
+ @params = params
+ end
+
+ def execute
+ return Group.none unless can_transfer_group?
+
+ items = find_groups
+ items = by_search(items)
+
+ sort(items)
+ end
+
+ private
+
+ attr_reader :current_user, :group_to_be_transferred, :params
+
+ def find_groups
+ GroupsFinder.new( # rubocop: disable CodeReuse/Finder
+ current_user,
+ min_access_level: Gitlab::Access::OWNER,
+ exclude_group_ids: exclude_groups
+ ).execute.without_order
+ end
+
+ def exclude_groups
+ exclude_groups = group_to_be_transferred.self_and_descendants.pluck_primary_key
+ exclude_groups << group_to_be_transferred.parent_id if group_to_be_transferred.parent_id
+
+ exclude_groups
+ end
+
+ def can_transfer_group?
+ Ability.allowed?(current_user, :admin_group, group_to_be_transferred)
+ end
+ end
+end
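A minimal usage sketch of the new finder, based only on the constructor and `execute` shown above; `current_user` and `group` are assumed objects, and `search` is the optional filter handled by `by_search`:

```ruby
# Illustrative only: list the groups this user could transfer `group` into.
# Results are empty unless the user can :admin_group the group being moved.
finder = Groups::AcceptingGroupTransfersFinder.new(
  current_user,
  group,                   # the group to be transferred
  search: 'infrastructure' # optional name/path filter
)

candidate_parents = finder.execute # => ActiveRecord relation of Group records
```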
diff --git a/app/finders/groups/base.rb b/app/finders/groups/base.rb
new file mode 100644
index 00000000000..d7f56b1a7a6
--- /dev/null
+++ b/app/finders/groups/base.rb
@@ -0,0 +1,17 @@
+# frozen_string_literal: true
+
+module Groups
+ class Base
+ private
+
+ def sort(items)
+ items.order(Group.arel_table[:path].asc, Group.arel_table[:id].asc) # rubocop: disable CodeReuse/ActiveRecord
+ end
+
+ def by_search(items)
+ return items if params[:search].blank?
+
+ items.search(params[:search], include_parents: true)
+ end
+ end
+end
diff --git a/app/finders/groups/user_groups_finder.rb b/app/finders/groups/user_groups_finder.rb
index bda8b7cc1e0..b58c1323b1f 100644
--- a/app/finders/groups/user_groups_finder.rb
+++ b/app/finders/groups/user_groups_finder.rb
@@ -13,7 +13,7 @@
#
# Initially created to filter user groups and descendants where the user can create projects
module Groups
- class UserGroupsFinder
+ class UserGroupsFinder < Base
def initialize(current_user, target_user, params = {})
@current_user = current_user
@target_user = target_user
@@ -34,16 +34,6 @@ module Groups
attr_reader :current_user, :target_user, :params
- def sort(items)
- items.order(Group.arel_table[:path].asc, Group.arel_table[:id].asc) # rubocop: disable CodeReuse/ActiveRecord
- end
-
- def by_search(items)
- return items if params[:search].blank?
-
- items.search(params[:search], include_parents: true)
- end
-
def by_permission_scope
if permission_scope_create_projects?
target_user.manageable_groups(include_groups_with_developer_maintainer_access: true)
diff --git a/app/graphql/resolvers/concerns/issue_resolver_arguments.rb b/app/graphql/resolvers/concerns/issue_resolver_arguments.rb
index fe213936f55..15ab4435204 100644
--- a/app/graphql/resolvers/concerns/issue_resolver_arguments.rb
+++ b/app/graphql/resolvers/concerns/issue_resolver_arguments.rb
@@ -76,34 +76,18 @@ module IssueResolverArguments
end
def resolve_with_lookahead(**args)
- # The project could have been loaded in batch by `BatchLoader`.
- # At this point we need the `id` of the project to query for issues, so
- # make sure it's loaded and not `nil` before continuing.
- parent = object.respond_to?(:sync) ? object.sync : object
- return Issue.none if parent.nil?
-
- # Will need to be made group & namespace aware with
- # https://gitlab.com/gitlab-org/gitlab-foss/issues/54520
- args[:not] = args[:not].to_h if args[:not].present?
- args[:iids] ||= [args.delete(:iid)].compact if args[:iid]
- args[:attempt_project_search_optimizations] = true if args[:search].present?
-
- prepare_assignee_username_params(args)
- prepare_release_tag_params(args)
+ return Issue.none if resource_parent.nil?
- finder = IssuesFinder.new(current_user, args)
+ finder = IssuesFinder.new(current_user, prepare_finder_params(args))
- continue_issue_resolve(parent, finder, **args)
+ continue_issue_resolve(resource_parent, finder, **args)
end
def ready?(**args)
- args[:not] = args[:not].to_h if args[:not].present?
-
params_not_mutually_exclusive(args, mutually_exclusive_assignee_username_args)
params_not_mutually_exclusive(args, mutually_exclusive_milestone_args)
params_not_mutually_exclusive(args.fetch(:not, {}), mutually_exclusive_milestone_args)
params_not_mutually_exclusive(args, mutually_exclusive_release_tag_args)
- validate_anonymous_search_access! if args[:search].present?
super
end
@@ -128,6 +112,18 @@ module IssueResolverArguments
private
+ def prepare_finder_params(args)
+ params = super(args)
+ params[:not] = params[:not].to_h if params[:not].present?
+ params[:iids] ||= [params.delete(:iid)].compact if params[:iid]
+ params[:attempt_project_search_optimizations] = true if params[:search].present?
+
+ prepare_assignee_username_params(params)
+ prepare_release_tag_params(params)
+
+ params
+ end
+
def prepare_release_tag_params(args)
release_tag_wildcard = args.delete(:release_tag_wildcard_id)
return if release_tag_wildcard.blank?
@@ -135,20 +131,13 @@ module IssueResolverArguments
args[:release_tag] ||= release_tag_wildcard
end
- def mutually_exclusive_release_tag_args
- [:release_tag, :release_tag_wildcard_id]
- end
-
def prepare_assignee_username_params(args)
args[:assignee_username] = args.delete(:assignee_usernames) if args[:assignee_usernames].present?
args[:not][:assignee_username] = args[:not].delete(:assignee_usernames) if args.dig(:not, :assignee_usernames).present?
end
- def params_not_mutually_exclusive(args, mutually_exclusive_args)
- if args.slice(*mutually_exclusive_args).compact.size > 1
- arg_str = mutually_exclusive_args.map { |x| x.to_s.camelize(:lower) }.join(', ')
- raise ::Gitlab::Graphql::Errors::ArgumentError, "only one of [#{arg_str}] arguments is allowed at the same time."
- end
+ def mutually_exclusive_release_tag_args
+ [:release_tag, :release_tag_wildcard_id]
end
def mutually_exclusive_milestone_args
@@ -158,4 +147,20 @@ module IssueResolverArguments
def mutually_exclusive_assignee_username_args
[:assignee_usernames, :assignee_username]
end
+
+ def params_not_mutually_exclusive(args, mutually_exclusive_args)
+ if args.slice(*mutually_exclusive_args).compact.size > 1
+ arg_str = mutually_exclusive_args.map { |x| x.to_s.camelize(:lower) }.join(', ')
+ raise ::Gitlab::Graphql::Errors::ArgumentError, "only one of [#{arg_str}] arguments is allowed at the same time."
+ end
+ end
+
+ def resource_parent
+ # The project could have been loaded in batch by `BatchLoader`.
+ # At this point we need the `id` of the project to query for issues, so
+ # make sure it's loaded and not `nil` before continuing.
+ strong_memoize(:resource_parent) do
+ object.respond_to?(:sync) ? object.sync : object
+ end
+ end
end
diff --git a/app/graphql/resolvers/concerns/search_arguments.rb b/app/graphql/resolvers/concerns/search_arguments.rb
index 7f480f9d0b6..86c7140af0a 100644
--- a/app/graphql/resolvers/concerns/search_arguments.rb
+++ b/app/graphql/resolvers/concerns/search_arguments.rb
@@ -7,12 +7,48 @@ module SearchArguments
argument :search, GraphQL::Types::String,
required: false,
description: 'Search query for title or description.'
+ argument :in, [Types::IssuableSearchableFieldEnum],
+ required: false,
+ description: <<~DESC
+ Specify the fields to perform the search in.
+ Defaults to `[TITLE, DESCRIPTION]`. Requires the `search` argument.'
+ DESC
+ end
+
+ def ready?(**args)
+ validate_search_in_params!(args)
+ validate_anonymous_search_access!
+
+ super
end
+ private
+
def validate_anonymous_search_access!
return if current_user.present? || Feature.disabled?(:disable_anonymous_search, type: :ops)
raise ::Gitlab::Graphql::Errors::ArgumentError,
"User must be authenticated to include the `search` argument."
end
+
+ def validate_search_in_params!(args)
+ return unless args[:in].present? && args[:search].blank?
+
+ raise Gitlab::Graphql::Errors::ArgumentError,
+ '`search` should be present when including the `in` argument'
+ end
+
+ def prepare_finder_params(args)
+ prepare_search_params(args)
+ end
+
+ def prepare_search_params(args)
+ return args unless args[:search].present?
+
+ parent_type = resource_parent.is_a?(Project) ? :project : :group
+ args[:"attempt_#{parent_type}_search_optimizations"] = true
+ args[:in] = args[:in].join(',') if args[:in].present?
+
+ args
+ end
end
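A rough sketch of what the new argument handling produces for a project-level resolver; the lowercase enum values are an assumption about `IssuableSearchableFieldEnum`, everything else mirrors the code above:

```ruby
# Illustrative only: arguments as they might arrive after GraphQL coercion.
args = { search: 'broken pipeline', in: %w[title description] }

# For a Project parent, prepare_search_params would return roughly:
#   { search: 'broken pipeline',
#     in: 'title,description',
#     attempt_project_search_optimizations: true }
#
# Passing `in` without `search` never reaches the finder; ready? raises an
# ArgumentError: "`search` should be present when including the `in` argument".
```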
diff --git a/app/graphql/resolvers/work_items_resolver.rb b/app/graphql/resolvers/work_items_resolver.rb
index 055984db3cb..49f9633967d 100644
--- a/app/graphql/resolvers/work_items_resolver.rb
+++ b/app/graphql/resolvers/work_items_resolver.rb
@@ -26,24 +26,11 @@ module Resolvers
required: false
def resolve_with_lookahead(**args)
- # The project could have been loaded in batch by `BatchLoader`.
- # At this point we need the `id` of the project to query for issues, so
- # make sure it's loaded and not `nil` before continuing.
- parent = object.respond_to?(:sync) ? object.sync : object
- return WorkItem.none if parent.nil? || !parent.work_items_feature_flag_enabled?
+ return WorkItem.none if resource_parent.nil? || !resource_parent.work_items_feature_flag_enabled?
- args[:iids] ||= [args.delete(:iid)].compact if args[:iid]
- args[:attempt_project_search_optimizations] = true if args[:search].present?
+ finder = ::WorkItems::WorkItemsFinder.new(current_user, prepare_finder_params(args))
- finder = ::WorkItems::WorkItemsFinder.new(current_user, args)
-
- Gitlab::Graphql::Loaders::IssuableLoader.new(parent, finder).batching_find_all { |q| apply_lookahead(q) }
- end
-
- def ready?(**args)
- validate_anonymous_search_access! if args[:search].present?
-
- super
+ Gitlab::Graphql::Loaders::IssuableLoader.new(resource_parent, finder).batching_find_all { |q| apply_lookahead(q) }
end
private
@@ -56,6 +43,22 @@ module Resolvers
:author
]
end
+
+ def prepare_finder_params(args)
+ params = super(args)
+ params[:iids] ||= [params.delete(:iid)].compact if params[:iid]
+
+ params
+ end
+
+ def resource_parent
+ # The project could have been loaded in batch by `BatchLoader`.
+ # At this point we need the `id` of the project to query for work items, so
+ # make sure it's loaded and not `nil` before continuing.
+ strong_memoize(:resource_parent) do
+ object.respond_to?(:sync) ? object.sync : object
+ end
+ end
end
end
diff --git a/app/helpers/users/callouts_helper.rb b/app/helpers/users/callouts_helper.rb
index 3dd6b3f4a80..b08de4edb62 100644
--- a/app/helpers/users/callouts_helper.rb
+++ b/app/helpers/users/callouts_helper.rb
@@ -71,18 +71,18 @@ module Users
last_failure = DateTime.parse(last_failure) if last_failure
- user_dismissed?(WEB_HOOK_DISABLED, last_failure, namespace: project.namespace)
+ user_dismissed?(WEB_HOOK_DISABLED, last_failure, project: project)
end
private
- def user_dismissed?(feature_name, ignore_dismissal_earlier_than = nil, namespace: nil)
+ def user_dismissed?(feature_name, ignore_dismissal_earlier_than = nil, project: nil)
return false unless current_user
query = { feature_name: feature_name, ignore_dismissal_earlier_than: ignore_dismissal_earlier_than }
- if namespace
- current_user.dismissed_callout_for_namespace?(namespace: namespace, **query)
+ if project
+ current_user.dismissed_callout_for_project?(project: project, **query)
else
current_user.dismissed_callout?(**query)
end
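For context, a sketch of the project-scoped check the helper now delegates to; `user`, `project`, and `last_failure` are assumed to exist in the caller, and the feature name corresponds to the enum value added further down in this diff:

```ruby
# Illustrative only: per-project dismissal lookup used by web_hook_disabled_dismissed?.
user.dismissed_callout_for_project?(
  feature_name: 'web_hook_disabled',
  project: project,
  ignore_dismissal_earlier_than: last_failure
)
# => true if the user dismissed this callout for the project after last_failure
```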
diff --git a/app/models/user.rb b/app/models/user.rb
index afee2d70844..940547baac0 100644
--- a/app/models/user.rb
+++ b/app/models/user.rb
@@ -2072,6 +2072,7 @@ class User < ApplicationRecord
callout_dismissed?(callout, ignore_dismissal_earlier_than)
end
+ # Deprecated: do not use. See: https://gitlab.com/gitlab-org/gitlab/-/issues/371017
def dismissed_callout_for_namespace?(feature_name:, namespace:, ignore_dismissal_earlier_than: nil)
source_feature_name = "#{feature_name}_#{namespace.id}"
callout = namespace_callouts_by_feature_name[source_feature_name]
diff --git a/app/models/users/project_callout.rb b/app/models/users/project_callout.rb
index ddc5f8fb4de..aa9562ed693 100644
--- a/app/models/users/project_callout.rb
+++ b/app/models/users/project_callout.rb
@@ -9,7 +9,8 @@ module Users
belongs_to :project
enum feature_name: {
- awaiting_members_banner: 1 # EE-only
+ awaiting_members_banner: 1, # EE-only
+ web_hook_disabled: 2
}
validates :project, presence: true
diff --git a/app/workers/object_storage/migrate_uploads_worker.rb b/app/workers/object_storage/migrate_uploads_worker.rb
index b7d938e6b68..3e681c3f111 100644
--- a/app/workers/object_storage/migrate_uploads_worker.rb
+++ b/app/workers/object_storage/migrate_uploads_worker.rb
@@ -11,7 +11,7 @@ module ObjectStorage
include ObjectStorageQueue
feature_category :not_owned # rubocop:todo Gitlab/AvoidFeatureCategoryNotOwned
- loggable_arguments 0, 1, 2, 3
+ loggable_arguments 0
SanityCheckError = Class.new(StandardError)
@@ -67,41 +67,19 @@ module ObjectStorage
include Report
# rubocop: disable CodeReuse/ActiveRecord
- def self.enqueue!(uploads, model_class, mounted_as, to_store)
- sanity_check!(uploads, model_class, mounted_as)
-
- perform_async(uploads.ids, model_class.to_s, mounted_as, to_store)
+ def self.enqueue!(uploads, to_store)
+ perform_async(uploads.ids, to_store)
end
# rubocop: enable CodeReuse/ActiveRecord
- # We need to be sure all the uploads are for the same uploader and model type
- # and that the mount point exists if provided.
- #
- def self.sanity_check!(uploads, model_class, mounted_as)
- upload = uploads.first
- uploader_class = upload.uploader.constantize
- uploader_types = uploads.map(&:uploader).uniq
- model_types = uploads.map(&:model_type).uniq
- model_has_mount = mounted_as.nil? || model_class.uploaders[mounted_as] == uploader_class
-
- raise(SanityCheckError, _("Multiple uploaders found: %{uploader_types}") % { uploader_types: uploader_types }) unless uploader_types.count == 1
- raise(SanityCheckError, _("Multiple model types found: %{model_types}") % { model_types: model_types }) unless model_types.count == 1
- raise(SanityCheckError, _("Mount point %{mounted_as} not found in %{model_class}.") % { mounted_as: mounted_as, model_class: model_class }) unless model_has_mount
- end
-
# rubocop: disable CodeReuse/ActiveRecord
def perform(*args)
- args_check!(args)
-
- (ids, model_type, mounted_as, to_store) = args
+ ids, to_store = retrieve_applicable_args!(args)
- @model_class = model_type.constantize
- @mounted_as = mounted_as&.to_sym
@to_store = to_store
uploads = Upload.preload(:model).where(id: ids)
- sanity_check!(uploads)
results = migrate(uploads)
report!(results)
@@ -111,31 +89,22 @@ module ObjectStorage
end
# rubocop: enable CodeReuse/ActiveRecord
- def sanity_check!(uploads)
- self.class.sanity_check!(uploads, @model_class, @mounted_as)
- end
-
- def args_check!(args)
- return if args.count == 4
+ private
- case args.count
- when 3 then raise SanityCheckError, _("Job is missing the `model_type` argument.")
- else
- raise SanityCheckError, _("Job has wrong arguments format.")
- end
- end
+ def retrieve_applicable_args!(args)
+ return args if args.count == 2
+ return args.values_at(0, 3) if args.count == 4
- def build_uploaders(uploads)
- uploads.map { |upload| upload.retrieve_uploader(@mounted_as) }
+ raise SanityCheckError, _("Job has wrong arguments format.")
end
def migrate(uploads)
- build_uploaders(uploads).map(&method(:process_uploader))
+ uploads.map(&method(:process_upload))
end
- def process_uploader(uploader)
- MigrationResult.new(uploader.upload).tap do |result|
- uploader.migrate!(@to_store)
+ def process_upload(upload)
+ MigrationResult.new(upload).tap do |result|
+ upload.retrieve_uploader.migrate!(@to_store)
rescue StandardError => e
result.error = e
end
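A hedged sketch of enqueueing work with the slimmed-down signature above; the `store` column filter and the `ObjectStorage::Store` constants are assumptions based on existing GitLab object-storage conventions, not part of this diff:

```ruby
# Illustrative only: migrate a batch of locally stored uploads to object storage.
uploads = Upload.where(store: ObjectStorage::Store::LOCAL).limit(500) # assumed filter

ObjectStorage::MigrateUploadsWorker.enqueue!(uploads, ObjectStorage::Store::REMOTE)
# The worker now derives the uploader from each Upload record itself, so the
# old model_class / mounted_as arguments are gone.
```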
diff --git a/doc/administration/auth/ldap/ldap-troubleshooting.md b/doc/administration/auth/ldap/ldap-troubleshooting.md
index 64ef27cbf51..c242babe6c8 100644
--- a/doc/administration/auth/ldap/ldap-troubleshooting.md
+++ b/doc/administration/auth/ldap/ldap-troubleshooting.md
@@ -210,7 +210,7 @@ This shows you which user has this email address. One of two steps must be taken
remove this email as a secondary email and make it a primary one so GitLab
associates this profile to the LDAP identity.
-The user can do either of these steps
+The user can do either of these steps
[in their profile](../../../user/profile/index.md#access-your-user-profile) or an administrator can do it.
#### Projects limit errors
@@ -430,7 +430,7 @@ Next, [learn how to read the output](#example-console-output-after-a-group-sync)
##### Example console output after a group sync
-Like the output from the user sync, the output from the
+Like the output from the user sync, the output from the
[manual group sync](#sync-all-groups) is also very verbose. However, it contains lots
of helpful information.
diff --git a/doc/administration/auth/oidc.md b/doc/administration/auth/oidc.md
index 8c5bf96e99e..9f3c96902f8 100644
--- a/doc/administration/auth/oidc.md
+++ b/doc/administration/auth/oidc.md
@@ -250,7 +250,7 @@ but `LocalAccounts` works for authenticating against local, Active Directory acc
<OutputClaim ClaimTypeReferenceId="signInNames.emailAddress" PartnerClaimType="email" />
```
-1. For OIDC discovery to work with B2C, the policy must be configured with an issuer compatible with the
+1. For OIDC discovery to work with B2C, the policy must be configured with an issuer compatible with the
[OIDC specification](https://openid.net/specs/openid-connect-discovery-1_0.html#rfc.section.4.3).
See the [token compatibility settings](https://docs.microsoft.com/en-us/azure/active-directory-b2c/configure-tokens?pivots=b2c-custom-policy#token-compatibility-settings).
In `TrustFrameworkBase.xml` under `JwtIssuer`, set `IssuanceClaimPattern` to `AuthorityWithTfp`:
diff --git a/doc/administration/geo/disaster_recovery/bring_primary_back.md b/doc/administration/geo/disaster_recovery/bring_primary_back.md
index a2d4f35a7c3..1991b747af0 100644
--- a/doc/administration/geo/disaster_recovery/bring_primary_back.md
+++ b/doc/administration/geo/disaster_recovery/bring_primary_back.md
@@ -41,7 +41,7 @@ To bring the former **primary** site up to date:
NOTE:
If you [changed the DNS records](index.md#step-4-optional-updating-the-primary-domain-dns-record)
- for this site during disaster recovery procedure you may need to
+ for this site during disaster recovery procedure you may need to
[block all the writes to this site](planned_failover.md#prevent-updates-to-the-primary-site)
during this procedure.
diff --git a/doc/administration/geo/replication/container_registry.md b/doc/administration/geo/replication/container_registry.md
index b425e5dcc0d..510c8745349 100644
--- a/doc/administration/geo/replication/container_registry.md
+++ b/doc/administration/geo/replication/container_registry.md
@@ -12,7 +12,7 @@ You can set up a Container Registry on your **secondary** Geo site that mirrors
## Supported container registries
Geo supports the following types of container registries:
-
+
- [Docker](https://docs.docker.com/registry/)
- [OCI](https://github.com/opencontainers/distribution-spec/blob/main/spec.md)
@@ -26,7 +26,7 @@ The following container image formats are support by Geo:
In addition, Geo also supports [BuildKit cache images](https://github.com/moby/buildkit).
-## Supported storage
+## Supported storage
### Docker
@@ -34,7 +34,7 @@ For more information on supported registry storage drivers see
[Docker registry storage drivers](https://docs.docker.com/registry/storage-drivers/)
Read the [Load balancing considerations](https://docs.docker.com/registry/deploying/#load-balancing-considerations)
-when deploying the Registry, and how to set up the storage driver for the GitLab integrated
+when deploying the Registry, and how to set up the storage driver for the GitLab integrated
[Container Registry](../../packages/container_registry.md#use-object-storage).
### Registries that support OCI artifacts
diff --git a/doc/administration/geo/replication/troubleshooting.md b/doc/administration/geo/replication/troubleshooting.md
index c03df7ec5fc..8d4235917df 100644
--- a/doc/administration/geo/replication/troubleshooting.md
+++ b/doc/administration/geo/replication/troubleshooting.md
@@ -868,7 +868,7 @@ or `gitlab-ctl promote-to-primary-node`, either:
```
- Upgrade to GitLab 12.6.3 or later if it is safe to do so. For example,
- if the failover was just a test. A
+ if the failover was just a test. A
[caching-related bug](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/22021) was fixed.
### Message: `ActiveRecord::RecordInvalid: Validation failed: Enabled Geo primary node cannot be disabled`
diff --git a/doc/administration/geo/replication/version_specific_upgrades.md b/doc/administration/geo/replication/version_specific_upgrades.md
index 350310c7076..add35883f31 100644
--- a/doc/administration/geo/replication/version_specific_upgrades.md
+++ b/doc/administration/geo/replication/version_specific_upgrades.md
@@ -183,8 +183,8 @@ GitLab 13.9 through GitLab 14.3 are affected by a bug in which enabling [GitLab
each upgraded reference. Delay any upgrade attempts until this is in the
[13.7.5 patch release.](https://gitlab.com/gitlab-org/gitaly/-/merge_requests/3002).
More details are available [in this issue](https://gitlab.com/gitlab-org/git/-/issues/79).
-- A new secret is generated in `/etc/gitlab/gitlab-secrets.json`.
- In an HA GitLab or GitLab Geo environment, secrets need to be the same on all nodes.
+- A new secret is generated in `/etc/gitlab/gitlab-secrets.json`.
+ In an HA GitLab or GitLab Geo environment, secrets need to be the same on all nodes.
Ensure this new secret is also accounted for if you are manually syncing the file across
nodes, or manually specifying secrets in `/etc/gitlab/gitlab.rb`.
@@ -247,7 +247,7 @@ the recommended procedure, see the
## Upgrading to GitLab 12.9
WARNING:
-GitLab 12.9.0 through GitLab 12.9.3 are affected by
+GitLab 12.9.0 through GitLab 12.9.3 are affected by
[a bug that stops repository verification](https://gitlab.com/gitlab-org/gitlab/-/issues/213523).
The issue is fixed in GitLab 12.9.4. Upgrade to GitLab 12.9.4 or later.
@@ -401,6 +401,6 @@ For the recommended procedure, see the
## Upgrading to GitLab 12.0
WARNING:
-This version is affected by a
+This version is affected by a
[bug that results in new LFS objects not being replicated to Geo secondary sites](https://gitlab.com/gitlab-org/gitlab/-/issues/32696).
The issue is fixed in GitLab 12.1. Be sure to upgrade to GitLab 12.1 or later.
diff --git a/doc/administration/geo/secondary_proxy/index.md b/doc/administration/geo/secondary_proxy/index.md
index 6c1812b2754..2183f57df23 100644
--- a/doc/administration/geo/secondary_proxy/index.md
+++ b/doc/administration/geo/secondary_proxy/index.md
@@ -112,8 +112,8 @@ gitlab:
Since GitLab 15.1, Geo secondary proxying is enabled by default for separate URLs also.
-There are minor known issues linked in the
-["Geo secondary proxying with separate URLs" epic](https://gitlab.com/groups/gitlab-org/-/epics/6865).
+There are minor known issues linked in the
+["Geo secondary proxying with separate URLs" epic](https://gitlab.com/groups/gitlab-org/-/epics/6865).
You can also add feedback in the epic about any use-cases that
are not possible anymore with proxying enabled.
diff --git a/doc/administration/gitaly/praefect.md b/doc/administration/gitaly/praefect.md
index 7e02211b543..2847e6c4cd8 100644
--- a/doc/administration/gitaly/praefect.md
+++ b/doc/administration/gitaly/praefect.md
@@ -97,7 +97,7 @@ If you [installed](https://about.gitlab.com/install/) GitLab using the Omnibus G
### Preparation
-Before beginning, you should already have a working GitLab instance.
+Before beginning, you should already have a working GitLab instance.
[Learn how to install GitLab](https://about.gitlab.com/install/).
Provision a PostgreSQL server. We recommend using the PostgreSQL that is shipped
@@ -332,7 +332,7 @@ To configure the additional connection, you must either:
#### Configure a new PgBouncer database with `pool_mode = session`
We recommend using PgBouncer with `session` pool mode. You can use the
-[bundled PgBouncer](../postgresql/pgbouncer.md) or use an external PgBouncer and
+[bundled PgBouncer](../postgresql/pgbouncer.md) or use an external PgBouncer and
[configure it manually](https://www.pgbouncer.org/config.html).
The following example uses the bundled PgBouncer and sets up two separate connection pools on PostgreSQL host,
@@ -621,7 +621,7 @@ Updates to example must be made at:
gitlab-ctl reconfigure
```
-1. To ensure that Praefect
+1. To ensure that Praefect
[has updated its Prometheus listen address](https://gitlab.com/gitlab-org/gitaly/-/issues/2734),
[restart Praefect](../restart_gitlab.md#omnibus-gitlab-restart):
@@ -929,7 +929,7 @@ For more information on Gitaly server configuration, see our
gitlab-ctl reconfigure
```
-1. To ensure that Gitaly
+1. To ensure that Gitaly
[has updated its Prometheus listen address](https://gitlab.com/gitlab-org/gitaly/-/issues/2734),
[restart Gitaly](../restart_gitlab.md#omnibus-gitlab-restart):
diff --git a/doc/administration/monitoring/prometheus/gitlab_metrics.md b/doc/administration/monitoring/prometheus/gitlab_metrics.md
index a2def8a9f64..46793a7b8d8 100644
--- a/doc/administration/monitoring/prometheus/gitlab_metrics.md
+++ b/doc/administration/monitoring/prometheus/gitlab_metrics.md
@@ -390,7 +390,7 @@ Some basic Ruby runtime metrics are available:
## Redis metrics
These client metrics are meant to complement Redis server metrics.
-These metrics are broken down per
+These metrics are broken down per
[Redis instance](https://docs.gitlab.com/omnibus/settings/redis.html#running-with-multiple-redis-instances).
These metrics all have a `storage` label which indicates the Redis
instance (`cache`, `shared_state`, and so on).
diff --git a/doc/administration/object_storage.md b/doc/administration/object_storage.md
index 0299d5f8b0c..e2e7e74be53 100644
--- a/doc/administration/object_storage.md
+++ b/doc/administration/object_storage.md
@@ -26,7 +26,7 @@ GitLab has been tested by vendors and customers on a number of object storage pr
### Known compatibility issues
-- Dell EMC ECS: Prior to GitLab 13.3, there is a
+- Dell EMC ECS: Prior to GitLab 13.3, there is a
[known bug in GitLab Workhorse that prevents HTTP Range Requests from working with CI job artifacts](https://gitlab.com/gitlab-org/gitlab/-/issues/223806).
Be sure to upgrade to GitLab 13.3.0 or above if you use S3 storage with this hardware.
@@ -578,7 +578,7 @@ real bucket into multiple virtual buckets. If your object storage
bucket is called `my-gitlab-objects` you can configure uploads to go
into `my-gitlab-objects/uploads`, artifacts into
`my-gitlab-objects/artifacts`, etc. The application will act as if
-these are separate buckets. Note that use of bucket prefixes
+these are separate buckets. Note that use of bucket prefixes
[may not work correctly with Helm backups](https://gitlab.com/gitlab-org/charts/gitlab/-/issues/3376).
Helm-based installs require separate buckets to
@@ -693,7 +693,7 @@ configuration.
When configured either with an instance profile or with the consolidated
object configuration, GitLab Workhorse properly uploads files to S3
buckets that have [SSE-S3 or SSE-KMS encryption enabled by default](https://docs.aws.amazon.com/kms/latest/developerguide/services-s3.html).
-Customer master keys (CMKs) and SSE-C encryption are
+Customer master keys (CMKs) and SSE-C encryption are
[not supported since this requires sending the encryption keys in every request](https://gitlab.com/gitlab-org/gitlab/-/issues/226006).
##### Server-side encryption headers
@@ -701,7 +701,7 @@ Customer master keys (CMKs) and SSE-C encryption are
> [Introduced](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/38240) in GitLab 13.3.
Setting a default encryption on an S3 bucket is the easiest way to
-enable encryption, but you may want to
+enable encryption, but you may want to
[set a bucket policy to ensure only encrypted objects are uploaded](https://aws.amazon.com/premiumsupport/knowledge-center/s3-bucket-store-kms-encrypted-objects/).
To do this, you must configure GitLab to send the proper encryption headers
in the `storage_options` configuration section:
diff --git a/doc/administration/operations/index.md b/doc/administration/operations/index.md
index a6e66abdbdb..179958c6df1 100644
--- a/doc/administration/operations/index.md
+++ b/doc/administration/operations/index.md
@@ -18,7 +18,7 @@ Keep your GitLab instance up and running smoothly.
- [Multiple Sidekiq processes](extra_sidekiq_processes.md): Configure multiple Sidekiq processes to ensure certain queues always have dedicated workers, no matter the number of jobs that must be processed. **(FREE SELF)**
- [Sidekiq routing rules](extra_sidekiq_routing.md): Configure the routing rules to route a job from a worker to a desirable queue. **(FREE SELF)**
- [Puma](puma.md): Understand Puma and puma-worker-killer.
-- Speed up SSH operations by
+- Speed up SSH operations by
[Authorizing SSH users via a fast, indexed lookup to the GitLab database](fast_ssh_key_lookup.md), and/or
by [doing away with user SSH keys stored on GitLab entirely in favor of SSH certificates](ssh_certificates.md).
- [File System Performance Benchmarking](filesystem_benchmarking.md): File system
diff --git a/doc/administration/operations/rails_console.md b/doc/administration/operations/rails_console.md
index 430dfbc637c..4838a3c5516 100644
--- a/doc/administration/operations/rails_console.md
+++ b/doc/administration/operations/rails_console.md
@@ -6,7 +6,7 @@ info: To determine the technical writer assigned to the Stage/Group associated w
# Rails console **(FREE SELF)**
-At the heart of GitLab is a web application
+At the heart of GitLab is a web application
[built using the Ruby on Rails framework](https://about.gitlab.com/blog/2018/10/29/why-we-use-rails-to-build-gitlab/).
The [Rails console](https://guides.rubyonrails.org/command_line.html#rails-console).
provides a way to interact with your GitLab instance from the command line, and also grants access to the amazing tools built right into Rails.
@@ -19,7 +19,7 @@ with no consequences, you are strongly advised to do so in a test environment.
The Rails console is for GitLab system administrators who are troubleshooting
a problem or need to retrieve some data that can only be done through direct
-access of the GitLab application. Basic knowledge of Ruby is needed (try
+access of the GitLab application. Basic knowledge of Ruby is needed (try
[this 30-minute tutorial](https://try.ruby-lang.org/) for a quick introduction).
Rails experience is useful but not required.
@@ -136,7 +136,7 @@ root
1
```
-Some basic knowledge of Ruby will be very useful. Try
+Some basic knowledge of Ruby will be very useful. Try
[this 30-minute tutorial](https://try.ruby-lang.org/) for a quick introduction.
Rails experience is helpful but not essential.
diff --git a/doc/administration/operations/ssh_certificates.md b/doc/administration/operations/ssh_certificates.md
index 1e405189342..8069dad4d8d 100644
--- a/doc/administration/operations/ssh_certificates.md
+++ b/doc/administration/operations/ssh_certificates.md
@@ -159,7 +159,7 @@ users (especially if they're renewed) than you have deploy keys.
Users can still bypass SSH certificate authentication by manually
uploading an SSH public key to their profile, relying on the
`~/.ssh/authorized_keys` fallback to authenticate it. There's
-currently no feature to prevent this,
+currently no feature to prevent this,
[but there's an open request for adding it](https://gitlab.com/gitlab-org/gitlab/-/issues/23260).
Such a restriction can currently be hacked in by, for example, providing a
diff --git a/doc/administration/packages/container_registry.md b/doc/administration/packages/container_registry.md
index 8b115ca1af4..c2ab34cc6b8 100644
--- a/doc/administration/packages/container_registry.md
+++ b/doc/administration/packages/container_registry.md
@@ -1202,7 +1202,7 @@ Before diving in to the following sections, here's some basic troubleshooting:
been synchronized (for example, via NTP).
1. If you are using an S3-backed Registry, double check that the IAM
- permissions and the S3 credentials (including region) are correct. See
+ permissions and the S3 credentials (including region) are correct. See
[the sample IAM policy](https://docs.docker.com/registry/storage-drivers/s3/)
for more details.
@@ -1631,7 +1631,7 @@ wrong. However, since all communications between Docker clients and servers
are done over HTTPS, it's a bit difficult to decrypt the traffic quickly even
if you know the private key. What can we do instead?
-One way would be to disable HTTPS by setting up an
+One way would be to disable HTTPS by setting up an
[insecure Registry](https://docs.docker.com/registry/insecure/). This could introduce a
security hole and is only recommended for local testing. If you have a
production system and can't or don't want to do this, there is another way:
diff --git a/doc/administration/pages/index.md b/doc/administration/pages/index.md
index d5eb354c104..992757cfc1c 100644
--- a/doc/administration/pages/index.md
+++ b/doc/administration/pages/index.md
@@ -933,7 +933,7 @@ The following settings are:
| `connection` | Various connection options described below. | |
NOTE:
-If you want to stop using and disconnect the NFS server, you need to
+If you want to stop using and disconnect the NFS server, you need to
[explicitly disable local storage](#disable-pages-local-storage), and it's only possible after upgrading to GitLab 13.11.
#### S3-compatible connection settings
diff --git a/doc/administration/raketasks/uploads/migrate.md b/doc/administration/raketasks/uploads/migrate.md
index c73840cb9ff..216c0875645 100644
--- a/doc/administration/raketasks/uploads/migrate.md
+++ b/doc/administration/raketasks/uploads/migrate.md
@@ -79,7 +79,8 @@ The Rake task uses three parameters to find uploads to migrate:
NOTE:
These parameters are mainly internal to the structure of GitLab, you may want to refer to the task list
-instead below.
+instead below. After running these individual tasks, we recommend that you run the [all-in-one Rake task](#all-in-one-rake-task)
+to migrate any uploads not included in the listed types.
This task also accepts an environment variable which you can use to override
the default batch size:
diff --git a/doc/administration/redis/replication_and_failover.md b/doc/administration/redis/replication_and_failover.md
index c4b83b66738..b775b579fd4 100644
--- a/doc/administration/redis/replication_and_failover.md
+++ b/doc/administration/redis/replication_and_failover.md
@@ -343,7 +343,7 @@ NOTE:
If you are using an external Redis Sentinel instance, be sure
to exclude the `requirepass` parameter from the Sentinel
configuration. This parameter causes clients to report `NOAUTH
-Authentication required.`.
+Authentication required.`.
[Redis Sentinel 3.2.x does not support password authentication](https://github.com/antirez/redis/issues/3279).
Now that the Redis servers are all set up, let's configure the Sentinel
diff --git a/doc/administration/restart_gitlab.md b/doc/administration/restart_gitlab.md
index 6625039504a..e5ec12054b8 100644
--- a/doc/administration/restart_gitlab.md
+++ b/doc/administration/restart_gitlab.md
@@ -102,7 +102,7 @@ depend on those files.
## Installations from source
-If you have followed the official installation guide to
+If you have followed the official installation guide to
[install GitLab from source](../install/installation.md), run the following command to restart GitLab:
```shell
diff --git a/doc/administration/terraform_state.md b/doc/administration/terraform_state.md
index 7a8d7774948..5a272025987 100644
--- a/doc/administration/terraform_state.md
+++ b/doc/administration/terraform_state.md
@@ -78,8 +78,8 @@ Terraform state files are stored locally, follow the steps below.
## Using object storage **(FREE SELF)**
-Instead of storing Terraform state files on disk, we recommend the use of
-[one of the supported object storage options](object_storage.md#options).
+Instead of storing Terraform state files on disk, we recommend the use of
+[one of the supported object storage options](object_storage.md#options).
This configuration relies on valid credentials to be configured already.
[Read more about using object storage with GitLab](object_storage.md).
diff --git a/doc/administration/troubleshooting/linux_cheat_sheet.md b/doc/administration/troubleshooting/linux_cheat_sheet.md
index 6ff6e562a7d..c1a428018c2 100644
--- a/doc/administration/troubleshooting/linux_cheat_sheet.md
+++ b/doc/administration/troubleshooting/linux_cheat_sheet.md
@@ -204,7 +204,7 @@ or you can build it from source if you have the Rust compiler.
#### How to use the tool
-First run the tool with `summary` flag to get a summary of the top processes sorted by time spent actively performing tasks.
+First run the tool with `summary` flag to get a summary of the top processes sorted by time spent actively performing tasks.
You can also sort based on total time, # of system calls made, PID #, and # of child processes
using the `-s` or `--sort` flag. The number of results defaults to 25 processes, but
can be changed using the `-c`/`--count` option. See `--help` for full details.
diff --git a/doc/administration/troubleshooting/tracing_correlation_id.md b/doc/administration/troubleshooting/tracing_correlation_id.md
index ee59b7c2504..917e27bab70 100644
--- a/doc/administration/troubleshooting/tracing_correlation_id.md
+++ b/doc/administration/troubleshooting/tracing_correlation_id.md
@@ -6,6 +6,6 @@ remove_date: '2022-11-12'
This document was moved to [another location](../logs/tracing_correlation_id.md).
<!-- This redirect file can be deleted after 2022-11-12. -->
-<!-- Redirects that point to other docs in the same project expire in three months. -->
+<!-- Redirects that point to other docs in the same project expire in three months. -->
<!-- Redirects that point to docs in a different project or site (for example, link is not relative and starts with `https:`) expire in one year. -->
<!-- Before deletion, see: https://docs.gitlab.com/ee/development/documentation/redirects.html --> \ No newline at end of file
diff --git a/doc/api/graphql/reference/index.md b/doc/api/graphql/reference/index.md
index 48a559e8ce4..5a52df38970 100644
--- a/doc/api/graphql/reference/index.md
+++ b/doc/api/graphql/reference/index.md
@@ -9843,7 +9843,7 @@ four standard [pagination arguments](#connection-pagination-arguments):
| <a id="boardepicancestorsiid"></a>`iid` | [`ID`](#id) | IID of the epic, e.g., "1". |
| <a id="boardepicancestorsiidstartswith"></a>`iidStartsWith` | [`String`](#string) | Filter epics by IID for autocomplete. |
| <a id="boardepicancestorsiids"></a>`iids` | [`[ID!]`](#id) | List of IIDs of epics, e.g., `[1, 2]`. |
-| <a id="boardepicancestorsin"></a>`in` | [`[IssuableSearchableField!]`](#issuablesearchablefield) | Specify the fields to perform the search in. Defaults to `[TITLE, DESCRIPTION]`. Requires the `search` argument. |
+| <a id="boardepicancestorsin"></a>`in` | [`[IssuableSearchableField!]`](#issuablesearchablefield) | Specify the fields to perform the search in. Defaults to `[TITLE, DESCRIPTION]`. Requires the `search` argument.'. |
| <a id="boardepicancestorsincludeancestorgroups"></a>`includeAncestorGroups` | [`Boolean`](#boolean) | Include epics from ancestor groups. |
| <a id="boardepicancestorsincludedescendantgroups"></a>`includeDescendantGroups` | [`Boolean`](#boolean) | Include epics from descendant groups. |
| <a id="boardepicancestorslabelname"></a>`labelName` | [`[String!]`](#string) | Filter epics by labels. |
@@ -9881,7 +9881,7 @@ four standard [pagination arguments](#connection-pagination-arguments):
| <a id="boardepicchildreniid"></a>`iid` | [`ID`](#id) | IID of the epic, e.g., "1". |
| <a id="boardepicchildreniidstartswith"></a>`iidStartsWith` | [`String`](#string) | Filter epics by IID for autocomplete. |
| <a id="boardepicchildreniids"></a>`iids` | [`[ID!]`](#id) | List of IIDs of epics, e.g., `[1, 2]`. |
-| <a id="boardepicchildrenin"></a>`in` | [`[IssuableSearchableField!]`](#issuablesearchablefield) | Specify the fields to perform the search in. Defaults to `[TITLE, DESCRIPTION]`. Requires the `search` argument. |
+| <a id="boardepicchildrenin"></a>`in` | [`[IssuableSearchableField!]`](#issuablesearchablefield) | Specify the fields to perform the search in. Defaults to `[TITLE, DESCRIPTION]`. Requires the `search` argument.'. |
| <a id="boardepicchildrenincludeancestorgroups"></a>`includeAncestorGroups` | [`Boolean`](#boolean) | Include epics from ancestor groups. |
| <a id="boardepicchildrenincludedescendantgroups"></a>`includeDescendantGroups` | [`Boolean`](#boolean) | Include epics from descendant groups. |
| <a id="boardepicchildrenlabelname"></a>`labelName` | [`[String!]`](#string) | Filter epics by labels. |
@@ -11581,7 +11581,7 @@ four standard [pagination arguments](#connection-pagination-arguments):
| <a id="epicancestorsiid"></a>`iid` | [`ID`](#id) | IID of the epic, e.g., "1". |
| <a id="epicancestorsiidstartswith"></a>`iidStartsWith` | [`String`](#string) | Filter epics by IID for autocomplete. |
| <a id="epicancestorsiids"></a>`iids` | [`[ID!]`](#id) | List of IIDs of epics, e.g., `[1, 2]`. |
-| <a id="epicancestorsin"></a>`in` | [`[IssuableSearchableField!]`](#issuablesearchablefield) | Specify the fields to perform the search in. Defaults to `[TITLE, DESCRIPTION]`. Requires the `search` argument. |
+| <a id="epicancestorsin"></a>`in` | [`[IssuableSearchableField!]`](#issuablesearchablefield) | Specify the fields to perform the search in. Defaults to `[TITLE, DESCRIPTION]`. Requires the `search` argument.'. |
| <a id="epicancestorsincludeancestorgroups"></a>`includeAncestorGroups` | [`Boolean`](#boolean) | Include epics from ancestor groups. |
| <a id="epicancestorsincludedescendantgroups"></a>`includeDescendantGroups` | [`Boolean`](#boolean) | Include epics from descendant groups. |
| <a id="epicancestorslabelname"></a>`labelName` | [`[String!]`](#string) | Filter epics by labels. |
@@ -11619,7 +11619,7 @@ four standard [pagination arguments](#connection-pagination-arguments):
| <a id="epicchildreniid"></a>`iid` | [`ID`](#id) | IID of the epic, e.g., "1". |
| <a id="epicchildreniidstartswith"></a>`iidStartsWith` | [`String`](#string) | Filter epics by IID for autocomplete. |
| <a id="epicchildreniids"></a>`iids` | [`[ID!]`](#id) | List of IIDs of epics, e.g., `[1, 2]`. |
-| <a id="epicchildrenin"></a>`in` | [`[IssuableSearchableField!]`](#issuablesearchablefield) | Specify the fields to perform the search in. Defaults to `[TITLE, DESCRIPTION]`. Requires the `search` argument. |
+| <a id="epicchildrenin"></a>`in` | [`[IssuableSearchableField!]`](#issuablesearchablefield) | Specify the fields to perform the search in. Defaults to `[TITLE, DESCRIPTION]`. Requires the `search` argument.'. |
| <a id="epicchildrenincludeancestorgroups"></a>`includeAncestorGroups` | [`Boolean`](#boolean) | Include epics from ancestor groups. |
| <a id="epicchildrenincludedescendantgroups"></a>`includeDescendantGroups` | [`Boolean`](#boolean) | Include epics from descendant groups. |
| <a id="epicchildrenlabelname"></a>`labelName` | [`[String!]`](#string) | Filter epics by labels. |
@@ -12469,7 +12469,7 @@ Returns [`Epic`](#epic).
| <a id="groupepiciid"></a>`iid` | [`ID`](#id) | IID of the epic, e.g., "1". |
| <a id="groupepiciidstartswith"></a>`iidStartsWith` | [`String`](#string) | Filter epics by IID for autocomplete. |
| <a id="groupepiciids"></a>`iids` | [`[ID!]`](#id) | List of IIDs of epics, e.g., `[1, 2]`. |
-| <a id="groupepicin"></a>`in` | [`[IssuableSearchableField!]`](#issuablesearchablefield) | Specify the fields to perform the search in. Defaults to `[TITLE, DESCRIPTION]`. Requires the `search` argument. |
+| <a id="groupepicin"></a>`in` | [`[IssuableSearchableField!]`](#issuablesearchablefield) | Specify the fields to perform the search in. Defaults to `[TITLE, DESCRIPTION]`. Requires the `search` argument.'. |
| <a id="groupepicincludeancestorgroups"></a>`includeAncestorGroups` | [`Boolean`](#boolean) | Include epics from ancestor groups. |
| <a id="groupepicincludedescendantgroups"></a>`includeDescendantGroups` | [`Boolean`](#boolean) | Include epics from descendant groups. |
| <a id="groupepiclabelname"></a>`labelName` | [`[String!]`](#string) | Filter epics by labels. |
@@ -12519,7 +12519,7 @@ four standard [pagination arguments](#connection-pagination-arguments):
| <a id="groupepicsiid"></a>`iid` | [`ID`](#id) | IID of the epic, e.g., "1". |
| <a id="groupepicsiidstartswith"></a>`iidStartsWith` | [`String`](#string) | Filter epics by IID for autocomplete. |
| <a id="groupepicsiids"></a>`iids` | [`[ID!]`](#id) | List of IIDs of epics, e.g., `[1, 2]`. |
-| <a id="groupepicsin"></a>`in` | [`[IssuableSearchableField!]`](#issuablesearchablefield) | Specify the fields to perform the search in. Defaults to `[TITLE, DESCRIPTION]`. Requires the `search` argument. |
+| <a id="groupepicsin"></a>`in` | [`[IssuableSearchableField!]`](#issuablesearchablefield) | Specify the fields to perform the search in. Defaults to `[TITLE, DESCRIPTION]`. Requires the `search` argument.'. |
| <a id="groupepicsincludeancestorgroups"></a>`includeAncestorGroups` | [`Boolean`](#boolean) | Include epics from ancestor groups. |
| <a id="groupepicsincludedescendantgroups"></a>`includeDescendantGroups` | [`Boolean`](#boolean) | Include epics from descendant groups. |
| <a id="groupepicslabelname"></a>`labelName` | [`[String!]`](#string) | Filter epics by labels. |
@@ -12581,6 +12581,7 @@ four standard [pagination arguments](#connection-pagination-arguments):
| <a id="groupissuesepicid"></a>`epicId` | [`String`](#string) | ID of an epic associated with the issues, "none" and "any" values are supported. |
| <a id="groupissuesiid"></a>`iid` | [`String`](#string) | IID of the issue. For example, "1". |
| <a id="groupissuesiids"></a>`iids` | [`[String!]`](#string) | List of IIDs of issues. For example, `["1", "2"]`. |
+| <a id="groupissuesin"></a>`in` | [`[IssuableSearchableField!]`](#issuablesearchablefield) | Specify the fields to perform the search in. Defaults to `[TITLE, DESCRIPTION]`. Requires the `search` argument.'. |
| <a id="groupissuesincludearchived"></a>`includeArchived` | [`Boolean`](#boolean) | Return issues from archived projects. |
| <a id="groupissuesincludesubepics"></a>`includeSubepics` | [`Boolean`](#boolean) | Whether to include subepics when filtering issues by epicId. |
| <a id="groupissuesincludesubgroups"></a>`includeSubgroups` | [`Boolean`](#boolean) | Include issues belonging to subgroups. |
@@ -16073,6 +16074,7 @@ Returns [`Issue`](#issue).
| <a id="projectissueepicid"></a>`epicId` | [`String`](#string) | ID of an epic associated with the issues, "none" and "any" values are supported. |
| <a id="projectissueiid"></a>`iid` | [`String`](#string) | IID of the issue. For example, "1". |
| <a id="projectissueiids"></a>`iids` | [`[String!]`](#string) | List of IIDs of issues. For example, `["1", "2"]`. |
+| <a id="projectissuein"></a>`in` | [`[IssuableSearchableField!]`](#issuablesearchablefield) | Specify the fields to perform the search in. Defaults to `[TITLE, DESCRIPTION]`. Requires the `search` argument.'. |
| <a id="projectissueincludesubepics"></a>`includeSubepics` | [`Boolean`](#boolean) | Whether to include subepics when filtering issues by epicId. |
| <a id="projectissueiterationid"></a>`iterationId` | [`[ID]`](#id) | List of iteration Global IDs applied to the issue. |
| <a id="projectissueiterationwildcardid"></a>`iterationWildcardId` | [`IterationWildcardId`](#iterationwildcardid) | Filter by iteration ID wildcard. |
@@ -16114,6 +16116,7 @@ Returns [`IssueStatusCountsType`](#issuestatuscountstype).
| <a id="projectissuestatuscountscrmorganizationid"></a>`crmOrganizationId` | [`String`](#string) | ID of an organization assigned to the issues. |
| <a id="projectissuestatuscountsiid"></a>`iid` | [`String`](#string) | IID of the issue. For example, "1". |
| <a id="projectissuestatuscountsiids"></a>`iids` | [`[String!]`](#string) | List of IIDs of issues. For example, `["1", "2"]`. |
+| <a id="projectissuestatuscountsin"></a>`in` | [`[IssuableSearchableField!]`](#issuablesearchablefield) | Specify the fields to perform the search in. Defaults to `[TITLE, DESCRIPTION]`. Requires the `search` argument.'. |
| <a id="projectissuestatuscountslabelname"></a>`labelName` | [`[String]`](#string) | Labels applied to this issue. |
| <a id="projectissuestatuscountsmilestonetitle"></a>`milestoneTitle` | [`[String]`](#string) | Milestone applied to this issue. |
| <a id="projectissuestatuscountsmilestonewildcardid"></a>`milestoneWildcardId` | [`MilestoneWildcardId`](#milestonewildcardid) | Filter issues by milestone ID wildcard. |
@@ -16154,6 +16157,7 @@ four standard [pagination arguments](#connection-pagination-arguments):
| <a id="projectissuesepicid"></a>`epicId` | [`String`](#string) | ID of an epic associated with the issues, "none" and "any" values are supported. |
| <a id="projectissuesiid"></a>`iid` | [`String`](#string) | IID of the issue. For example, "1". |
| <a id="projectissuesiids"></a>`iids` | [`[String!]`](#string) | List of IIDs of issues. For example, `["1", "2"]`. |
+| <a id="projectissuesin"></a>`in` | [`[IssuableSearchableField!]`](#issuablesearchablefield) | Specify the fields to perform the search in. Defaults to `[TITLE, DESCRIPTION]`. Requires the `search` argument.'. |
| <a id="projectissuesincludesubepics"></a>`includeSubepics` | [`Boolean`](#boolean) | Whether to include subepics when filtering issues by epicId. |
| <a id="projectissuesiterationid"></a>`iterationId` | [`[ID]`](#id) | List of iteration Global IDs applied to the issue. |
| <a id="projectissuesiterationwildcardid"></a>`iterationWildcardId` | [`IterationWildcardId`](#iterationwildcardid) | Filter by iteration ID wildcard. |
@@ -16731,6 +16735,7 @@ four standard [pagination arguments](#connection-pagination-arguments):
| ---- | ---- | ----------- |
| <a id="projectworkitemsiid"></a>`iid` | [`String`](#string) | IID of the issue. For example, "1". |
| <a id="projectworkitemsiids"></a>`iids` | [`[String!]`](#string) | List of IIDs of work items. For example, `["1", "2"]`. |
+| <a id="projectworkitemsin"></a>`in` | [`[IssuableSearchableField!]`](#issuablesearchablefield) | Specify the fields to perform the search in. Defaults to `[TITLE, DESCRIPTION]`. Requires the `search` argument.'. |
| <a id="projectworkitemssearch"></a>`search` | [`String`](#string) | Search query for title or description. |
| <a id="projectworkitemssort"></a>`sort` | [`WorkItemSort`](#workitemsort) | Sort work items by this criteria. |
| <a id="projectworkitemsstate"></a>`state` | [`IssuableState`](#issuablestate) | Current state of this work item. |
diff --git a/doc/api/groups.md b/doc/api/groups.md
index 573928a3847..d0204b9a283 100644
--- a/doc/api/groups.md
+++ b/doc/api/groups.md
@@ -874,6 +874,50 @@ curl --request POST --header "PRIVATE-TOKEN: <your_access_token>" \
"https://gitlab.example.com/api/v4/groups/4/projects/56"
```
+## Get groups to which a user can transfer a group
+
+> [Introduced](https://gitlab.com/gitlab-org/gitlab/-/issues/371117) in GitLab 15.4.
+
+Retrieve a list of groups to which the authenticated user can transfer a group.
+
+```plaintext
+GET /groups/:id/transfer_locations
+```
+
+| Attribute | Type | Required | Description |
+|-------------|----------------|------------------------|-------------|
+| `id` | integer or string | **{check-circle}** Yes | The ID or [URL-encoded path of the group to be transferred](index.md#namespaced-path-encoding). |
+| `search` | string | **{dotted-circle}** No | The group names to search for. |
+
+Example request:
+
+```shell
+curl --request GET "https://gitlab.example.com/api/v4/groups/1/transfer_locations"
+```
+
+Example response:
+
+```json
+[
+ {
+ "id": 27,
+ "web_url": "https://gitlab.example.com/groups/gitlab",
+ "name": "GitLab",
+ "avatar_url": null,
+ "full_name": "GitLab",
+ "full_path": "GitLab"
+ },
+ {
+ "id": 31,
+ "web_url": "https://gitlab.example.com/groups/foobar",
+ "name": "FooBar",
+ "avatar_url": null,
+ "full_name": "FooBar",
+ "full_path": "FooBar"
+ }
+]
+```
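+
+You can use the `search` attribute described above to narrow the results. For example,
+to list only destination groups whose name contains the (illustrative) string `foobar`:
+
+```shell
+curl --request GET --header "PRIVATE-TOKEN: <your_access_token>" \
+     "https://gitlab.example.com/api/v4/groups/1/transfer_locations?search=foobar"
+```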
+
## Transfer a group to a new parent group / Turn a subgroup to a top-level group
> [Introduced](https://gitlab.com/gitlab-org/gitlab/-/issues/23831) in GitLab 14.6.
diff --git a/doc/api/members.md b/doc/api/members.md
index aa7d697c6c6..b0992aafb7e 100644
--- a/doc/api/members.md
+++ b/doc/api/members.md
@@ -47,7 +47,7 @@ GET /projects/:id/members
| `id` | integer/string | yes | The ID or [URL-encoded path of the project or group](index.md#namespaced-path-encoding) owned by the authenticated user |
| `query` | string | no | A query string to search for members |
| `user_ids` | array of integers | no | Filter the results on the given user IDs |
-| `skip_users` | array of integers | no | Filter skipped users out of the results |
+| `skip_users` | array of integers | no | Filter skipped users out of the results |
```shell
curl --header "PRIVATE-TOKEN: <your_access_token>" "https://gitlab.example.com/api/v4/groups/:id/members"
diff --git a/doc/api/oauth2.md b/doc/api/oauth2.md
index 31058100676..f3ad1f8082f 100644
--- a/doc/api/oauth2.md
+++ b/doc/api/oauth2.md
@@ -261,7 +261,7 @@ Check the [RFC spec](https://tools.ietf.org/html/rfc6749#section-4.3) for a
detailed flow description.
NOTE:
-The Resource Owner Password Credentials is disabled for users with
+The Resource Owner Password Credentials flow is disabled for users with
[two-factor authentication](../user/profile/account/two_factor_authentication.md) turned on.
These users can access the API using [personal access tokens](../user/profile/personal_access_tokens.md)
instead.
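+
+For example, such a user might authenticate a REST call with a personal access token
+passed in the `PRIVATE-TOKEN` header:
+
+```shell
+curl --header "PRIVATE-TOKEN: <your_access_token>" "https://gitlab.example.com/api/v4/user"
+```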
diff --git a/doc/api/packages/conan.md b/doc/api/packages/conan.md
index 3ac2eeb40b1..637c3d27d75 100644
--- a/doc/api/packages/conan.md
+++ b/doc/api/packages/conan.md
@@ -38,7 +38,7 @@ The examples in this document all use the instance-level prefix.
/packages/conan/v1
```
-When using the instance-level routes, be aware that there is a
+When using the instance-level routes, be aware that there is a
[naming restriction](../../user/packages/conan_repository/index.md#package-recipe-naming-convention-for-instance-remotes)
for Conan recipes.
diff --git a/doc/api/repositories.md b/doc/api/repositories.md
index bf2ead43519..7d94edc0872 100644
--- a/doc/api/repositories.md
+++ b/doc/api/repositories.md
@@ -342,7 +342,7 @@ tags using these formats:
- `vX.Y.Z`
- `X.Y.Z`
-Where `X.Y.Z` is a version that follows [semantic versioning](https://semver.org/).
+Where `X.Y.Z` is a version that follows [semantic versioning](https://semver.org/).
For example, consider a project with the following tags:
- v1.0.0-pre1
diff --git a/doc/architecture/blueprints/ci_data_decay/index.md b/doc/architecture/blueprints/ci_data_decay/index.md
index 7c0bdf299db..bf2e7b00e84 100644
--- a/doc/architecture/blueprints/ci_data_decay/index.md
+++ b/doc/architecture/blueprints/ci_data_decay/index.md
@@ -48,7 +48,7 @@ PostgreSQL database running on GitLab.com.
This volume contributes to significant performance problems, development
challenges and is often related to production incidents.
-We also expect a [significant growth in the number of builds executed on GitLab.com](../ci_scale/index.md)
+We also expect a [significant growth in the number of builds executed on GitLab.com](../ci_scale/index.md)
in the upcoming years.
## Opportunity
@@ -61,7 +61,7 @@ pipelines that are older than a few months might help us to move this data out
of the primary database, to a different storage, that is more performant and
cost effective.
-It is already possible to prevent processing builds
+It is already possible to prevent processing builds
[that have been archived](../../../user/admin_area/settings/continuous_integration.md#archive-jobs).
When a build gets archived it will not be possible to retry it, but we still do
keep all the processing metadata in the database, and it consumes resources
diff --git a/doc/architecture/blueprints/ci_scale/index.md b/doc/architecture/blueprints/ci_scale/index.md
index 5822ae2b5ed..bd680714ae5 100644
--- a/doc/architecture/blueprints/ci_scale/index.md
+++ b/doc/architecture/blueprints/ci_scale/index.md
@@ -115,13 +115,13 @@ of the CI/CD Apdex score, and sometimes even causes a significant performance
degradation in the production environment.
There are multiple other strategies that can improve performance and
-reliability. We can use [Redis queuing](https://gitlab.com/gitlab-org/gitlab/-/issues/322972), or
+reliability. We can use [Redis queuing](https://gitlab.com/gitlab-org/gitlab/-/issues/322972), or
[a separate table that will accelerate SQL queries used to build queues](https://gitlab.com/gitlab-org/gitlab/-/issues/322766)
and we want to explore them.
-**Status**: As of October 2021 the new architecture
+**Status**: As of October 2021 the new architecture
[has been implemented on GitLab.com](https://gitlab.com/groups/gitlab-org/-/epics/5909#note_680407908).
-The following epic tracks making it generally available:
+The following epic tracks making it generally available:
[Make the new pending builds architecture generally available](https://gitlab.com/groups/gitlab-org/-/epics/6954).
### Moving big amounts of data is challenging
diff --git a/doc/architecture/blueprints/cloud_native_build_logs/index.md b/doc/architecture/blueprints/cloud_native_build_logs/index.md
index 0c941e332cb..3a06d73141b 100644
--- a/doc/architecture/blueprints/cloud_native_build_logs/index.md
+++ b/doc/architecture/blueprints/cloud_native_build_logs/index.md
@@ -12,7 +12,7 @@ Cloud native and the adoption of Kubernetes has been recognised by GitLab to be
one of the top two biggest tailwinds that are helping us grow faster as a
company behind the project.
-This effort is described in a more details
+This effort is described in more detail
[in the infrastructure team handbook](https://about.gitlab.com/handbook/engineering/infrastructure/production/kubernetes/gitlab-com/).
## Traditional build logs
@@ -88,7 +88,7 @@ even tried to replace NFS with
Since that time it has become apparent that the cost of operations and
maintenance of a NFS cluster is significant and that if we ever decide to
-migrate to Kubernetes
+migrate to Kubernetes
[we need to decouple GitLab from a shared local storage and NFS](https://gitlab.com/gitlab-org/gitlab-pages/-/issues/426#note_375646396).
1. NFS might be a single point of failure
@@ -113,7 +113,7 @@ of complexity, maintenance cost and enormous, negative impact on availability.
The work needed to make the new architecture production ready and enabled on
GitLab.com had been tracked in [Cloud Native Build Logs on GitLab.com](https://gitlab.com/groups/gitlab-org/-/epics/4275) epic.
-Enabling this feature on GitLab.com is a subtask of
+Enabling this feature on GitLab.com is a subtask of
[making the new architecture generally available](https://gitlab.com/groups/gitlab-org/-/epics/3791) for everyone.
## Status
diff --git a/doc/architecture/blueprints/cloud_native_gitlab_pages/index.md b/doc/architecture/blueprints/cloud_native_gitlab_pages/index.md
index 89c3a4cd6b4..431bc19ad84 100644
--- a/doc/architecture/blueprints/cloud_native_gitlab_pages/index.md
+++ b/doc/architecture/blueprints/cloud_native_gitlab_pages/index.md
@@ -17,7 +17,7 @@ Cloud Native and the adoption of Kubernetes has been recognised by GitLab to be
one of the top two biggest tailwinds that are helping us grow faster as a
company behind the project.
-This effort is described in more detail
+This effort is described in more detail
[in the infrastructure team handbook page](https://about.gitlab.com/handbook/engineering/infrastructure/production/kubernetes/gitlab-com/).
GitLab Pages is tightly coupled with NFS and in order to unblock Kubernetes
@@ -55,7 +55,7 @@ even tried to replace NFS with
Since that time it has become apparent that the cost of operations and
maintenance of a NFS cluster is significant and that if we ever decide to
-migrate to Kubernetes
+migrate to Kubernetes
[we need to decouple GitLab from a shared local storage and NFS](https://gitlab.com/gitlab-org/gitlab-pages/-/issues/426#note_375646396).
1. NFS might be a single point of failure
@@ -83,7 +83,7 @@ graph TD
C -- Serves static content --> E(Visitors)
```
-This new architecture has been briefly described in
+This new architecture has been briefly described in
[the blog post](https://about.gitlab.com/blog/2020/08/03/how-gitlab-pages-uses-the-gitlab-api-to-serve-content/)
too.
diff --git a/doc/architecture/blueprints/feature_flags_development/index.md b/doc/architecture/blueprints/feature_flags_development/index.md
index eaca7da6bd7..75238a7f422 100644
--- a/doc/architecture/blueprints/feature_flags_development/index.md
+++ b/doc/architecture/blueprints/feature_flags_development/index.md
@@ -115,8 +115,8 @@ These are reason why these changes are needed:
## Iterations
-This work is being done as part of dedicated epic:
-[Improve internal usage of Feature Flags](https://gitlab.com/groups/gitlab-org/-/epics/3551).
+This work is being done as part of a dedicated epic:
+[Improve internal usage of Feature Flags](https://gitlab.com/groups/gitlab-org/-/epics/3551).
This epic describes a meta reasons for making these changes.
## Who
diff --git a/doc/architecture/blueprints/graphql_api/index.md b/doc/architecture/blueprints/graphql_api/index.md
index eb045de491e..1ee322c412b 100644
--- a/doc/architecture/blueprints/graphql_api/index.md
+++ b/doc/architecture/blueprints/graphql_api/index.md
@@ -44,11 +44,11 @@ It is an opportunity to learn from our experience in evolving the REST API, for
the scale, and to apply this knowledge onto the GraphQL development efforts. We
can do that by building query-to-feature correlation mechanisms, adding
scalable state synchronization support and aligning GraphQL with other
-architectural initiatives being executed in parallel, like
+architectural initiatives being executed in parallel, like
[the support for direct uploads](https://gitlab.com/gitlab-org/gitlab/-/issues/280819).
GraphQL should be secure by default. We can avoid common security mistakes by
-building mechanisms that will help us to enforce
+building mechanisms that will help us to enforce
[OWASP GraphQL recommendations](https://cheatsheetseries.owasp.org/cheatsheets/GraphQL_Cheat_Sheet.html)
that are relevant to us.
diff --git a/doc/architecture/blueprints/object_storage/index.md b/doc/architecture/blueprints/object_storage/index.md
index b70339c8b8d..7a4ecd0e5a8 100644
--- a/doc/architecture/blueprints/object_storage/index.md
+++ b/doc/architecture/blueprints/object_storage/index.md
@@ -31,8 +31,8 @@ underlying implementation for shared, distributed, highly-available
(HA) file storage.
Over time, we have built support for object storage across the
-application, solving specific problems in a
-[multitude of iterations](https://about.gitlab.com/company/team/structure/working-groups/object-storage/#company-efforts-on-uploads).
+application, solving specific problems in a
+[multitude of iterations](https://about.gitlab.com/company/team/structure/working-groups/object-storage/#company-efforts-on-uploads).
This has led to increased complexity across the board, from development
(new features and bug fixes) to installation:
@@ -67,7 +67,7 @@ This has led to increased complexity across the board, from development
The following is a brief description of the main directions we can take to
remove the pain points affecting our object storage implementation.
-This is also available as [a YouTube video](https://youtu.be/X9V_w8hsM8E) recorded for the
+This is also available as [a YouTube video](https://youtu.be/X9V_w8hsM8E) recorded for the
[Object Storage Working Group](https://about.gitlab.com/company/team/structure/working-groups/object-storage/).
### Simplify GitLab architecture by shipping MinIO
@@ -78,7 +78,7 @@ local storage and object storage.
With local storage, there is the assumption of a shared storage
between components. This can be achieved by having a single box
-installation, without HA, or with a NFS, which
+installation, without HA, or with a NFS, which
[we no longer recommend](../../../administration/nfs.md).
We have a testing gap on object storage. It also requires Workhorse
@@ -134,7 +134,7 @@ access to new features without infrastructure chores.
Our implementation is built on top of a 3rd-party framework where
every object storage client is a 3rd-party library. Unfortunately some
-of them are unmaintained.
+of them are unmaintained.
[We have customers who cannot push 5GB Git LFS objects](https://gitlab.com/gitlab-org/gitlab/-/issues/216442),
but with such a vital feature implemented in 3rd-party libraries we
are slowed down in fixing it, and we also rely on external maintainers
@@ -214,7 +214,7 @@ Proposal:
DRIs:
-The DRI for this blueprint is the
+The DRI for this blueprint is the
[Object Storage Working Group](https://about.gitlab.com/company/team/structure/working-groups/object-storage/).
<!-- vale gitlab.Spelling = YES -->
diff --git a/doc/architecture/blueprints/runner_scaling/index.md b/doc/architecture/blueprints/runner_scaling/index.md
index 494aaa6a641..8f7062a1148 100644
--- a/doc/architecture/blueprints/runner_scaling/index.md
+++ b/doc/architecture/blueprints/runner_scaling/index.md
@@ -33,7 +33,7 @@ This design choice was crucial for the GitLab Runner success. Since that time
the auto-scaling feature has been used by many users and customers and enabled
rapid growth of CI/CD adoption on GitLab.com.
-We can not, however, continue using Docker Machine. Work on that project
+We cannot, however, continue using Docker Machine. Work on that project
[was paused in July 2018](https://github.com/docker/machine/issues/4537) and there
was no development made since that time (except for some highly important
security fixes). In 2018, after Docker Machine entered the "maintenance mode",
@@ -76,7 +76,7 @@ mechanism with a reliable and flexible mechanism. We might be unable to build a
drop-in replacement for Docker Machine, as there are presumably many reasons
why it has been deprecated. It is very difficult to maintain compatibility with
so many cloud providers, and it seems that Docker Machine has been deprecated
-in favor of Docker Desktop, which is not a viable replacement for us.
+in favor of Docker Desktop, which is not a viable replacement for us.
[This issue](https://github.com/docker/roadmap/issues/245) contains a discussion
about how people are using Docker Machine right now, and it seems that GitLab
CI is one of the most frequent reasons for people to keep using Docker Machine.
diff --git a/doc/ci/docker/using_docker_build.md b/doc/ci/docker/using_docker_build.md
index ea4ad25637b..4c9cb2923d7 100644
--- a/doc/ci/docker/using_docker_build.md
+++ b/doc/ci/docker/using_docker_build.md
@@ -345,7 +345,7 @@ not without its own challenges:
root file system, you can use the job's working directory as a mount point for
child containers. For example, if you have files you want to share with a
child container, you might create a subdirectory under `/builds/$CI_PROJECT_PATH`
- and use it as your mount point. For a more detailed explanation, view
+ and use it as your mount point. For a more detailed explanation, view
[issue #41227](https://gitlab.com/gitlab-org/gitlab-foss/-/issues/41227).
```yaml
@@ -406,7 +406,7 @@ sudo gitlab-runner register -n \
##### Enable registry mirror for `docker:dind` service
When the Docker daemon starts inside of the service container, it uses
-the default configuration. You may want to configure a
+the default configuration. You may want to configure a
[registry mirror](https://docs.docker.com/registry/recipes/mirror/) for
performance improvements and to ensure you don't reach Docker Hub rate limits.
diff --git a/doc/ci/interactive_web_terminal/index.md b/doc/ci/interactive_web_terminal/index.md
index e6a9f1fa646..03c905184cf 100644
--- a/doc/ci/interactive_web_terminal/index.md
+++ b/doc/ci/interactive_web_terminal/index.md
@@ -18,7 +18,7 @@ taken to protect the users.
NOTE:
[Shared runners on GitLab.com](../runners/index.md) do not
-provide an interactive web terminal. Follow
+provide an interactive web terminal. Follow
[this issue](https://gitlab.com/gitlab-org/gitlab/-/issues/24674) for progress on
adding support. For groups and projects hosted on GitLab.com, interactive web
terminals are available when using your own group or project runner.
@@ -27,7 +27,7 @@ terminals are available when using your own group or project runner.
Two things need to be configured for the interactive web terminal to work:
-- The runner needs to have
+- The runner needs to have
[`[session_server]` configured properly](https://docs.gitlab.com/runner/configuration/advanced-configuration.html#the-session_server-section)
- If you are using a reverse proxy with your GitLab instance, web terminals need to be
[enabled](../../administration/integration/terminal.md#enabling-and-disabling-terminal-support)
@@ -54,7 +54,7 @@ Not all executors are
NOTE:
The `docker` executor does not keep running
after the build script is finished. At that point, the terminal automatically
-disconnects and does not wait for the user to finish. Please follow
+disconnects and does not wait for the user to finish. Please follow
[this issue](https://gitlab.com/gitlab-org/gitlab-runner/-/issues/3605) for updates on
improving this behavior.
diff --git a/doc/ci/pipelines/merge_trains.md b/doc/ci/pipelines/merge_trains.md
index 2882cd378aa..6547ea3895b 100644
--- a/doc/ci/pipelines/merge_trains.md
+++ b/doc/ci/pipelines/merge_trains.md
@@ -59,7 +59,7 @@ changes that are included in the target branch, and the `C` changes that are fro
the merge request already in the train.
<i class="fa fa-youtube-play youtube" aria-hidden="true"></i>
-Watch this video for a demonstration on
+Watch this video for a demonstration on
[how parallel execution of merge trains can prevent commits from breaking the default branch](https://www.youtube.com/watch?v=D4qCqXgZkHQ).
## Prerequisites
diff --git a/doc/ci/runners/runners_scope.md b/doc/ci/runners/runners_scope.md
index 9bd0b52f423..2d4453bdc9b 100644
--- a/doc/ci/runners/runners_scope.md
+++ b/doc/ci/runners/runners_scope.md
@@ -28,7 +28,7 @@ If you are using a self-managed instance of GitLab:
going to your project's **Settings > CI/CD**, expanding **Runners**,
and selecting **Show runner installation instructions**.
These instructions are also available [in the documentation](https://docs.gitlab.com/runner/install/index.html).
-- The administrator can also configure a maximum number of shared runner
+- The administrator can also configure a maximum number of shared runner
[CI/CD minutes for each group](../pipelines/cicd_minutes.md#set-the-quota-of-cicd-minutes-for-a-specific-namespace).
If you are using GitLab.com:
diff --git a/doc/development/api_styleguide.md b/doc/development/api_styleguide.md
index b72ef1bffc4..7f7d78bb58e 100644
--- a/doc/development/api_styleguide.md
+++ b/doc/development/api_styleguide.md
@@ -110,14 +110,14 @@ Model.create(foo: params[:foo])
With Grape v1.3+, Array types must be defined with a `coerce_with`
block, or parameters, fails to validate when passed a string from an
-API request. See the
+API request. See the
[Grape upgrading documentation](https://github.com/ruby-grape/grape/blob/master/UPGRADING.md#ensure-that-array-types-have-explicit-coercions)
for more details.
### Automatic coercion of nil inputs
Prior to Grape v1.3.3, Array parameters with `nil` values would
-automatically be coerced to an empty Array. However, due to
+automatically be coerced to an empty Array. However, due to
[this pull request in v1.3.3](https://github.com/ruby-grape/grape/pull/2040), this
is no longer the case. For example, suppose you define a PUT `/test`
request that has an optional parameter:
@@ -259,7 +259,7 @@ In situations where the same model has multiple entities in the API
discretion with applying this scope. It may be that you optimize for the
most basic entity, with successive entities building upon that scope.
-The `with_api_entity_associations` scope also
+The `with_api_entity_associations` scope also
[automatically preloads data](https://gitlab.com/gitlab-org/gitlab/-/blob/19f74903240e209736c7668132e6a5a735954e7c/app%2Fmodels%2Ftodo.rb#L34)
for `Todo` _targets_ when returned in the [to-dos API](../api/todos.md).
diff --git a/doc/development/application_slis/index.md b/doc/development/application_slis/index.md
index 27e69ff3445..7fdebaab28b 100644
--- a/doc/development/application_slis/index.md
+++ b/doc/development/application_slis/index.md
@@ -45,8 +45,8 @@ for clarity, they define different metric names:
As shown in this example, they can share a base name (`foo` in this example). We
recommend this when they refer to the same operation.
-Before the first scrape, it is important to have
-[initialized the SLI with all possible label-combinations](https://prometheus.io/docs/practices/instrumentation/#avoid-missing-metrics).
+Before the first scrape, it is important to have
+[initialized the SLI with all possible label-combinations](https://prometheus.io/docs/practices/instrumentation/#avoid-missing-metrics).
This avoid confusing results when using these counters in calculations.
To initialize an SLI, use the `.initialize_sli` class method, for
diff --git a/doc/development/auto_devops.md b/doc/development/auto_devops.md
index 55ab234cc68..b9b8770207e 100644
--- a/doc/development/auto_devops.md
+++ b/doc/development/auto_devops.md
@@ -20,7 +20,7 @@ based on your project contents. When Auto DevOps is enabled for a
project, the user does not need to explicitly include any pipeline configuration
through a [`.gitlab-ci.yml` file](../ci/yaml/index.md).
-In the absence of a `.gitlab-ci.yml` file, the
+In the absence of a `.gitlab-ci.yml` file, the
[Auto DevOps CI/CD template](https://gitlab.com/gitlab-org/gitlab/-/blob/master/lib/gitlab/ci/templates/Auto-DevOps.gitlab-ci.yml)
is used implicitly to configure the pipeline for the project. This
template is a top-level template that includes other sub-templates,
diff --git a/doc/development/build_test_package.md b/doc/development/build_test_package.md
index 4645bd02d9e..97dd24fc522 100644
--- a/doc/development/build_test_package.md
+++ b/doc/development/build_test_package.md
@@ -13,7 +13,7 @@ pipeline that can be used to trigger a pipeline in the Omnibus GitLab repository
that will create:
- A deb package for Ubuntu 16.04, available as a build artifact, and
-- A Docker image, which is pushed to the
+- A Docker image, which is pushed to the
[Omnibus GitLab container registry](https://gitlab.com/gitlab-org/omnibus-gitlab/container_registry)
(images titled `gitlab-ce` and `gitlab-ee` respectively and image tag is the
commit which triggered the pipeline).
diff --git a/doc/development/changelog.md b/doc/development/changelog.md
index c5b234069e3..c0296a6d75e 100644
--- a/doc/development/changelog.md
+++ b/doc/development/changelog.md
@@ -190,7 +190,7 @@ editor. Once closed, Git presents you with a new text editor instance to edit
the commit message of commit B. Add the trailer, then save and quit the editor.
If all went well, commit B is now updated.
-For more information about interactive rebases, take a look at
+For more information about interactive rebases, take a look at
[the Git documentation](https://git-scm.com/book/en/v2/Git-Tools-Rewriting-History).
---
diff --git a/doc/development/code_intelligence/index.md b/doc/development/code_intelligence/index.md
index a89730383e4..87697a5e252 100644
--- a/doc/development/code_intelligence/index.md
+++ b/doc/development/code_intelligence/index.md
@@ -35,7 +35,7 @@ sequenceDiagram
Workhorse-->>-Runner: request results
```
-1. The CI/CD job generates a document in an LSIF format (usually `dump.lsif`) using
+1. The CI/CD job generates a document in an LSIF format (usually `dump.lsif`) using
[an indexer](https://lsif.dev) for the language of a project. The format
[describes](https://github.com/sourcegraph/sourcegraph/blob/main/doc/code_intelligence/explanations/writing_an_indexer.md)
interactions between a method or function and its definitions or references. The
diff --git a/doc/development/database/ci_mirrored_tables.md b/doc/development/database/ci_mirrored_tables.md
index 06f0087fafe..1d285e607fa 100644
--- a/doc/development/database/ci_mirrored_tables.md
+++ b/doc/development/database/ci_mirrored_tables.md
@@ -10,9 +10,9 @@ info: To determine the technical writer assigned to the Stage/Group associated w
As part of the database [decomposition work](https://gitlab.com/groups/gitlab-org/-/epics/6168),
which had the goal of splitting the single database GitLab is using, into two databases: `main` and
-`ci`, came the big challenge of
+`ci`, came the big challenge of
[removing all joins between the `main` and the `ci` tables](multiple_databases.md#removing-joins-between-ci-and-non-ci-tables).
-That is because PostgreSQL doesn't support joins between tables that belong to different databases.
+That is because PostgreSQL doesn't support joins between tables that belong to different databases.
However, some core application models in the main database are queried very often by the CI side.
For example:
diff --git a/doc/development/database/client_side_connection_pool.md b/doc/development/database/client_side_connection_pool.md
index 3cd0e836a8d..3143391a553 100644
--- a/doc/development/database/client_side_connection_pool.md
+++ b/doc/development/database/client_side_connection_pool.md
@@ -10,7 +10,7 @@ Ruby processes accessing the database through
ActiveRecord, automatically calculate the connection-pool size for the
process based on the concurrency.
-Because of the way [Ruby on Rails manages database connections](#connection-lifecycle),
+Because of the way [Ruby on Rails manages database connections](#connection-lifecycle),
it is important that we have at
least as many connections as we have threads. While there is a 'pool'
setting in [`database.yml`](https://gitlab.com/gitlab-org/gitlab/-/blob/master/config/database.yml.postgresql), it is not very practical because you need to
@@ -28,7 +28,7 @@ because connections are instantiated lazily.
## Troubleshooting connection-pool issues
-The connection-pool usage can be seen per environment in the
+The connection-pool usage can be seen per environment in the
[connection-pool saturation dashboard](https://dashboards.gitlab.net/d/alerts-sat_rails_db_connection_pool/alerts-rails_db_connection_pool-saturation-detail?orgId=1).
If the connection-pool is too small, this would manifest in
diff --git a/doc/development/database/loose_foreign_keys.md b/doc/development/database/loose_foreign_keys.md
index 8dbccf048d7..0af12939629 100644
--- a/doc/development/database/loose_foreign_keys.md
+++ b/doc/development/database/loose_foreign_keys.md
@@ -221,7 +221,7 @@ ON DELETE CASCADE;
```
The migration must run after the `DELETE` trigger is installed and the loose
-foreign key definition is deployed. As such, it must be a
+foreign key definition is deployed. As such, it must be a
[post-deployment migration](post_deployment_migrations.md) dated after the migration for the
trigger. If the foreign key is deleted earlier, there is a good chance of
introducing data inconsistency which needs manual cleanup:
diff --git a/doc/development/database/multiple_databases.md b/doc/development/database/multiple_databases.md
index 31fc454f8a7..034a2c2e438 100644
--- a/doc/development/database/multiple_databases.md
+++ b/doc/development/database/multiple_databases.md
@@ -7,7 +7,7 @@ info: To determine the technical writer assigned to the Stage/Group associated w
# Multiple Databases
To allow GitLab to scale further we
-[decomposed the GitLab application database into multiple databases](https://gitlab.com/groups/gitlab-org/-/epics/6168).
+[decomposed the GitLab application database into multiple databases](https://gitlab.com/groups/gitlab-org/-/epics/6168).
The two databases are `main` and `ci`. GitLab supports being run with either one database or two databases.
On GitLab.com we are using two separate databases.
diff --git a/doc/development/database/strings_and_the_text_data_type.md b/doc/development/database/strings_and_the_text_data_type.md
index e2e1191018b..4b5d1fc8f72 100644
--- a/doc/development/database/strings_and_the_text_data_type.md
+++ b/doc/development/database/strings_and_the_text_data_type.md
@@ -148,7 +148,7 @@ to update the `title_html` with a title that has more than 1024 characters, the
a database error.
Adding or removing a constraint to an existing attribute requires that any application changes are
-deployed _first_,
+deployed _first_,
otherwise servers still in the old version of the application
[may try to update the attribute with invalid values](../multi_version_compatibility.md#ci-artifact-uploads-were-failing).
For these reasons, `add_text_limit` should run in a post-deployment migration.
diff --git a/doc/development/database/understanding_explain_plans.md b/doc/development/database/understanding_explain_plans.md
index 446a84d5232..b3f99da5b26 100644
--- a/doc/development/database/understanding_explain_plans.md
+++ b/doc/development/database/understanding_explain_plans.md
@@ -252,7 +252,7 @@ A scan on an index that required retrieving some data from the table.
Bitmap scans fall between sequential scans and index scans. These are typically
used when we would read too much data from an index scan, but too little to
-perform a sequential scan. A bitmap scan uses what is known as a
+perform a sequential scan. A bitmap scan uses what is known as a
[bitmap index](https://en.wikipedia.org/wiki/Bitmap_index) to perform its work.
The [source code of PostgreSQL](https://gitlab.com/postgres/postgres/blob/REL_11_STABLE/src/include/nodes/plannodes.h#L441)
diff --git a/doc/development/distributed_tracing.md b/doc/development/distributed_tracing.md
index f49d024095d..9d62f2061ca 100644
--- a/doc/development/distributed_tracing.md
+++ b/doc/development/distributed_tracing.md
@@ -73,13 +73,13 @@ In this example, we have the following hypothetical values:
- `driver`: the driver such a Jaeger.
- `param_name`, `param_value`: these are driver specific configuration values. Configuration
- parameters for Jaeger are documented [further on in this document](#2-configure-the-gitlab_tracing-environment-variable)
+ parameters for Jaeger are documented [further on in this document](#2-configure-the-gitlab_tracing-environment-variable);
they should be URL encoded.
Multiple values should be separated by `&` characters like a URL.
## Using Jaeger in the GitLab Development Kit
-The first tracing implementation that GitLab supports is Jaeger, and the
+The first tracing implementation that GitLab supports is Jaeger, and the
[GitLab Development Kit](https://gitlab.com/gitlab-org/gitlab-development-kit/) supports distributed tracing with
Jaeger out-of-the-box.
@@ -116,7 +116,7 @@ Jaeger has many configuration options, but is very easy to start in an "all-in-o
memory for trace storage (and is therefore non-persistent). The main advantage of "all-in-one" mode
being ease of use.
-For more detailed configuration options, refer to the
+For more detailed configuration options, refer to the
[Jaeger documentation](https://www.jaegertracing.io/docs/1.9/getting-started/).
#### Using Docker
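+
+For example, a minimal local all-in-one instance might be started with Docker, using
+Jaeger's default UI and collector ports:
+
+```shell
+# Jaeger UI on 16686, HTTP collector on 14268 (Jaeger defaults).
+docker run -d --name jaeger \
+  -p 16686:16686 \
+  -p 14268:14268 \
+  jaegertracing/all-in-one:latest
+```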
@@ -201,7 +201,7 @@ If `GITLAB_TRACING` is not configured correctly, this issue is logged:
```
By default, GitLab ships with the Jaeger tracer, but other tracers can be included at compile time.
-Details of how this can be done are included in the
+Details of how this can be done are included in the
[LabKit tracing documentation](https://pkg.go.dev/gitlab.com/gitlab-org/labkit/tracing).
If no log messages about tracing are emitted, the `GITLAB_TRACING` environment variable is likely
diff --git a/doc/development/ee_features.md b/doc/development/ee_features.md
index 777bc77875e..e64ec1c3b9c 100644
--- a/doc/development/ee_features.md
+++ b/doc/development/ee_features.md
@@ -281,7 +281,7 @@ There are a few gotchas with it:
overriding the method, because we can't know when the overridden method
(that is, calling `super` in the overriding method) would want to stop early.
In this case, we shouldn't just override it, but update the original method
- to make it call the other method we want to extend, like a
+ to make it call the other method we want to extend, like a
[template method pattern](https://en.wikipedia.org/wiki/Template_method_pattern).
For example, given this base:
diff --git a/doc/development/elasticsearch.md b/doc/development/elasticsearch.md
index 47942817790..b3996e16fa1 100644
--- a/doc/development/elasticsearch.md
+++ b/doc/development/elasticsearch.md
@@ -277,7 +277,7 @@ These Advanced Search migrations, like any other GitLab changes, need to support
Depending on the order of deployment, it's possible that the migration
has started or finished and there's still a server running the application code from before the
-migration. We need to take this into consideration until we can
+migration. We need to take this into consideration until we can
[ensure all Advanced Search migrations start after the deployment has finished](https://gitlab.com/gitlab-org/gitlab/-/issues/321619).
### Reverting a migration
@@ -317,7 +317,7 @@ safely can.
We choose to use GitLab major version upgrades as a safe time to remove
backwards compatibility for indices that have not been fully migrated. We
-[document this in our upgrade documentation](../update/index.md#upgrading-to-a-new-major-version).
+[document this in our upgrade documentation](../update/index.md#upgrading-to-a-new-major-version).
We also choose to replace the migration code with the halted migration
and remove tests so that:
@@ -399,7 +399,7 @@ that may contain information to help diagnose performance issues.
### Performance Bar
-Elasticsearch requests will be displayed in the
+Elasticsearch requests will be displayed in the
[`Performance Bar`](../administration/monitoring/performance/performance_bar.md), which can
be used both locally in development and on any deployed GitLab instance to
diagnose poor search performance. This will show the exact queries being made,
@@ -495,7 +495,7 @@ theoretically be used to figure out what needs to be replayed are:
These updates can be replayed by triggering another
`ElasticDeleteProjectWorker`.
-With the above methods and taking regular
+With the above methods and taking regular
[Elasticsearch snapshots](https://www.elastic.co/guide/en/elasticsearch/reference/current/snapshot-restore.html)
we should be able to recover from different kinds of data loss issues in a
relatively short period of time compared to indexing everything from
diff --git a/doc/development/emails.md b/doc/development/emails.md
index 1b3c9226dd8..c997916aa21 100644
--- a/doc/development/emails.md
+++ b/doc/development/emails.md
@@ -160,9 +160,9 @@ and Helm Chart configuration (see [example merge request](https://gitlab.com/git
#### Rationale
This was done because to avoid [thread deadlocks](https://github.com/ruby/net-imap/issues/14), `MailRoom` needs
-an updated version of the `net-imap` gem. However, this
-[version of the net-imap cannot be installed by an unprivileged user](https://github.com/ruby/net-imap/issues/14) due to
-[an error installing the digest gem](https://github.com/ruby/digest/issues/14).
+an updated version of the `net-imap` gem. However, this
+[version of `net-imap` cannot be installed by an unprivileged user](https://github.com/ruby/net-imap/issues/14) due to
+[an error installing the digest gem](https://github.com/ruby/digest/issues/14).
[This bug in the Ruby interpreter](https://bugs.ruby-lang.org/issues/17761) was fixed in Ruby
3.0.2.
diff --git a/doc/development/fe_guide/graphql.md b/doc/development/fe_guide/graphql.md
index 442dda20d23..6dcc57b0ff5 100644
--- a/doc/development/fe_guide/graphql.md
+++ b/doc/development/fe_guide/graphql.md
@@ -729,8 +729,8 @@ In this case, we can either:
- Skip passing a cursor.
- Pass `null` explicitly to `after`.
-After data is fetched, we can use the `update`-hook as an opportunity
-[to customize the data that is set in the Vue component property](https://apollo.vuejs.org/api/smart-query.html#options).
+After data is fetched, we can use the `update`-hook as an opportunity
+[to customize the data that is set in the Vue component property](https://apollo.vuejs.org/api/smart-query.html#options).
This allows us to get a hold of the `pageInfo` object among other data.
In the `result`-hook, we can inspect the `pageInfo` object to see if we need to fetch
diff --git a/doc/development/fe_guide/vuex.md b/doc/development/fe_guide/vuex.md
index 14190d3fb5d..2d1569b7812 100644
--- a/doc/development/fe_guide/vuex.md
+++ b/doc/development/fe_guide/vuex.md
@@ -364,7 +364,7 @@ export default initialState => ({
We made the conscious decision to avoid this pattern to improve the ability to
discover and search our frontend codebase. The same applies
-when [providing data to a Vue app](vue.md#providing-data-from-haml-to-javascript). The reasoning for this is described in
+when [providing data to a Vue app](vue.md#providing-data-from-haml-to-javascript). The reasoning for this is described in
[this discussion](https://gitlab.com/gitlab-org/frontend/rfcs/-/issues/56#note_302514865):
> Consider a `someStateKey` is being used in the store state. You _may_ not be
diff --git a/doc/development/gemfile.md b/doc/development/gemfile.md
index f9cf69020bb..d993604ab7d 100644
--- a/doc/development/gemfile.md
+++ b/doc/development/gemfile.md
@@ -73,7 +73,7 @@ to a gem, go through these steps:
apply if someone who currently works at GitLab wants to maintain
the gem beyond their time working at GitLab.
-When publishing a gem to RubyGems.org, also note the section on
+When publishing a gem to RubyGems.org, also note the section on
[gem owners](https://about.gitlab.com/handbook/developer-onboarding/#ruby-gems)
in the handbook.
@@ -132,7 +132,7 @@ that also relied on `thor` but had its version pinned to a vulnerable
one. These changes are easy to miss in the `Gemfile.lock`. Pinning the
version would result in a conflict that would need to be solved.
-To avoid upgrading indirect dependencies, we can use
+To avoid upgrading indirect dependencies, we can use
[`bundle update --conservative`](https://bundler.io/man/bundle-update.1.html#OPTIONS).
When submitting a merge request including a dependency update,
diff --git a/doc/development/geo/proxying.md b/doc/development/geo/proxying.md
index 2f0226c489c..d4cb611e965 100644
--- a/doc/development/geo/proxying.md
+++ b/doc/development/geo/proxying.md
@@ -128,7 +128,7 @@ Secondary-->>Client: admin/geo/replication/projects logged in response (session
## Git pull
-For historical reasons, the `push_from_secondary` path is used to forward a Git pull. There is
+For historical reasons, the `push_from_secondary` path is used to forward a Git pull. There is
[an issue proposing to rename this route](https://gitlab.com/gitlab-org/gitlab/-/issues/292690) to avoid confusion.
### Git pull over HTTP(s)
diff --git a/doc/development/git_object_deduplication.md b/doc/development/git_object_deduplication.md
index a6b359769f8..a20bdf633cd 100644
--- a/doc/development/git_object_deduplication.md
+++ b/doc/development/git_object_deduplication.md
@@ -18,7 +18,7 @@ GitLab implements Git object deduplication.
### Understanding Git alternates
-At the Git level, we achieve deduplication by using
+At the Git level, we achieve deduplication by using
[Git alternates](https://git-scm.com/docs/gitrepository-layout#gitrepository-layout-objects).
Git alternates is a mechanism that lets a repository borrow objects from
another repository on the same machine.
@@ -99,7 +99,7 @@ are as follows:
### Assumptions
-- All repositories in a pool must use [hashed storage](../administration/repository_storage_types.md).
+- All repositories in a pool must use [hashed storage](../administration/repository_storage_types.md).
This is so that we don't have to ever worry about updating paths in
`object/info/alternates` files.
- All repositories in a pool must be on the same Gitaly storage shard.
diff --git a/doc/development/github_importer.md b/doc/development/github_importer.md
index 0aa1bad711d..e3bf605638d 100644
--- a/doc/development/github_importer.md
+++ b/doc/development/github_importer.md
@@ -71,7 +71,7 @@ This worker imports all pull requests. For every pull request a job for the
### 5. Stage::ImportPullRequestsMergedByWorker
-This worker imports the pull requests' _merged-by_ user information. The
+This worker imports the pull requests' _merged-by_ user information. The
[_List pull requests_](https://docs.github.com/en/rest/pulls#list-pull-requests)
API doesn't provide this information. Therefore, this stage must fetch each merged pull request
individually to import this information. A
diff --git a/doc/development/go_guide/dependencies.md b/doc/development/go_guide/dependencies.md
index 2a53fa590e3..7cad5bbf417 100644
--- a/doc/development/go_guide/dependencies.md
+++ b/doc/development/go_guide/dependencies.md
@@ -44,8 +44,8 @@ end with a timestamp and the first 12 characters of the commit identifier:
If a VCS tag matches one of these patterns, it is ignored.
-For a complete understanding of Go modules and versioning, see
-[this series of blog posts](https://go.dev/blog/using-go-modules)
+For a complete understanding of Go modules and versioning, see
+[this series of blog posts](https://go.dev/blog/using-go-modules)
on the official Go website.
## 'Module' vs 'Package'
diff --git a/doc/development/go_guide/index.md b/doc/development/go_guide/index.md
index 711b0662a8c..3adafa8750f 100644
--- a/doc/development/go_guide/index.md
+++ b/doc/development/go_guide/index.md
@@ -145,7 +145,7 @@ Go GitLab linter plugins are maintained in the [`gitlab-org/language-tools/go/li
## Dependencies
Dependencies should be kept to the minimum. The introduction of a new
-dependency should be argued in the merge request, as per our [Approval Guidelines](../code_review.md#approval-guidelines).
+dependency should be argued in the merge request, as per our [Approval Guidelines](../code_review.md#approval-guidelines).
Both [License Scanning](../../user/compliance/license_compliance/index.md)
and [Dependency Scanning](../../user/application_security/dependency_scanning/index.md)
should be activated on all projects to ensure new dependencies
@@ -153,7 +153,7 @@ security status and license compatibility.
### Modules
-In Go 1.11 and later, a standard dependency system is available behind the name
+In Go 1.11 and later, a standard dependency system is available behind the name
[Go Modules](https://github.com/golang/go/wiki/Modules). It provides a way to
define and lock dependencies for reproducible builds. It should be used
whenever possible.
@@ -166,7 +166,7 @@ projects, and makes merge requests easier to review.
In some cases, such as building a Go project for it to act as a dependency of a
CI run for another project, removing the `vendor/` directory means the code must
be downloaded repeatedly, which can lead to intermittent problems due to rate
-limiting or network failures. In these circumstances, you should
+limiting or network failures. In these circumstances, you should
[cache the downloaded code between](../../ci/caching/index.md#cache-go-dependencies).
There was a
diff --git a/doc/development/integrations/secure.md b/doc/development/integrations/secure.md
index 55e57a3c2ee..741fa8d89c4 100644
--- a/doc/development/integrations/secure.md
+++ b/doc/development/integrations/secure.md
@@ -509,7 +509,7 @@ which is shared by some of the analyzers that GitLab maintains. You can [contrib
new generic identifiers to if needed. Analyzers may also produce vendor-specific or product-specific
identifiers, which don't belong in the [common library](https://gitlab.com/gitlab-org/security-products/analyzers/common).
-The first item of the `identifiers` array is called the
+The first item of the `identifiers` array is called the
[primary identifier](../../user/application_security/terminology/index.md#primary-identifier).
The primary identifier is particularly important, because it is used to
[track vulnerabilities](#tracking-and-merging-vulnerabilities) as new commits are pushed to the repository.
diff --git a/doc/development/internal_api/index.md b/doc/development/internal_api/index.md
index 9b29af3e433..c35d40a7b7f 100644
--- a/doc/development/internal_api/index.md
+++ b/doc/development/internal_api/index.md
@@ -148,7 +148,7 @@ curl --request POST --header "Gitlab-Shared-Secret: <Base64 encoded token>" \
## Authorized Keys Check
This endpoint is called by the GitLab Shell authorized keys
-check. Which is called by OpenSSH for
+check, which is called by OpenSSH for
[fast SSH key lookup](../../administration/operations/fast_ssh_key_lookup.md).
| Attribute | Type | Required | Description |
diff --git a/doc/development/lfs.md b/doc/development/lfs.md
index 5900eb68294..20157e9e805 100644
--- a/doc/development/lfs.md
+++ b/doc/development/lfs.md
@@ -76,13 +76,13 @@ process, which writes the contents to the standard output.
1. The archive data is sent back to the client.
In step 7, the `gitaly-lfs-smudge` filter must talk to Workhorse, not to
-Rails, or an invalid LFS blob is saved. To support this, GitLab 13.5
+Rails, or an invalid LFS blob is saved. To support this, GitLab 13.5
[changed the default Omnibus configuration to have Gitaly talk to the Workhorse](https://gitlab.com/gitlab-org/omnibus-gitlab/-/merge_requests/4592)
instead of Rails.
One side effect of this change: the correlation ID of the original
request is not preserved for the internal API requests made by Gitaly
(or `gitaly-lfs-smudge`), such as the one made in step 8. The
-correlation IDs for those API requests are random values until
+correlation IDs for those API requests are random values until
[this Workhorse issue](https://gitlab.com/gitlab-org/gitlab-workhorse/-/issues/309) is
resolved.
diff --git a/doc/development/logging.md b/doc/development/logging.md
index f1fa7f4c8c9..467fb68f3ae 100644
--- a/doc/development/logging.md
+++ b/doc/development/logging.md
@@ -385,7 +385,7 @@ end
## Additional steps with new log files
1. Consider log retention settings. By default, Omnibus rotates any
- logs in `/var/log/gitlab/gitlab-rails/*.log` every hour and
+ logs in `/var/log/gitlab/gitlab-rails/*.log` every hour and
[keep at most 30 compressed files](https://docs.gitlab.com/omnibus/settings/logs.html#logrotate).
On GitLab.com, that setting is only 6 compressed files. These settings should suffice
for most users, but you may need to tweak them in [Omnibus GitLab](https://gitlab.com/gitlab-org/omnibus-gitlab).
@@ -395,7 +395,7 @@ end
a merge request to the [`gitlab_fluentd`](https://gitlab.com/gitlab-cookbooks/gitlab_fluentd)
project. See [this example](https://gitlab.com/gitlab-cookbooks/gitlab_fluentd/-/merge_requests/51/diffs).
-1. Be sure to update the [GitLab CE/EE documentation](../administration/logs/index.md) and the
+1. Be sure to update the [GitLab CE/EE documentation](../administration/logs/index.md) and the
[GitLab.com runbooks](https://gitlab.com/gitlab-com/runbooks/blob/master/docs/logging/README.md).
## Control logging visibility
diff --git a/doc/development/merge_request_performance_guidelines.md b/doc/development/merge_request_performance_guidelines.md
index 7ff25705ae6..895fc6f92a4 100644
--- a/doc/development/merge_request_performance_guidelines.md
+++ b/doc/development/merge_request_performance_guidelines.md
@@ -394,7 +394,7 @@ query for every mention of `@alice`.
Caching data per transaction can be done using
[RequestStore](https://github.com/steveklabnik/request_store) (use
`Gitlab::SafeRequestStore` to avoid having to remember to check
-`RequestStore.active?`). Caching data in Redis can be done using
+`RequestStore.active?`). Caching data in Redis can be done using
[Rails' caching system](https://guides.rubyonrails.org/caching_with_rails.html).
## Pagination
diff --git a/doc/development/migration_style_guide.md b/doc/development/migration_style_guide.md
index 64d8b22f1b8..4e569579f37 100644
--- a/doc/development/migration_style_guide.md
+++ b/doc/development/migration_style_guide.md
@@ -1228,7 +1228,7 @@ If using a model in the migrations, you should first
[clear the column cache](https://api.rubyonrails.org/classes/ActiveRecord/ModelSchema/ClassMethods.html#method-i-reset_column_information)
using `reset_column_information`.
-If using a model that leverages single table inheritance (STI), there are
+If using a model that leverages single table inheritance (STI), there are
[special considerations](database/single_table_inheritance.md#in-migrations).
This avoids problems where a column that you are using was altered and cached
diff --git a/doc/development/pipelines.md b/doc/development/pipelines.md
index d57e5bbeb26..66f5bfde074 100644
--- a/doc/development/pipelines.md
+++ b/doc/development/pipelines.md
@@ -221,8 +221,8 @@ that includes `rspec-profile` in their name.
### Logging
-- Rails logging to `log/test.log` is disabled by default in CI
- [for performance reasons](https://jtway.co/speed-up-your-rails-test-suite-by-6-in-1-line-13fedb869ec4).
+- Rails logging to `log/test.log` is disabled by default in CI
+ [for performance reasons](https://jtway.co/speed-up-your-rails-test-suite-by-6-in-1-line-13fedb869ec4).
To override this setting, provide the
`RAILS_ENABLE_TEST_LOG` environment variable.
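+
+For example, when running a single spec locally you might enable the log like this
+(the spec path is illustrative):
+
+```shell
+RAILS_ENABLE_TEST_LOG=1 bin/rspec spec/models/project_spec.rb
+```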
diff --git a/doc/development/rails_update.md b/doc/development/rails_update.md
index 9907a78421f..bda21860eae 100644
--- a/doc/development/rails_update.md
+++ b/doc/development/rails_update.md
@@ -27,7 +27,7 @@ We strive to run GitLab using the latest Rails releases to benefit from performa
1. Run `yarn patch-package @rails/ujs` after updating this to ensure our local patch file version matches.
1. Create an MR with the `pipeline:run-all-rspec` label and see if pipeline breaks.
1. To resolve and debug spec failures use `git bisect` against the rails repository. See the [debugging section](#git-bisect-against-rails) below.
-1. Include links to the Gem diffs between the two versions in the merge request description. For example, this is the gem diff for
+1. Include links to the Gem diffs between the two versions in the merge request description. For example, this is the gem diff for
[`activesupport` 6.1.3.2 to 6.1.4.1](https://my.diffend.io/gems/activerecord/6.1.3.2/6.1.4.1).
### Prepare an MR for Gitaly
diff --git a/doc/development/real_time.md b/doc/development/real_time.md
index 21f3ee1f3b2..f113d4dd3b7 100644
--- a/doc/development/real_time.md
+++ b/doc/development/real_time.md
@@ -60,7 +60,7 @@ downstream services.
To mitigate this, ensure that the code establishing the new WebSocket connection
is feature flagged and defaulted to `off`. A careful, percentage-based roll-out
-of the feature flag ensures that effects can be observed on the
+of the feature flag ensures that effects can be observed on the
[WebSocket dashboard](https://dashboards.gitlab.net/d/websockets-main/websockets-overview?orgId=1)
1. Create a
diff --git a/doc/development/redis/new_redis_instance.md b/doc/development/redis/new_redis_instance.md
index efaf1e5a6d0..24885b40eb9 100644
--- a/doc/development/redis/new_redis_instance.md
+++ b/doc/development/redis/new_redis_instance.md
@@ -265,7 +265,7 @@ instances to cope without this functional partition.
If we decide to keep the migration code:
- We should document the migration steps.
-- If we used a feature flag, we should ensure it's an
+- If we used a feature flag, we should ensure it's an
[ops type feature flag](../feature_flags/index.md#ops-type), as these are long-lived flags.
Otherwise, we can remove the flags and conclude the project.
diff --git a/doc/development/routing.md b/doc/development/routing.md
index 3d5857b4237..54a531730f9 100644
--- a/doc/development/routing.md
+++ b/doc/development/routing.md
@@ -6,7 +6,7 @@ info: To determine the technical writer assigned to the Stage/Group associated w
# Routing
-The GitLab backend is written primarily with Rails so it uses
+The GitLab backend is written primarily with Rails so it uses
[Rails routing](https://guides.rubyonrails.org/routing.html). Beside Rails best
practices, there are few rules unique to the GitLab application. To
support subgroups, GitLab project and group routes use the wildcard
diff --git a/doc/development/scalability.md b/doc/development/scalability.md
index b7ee0ca1167..66f436bd391 100644
--- a/doc/development/scalability.md
+++ b/doc/development/scalability.md
@@ -35,7 +35,7 @@ The application has a tight coupling to the database schema. When the
application starts, Rails queries the database schema, caching the tables and
column types for the data requested. Because of this schema cache, dropping a
column or table while the application is running can produce 500 errors to the
-user. This is why we have a
+user. This is why we have a
[process for dropping columns and other no-downtime changes](database/avoiding_downtime_in_migrations.md).
#### Multi-tenancy
@@ -61,10 +61,10 @@ There are two ways to deal with this:
- Sharding. Distribute data across multiple databases.
Partitioning is a built-in PostgreSQL feature and requires minimal changes
-in the application. However, it
+in the application. However, it
[requires PostgreSQL 11](https://www.2ndquadrant.com/en/blog/partitioning-evolution-postgresql-11/).
-For example, a natural way to partition is to
+For example, a natural way to partition is to
[partition tables by dates](https://gitlab.com/groups/gitlab-org/-/epics/2023). For example,
the `events` and `audit_events` tables are natural candidates for this
kind of partitioning.
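
For illustration, a minimal sketch of date-based partitioning using plain PostgreSQL 11 declarative partitioning (not GitLab's own partitioning tooling; the table, column, and migration names are hypothetical):

```ruby
# frozen_string_literal: true

# Hypothetical migration: creates a range-partitioned table and one monthly
# partition. Requires PostgreSQL 11 or later for declarative partitioning.
class CreatePartitionedAuditEventsExample < ActiveRecord::Migration[6.1]
  def up
    execute <<~SQL
      CREATE TABLE audit_events_example (
        id         bigserial,
        created_at timestamptz NOT NULL,
        details    text,
        PRIMARY KEY (id, created_at)
      ) PARTITION BY RANGE (created_at);

      CREATE TABLE audit_events_example_202210 PARTITION OF audit_events_example
        FOR VALUES FROM ('2022-10-01') TO ('2022-11-01');
    SQL
  end

  def down
    execute 'DROP TABLE audit_events_example'
  end
end
```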
@@ -77,9 +77,9 @@ to abstract data access into API calls that abstract the database from
the application, but this is a significant amount of work.
There are solutions that may help abstract the sharding to some extent
-from the application. For example, we want to look at
+from the application. For example, we want to look at
[Citus Data](https://www.citusdata.com/product/community) closely. Citus Data
-provides a Rails plugin that adds a
+provides a Rails plugin that adds a
[tenant ID to ActiveRecord models](https://www.citusdata.com/blog/2017/01/05/easily-scale-out-multi-tenant-apps/).
Sharding can also be done based on feature verticals. This is the
@@ -97,11 +97,11 @@ systems.
#### Database size
-A recent
+A recent
[database checkup shows a breakdown of the table sizes on GitLab.com](https://gitlab.com/gitlab-com/gl-infra/reliability/-/issues/8022#master-1022016101-8).
Since `merge_request_diff_files` contains over 1 TB of data, we want to
-reduce/eliminate this table first. GitLab has support for
-[storing diffs in object storage](../administration/merge_request_diffs.md), which we
+reduce/eliminate this table first. GitLab has support for
+[storing diffs in object storage](../administration/merge_request_diffs.md), which we
[want to do on GitLab.com](https://gitlab.com/gitlab-com/gl-infra/reliability/-/issues/7356).
#### High availability
@@ -149,7 +149,7 @@ limitation:
- Use a multi-threaded connection pooler (for example,
[Odyssey](https://gitlab.com/gitlab-com/gl-infra/reliability/-/issues/7776)).
-On some Linux systems, it's possible to run
+On some Linux systems, it's possible to run
[multiple PgBouncer instances on the same port](https://gitlab.com/gitlab-org/omnibus-gitlab/-/issues/4796).
On GitLab.com, we run multiple PgBouncer instances on different ports to
diff --git a/doc/development/service_ping/metrics_instrumentation.md b/doc/development/service_ping/metrics_instrumentation.md
index 9dc37386111..bde838df940 100644
--- a/doc/development/service_ping/metrics_instrumentation.md
+++ b/doc/development/service_ping/metrics_instrumentation.md
@@ -204,7 +204,7 @@ options:
```
## Redis HyperLogLog metrics
-
+
You can use Redis HyperLogLog metrics to track events not kept in the database and incremented for unique values such as unique users,
for example, a count of how many different users used the search bar.
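
For context, a minimal sketch of the underlying Redis HyperLogLog commands using the `redis` gem (the key name and user IDs are hypothetical; this bypasses GitLab's instrumentation helpers and only illustrates how unique values are counted):

```ruby
require 'redis'

redis = Redis.new

# Each unique value is added to the HyperLogLog; repeated values do not grow the count.
[42, 7, 42, 13].each { |user_id| redis.pfadd('i_search_unique_users', user_id) }

# Approximate count of distinct users who triggered the event.
puts redis.pfcount('i_search_unique_users') # => 3
```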
diff --git a/doc/development/sidekiq/compatibility_across_updates.md b/doc/development/sidekiq/compatibility_across_updates.md
index 1d369b5a970..ac34d099202 100644
--- a/doc/development/sidekiq/compatibility_across_updates.md
+++ b/doc/development/sidekiq/compatibility_across_updates.md
@@ -18,13 +18,13 @@ several possible situations:
## Adding new workers
-On GitLab.com, we
-[do not currently have a Sidekiq deployment in the canary stage](https://gitlab.com/gitlab-org/gitlab/-/issues/19239).
+On GitLab.com, we
+[do not currently have a Sidekiq deployment in the canary stage](https://gitlab.com/gitlab-org/gitlab/-/issues/19239).
This means that a new worker that can be scheduled from an HTTP endpoint may
be scheduled from canary but not run on Sidekiq until the full
production deployment is complete. This can be several hours later than
scheduling the job. For some workers, this will not be a problem. For
-others - particularly [latency-sensitive jobs](worker_attributes.md#latency-sensitive-jobs) -
+others - particularly [latency-sensitive jobs](worker_attributes.md#latency-sensitive-jobs) -
this will result in a poor user experience.
This only applies to new worker classes when they are first introduced.
diff --git a/doc/development/sidekiq/idempotent_jobs.md b/doc/development/sidekiq/idempotent_jobs.md
index 5d1ebce763e..da36cdc72aa 100644
--- a/doc/development/sidekiq/idempotent_jobs.md
+++ b/doc/development/sidekiq/idempotent_jobs.md
@@ -78,7 +78,7 @@ GitLab supports two deduplication strategies:
- `until_executing`, which is the default strategy
- `until_executed`
-More [deduplication strategies have been suggested](https://gitlab.com/gitlab-com/gl-infra/scalability/-/issues/195).
+More [deduplication strategies have been suggested](https://gitlab.com/gitlab-com/gl-infra/scalability/-/issues/195).
If you are implementing a worker that could benefit from a different
strategy, please comment in the issue.
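
As a sketch, a worker could opt into the non-default strategy roughly like this (the worker name is hypothetical and other required worker attributes are omitted for brevity):

```ruby
# Hypothetical worker; only the deduplication-related declarations are shown.
class ExpensiveRecalculationWorker
  include ApplicationWorker

  idempotent!
  deduplicate :until_executed

  def perform(project_id)
    # Recalculate something expensive for the project; safe to re-run.
  end
end
```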
diff --git a/doc/development/sql.md b/doc/development/sql.md
index 7101bf7fb4b..e84c9747e32 100644
--- a/doc/development/sql.md
+++ b/doc/development/sql.md
@@ -80,7 +80,7 @@ USING GIN(column_name gin_trgm_ops);
```
The key here is the `GIN(column_name gin_trgm_ops)` part. This creates a
-[GIN index](https://www.postgresql.org/docs/current/gin.html)
+[GIN index](https://www.postgresql.org/docs/current/gin.html)
with the operator class set to `gin_trgm_ops`. These indexes
_can_ be used by `ILIKE` / `LIKE` and can lead to greatly improved performance.
One downside of these indexes is that they can easily get quite large (depending
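
A generic Rails migration sketch for such an index (illustrative table, column, and index names; this uses plain Rails helpers rather than GitLab's migration helpers):

```ruby
class AddTrigramIndexToUsersNameExample < ActiveRecord::Migration[6.1]
  disable_ddl_transaction!

  def up
    enable_extension 'pg_trgm'

    # gin_trgm_ops lets ILIKE/LIKE queries such as `name ILIKE '%alice%'` use the index.
    add_index :users, :name,
              using: :gin,
              opclass: :gin_trgm_ops,
              name: 'index_users_on_name_trigram_example',
              algorithm: :concurrently
  end

  def down
    remove_index :users, name: 'index_users_on_name_trigram_example'
  end
end
```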
diff --git a/doc/development/testing_guide/end_to_end/feature_flags.md b/doc/development/testing_guide/end_to_end/feature_flags.md
index 33f73304a26..ec8012dce6a 100644
--- a/doc/development/testing_guide/end_to_end/feature_flags.md
+++ b/doc/development/testing_guide/end_to_end/feature_flags.md
@@ -217,7 +217,7 @@ If enabling the feature flag results in E2E test failures, you can browse the ar
If an end-to-end test enables a feature flag, the end-to-end test suite can be used to test changes in a merge request
by running the `package-and-qa` job in the merge request pipeline. If the feature flag and relevant changes have already been merged, you can confirm that the tests
-pass on the default branch. The end-to-end tests run on the default branch every two hours, and the results are posted to a
+pass on the default branch. The end-to-end tests run on the default branch every two hours, and the results are posted to a
[Test Session Report, which is available in the testcase-sessions project](https://gitlab.com/gitlab-org/quality/testcase-sessions/-/issues?label_name%5B%5D=found%3Amain).
If the relevant tests do not enable the feature flag themselves, you can check if the tests will need to be updated by opening
diff --git a/doc/development/testing_guide/end_to_end/index.md b/doc/development/testing_guide/end_to_end/index.md
index 989d090d581..5f4a31ec633 100644
--- a/doc/development/testing_guide/end_to_end/index.md
+++ b/doc/development/testing_guide/end_to_end/index.md
@@ -140,7 +140,7 @@ a flaky test we first want to make sure that it's no longer flaky.
We can do that using the `ce:custom-parallel` and `ee:custom-parallel` jobs.
Both are manual jobs that you can configure using custom variables.
When clicking the name (not the play icon) of one of the parallel jobs,
-you are prompted to enter variables. You can use any of
+you are prompted to enter variables. You can use any of
[the variables that can be used with `gitlab-qa`](https://gitlab.com/gitlab-org/gitlab-qa/blob/master/docs/what_tests_can_be_run.md#supported-gitlab-environment-variables)
as well as these:
@@ -150,8 +150,8 @@ as well as these:
| `QA_TESTS` | The tests to run (no default, which means run all the tests in the scenario). Use file paths as you would when running tests via RSpec, for example, `qa/specs/features/ee/browser_ui` would include all the `EE` UI tests. |
| `QA_RSPEC_TAGS` | The RSpec tags to add (no default) |
-For now,
-[manual jobs with custom variables don't use the same variable when retried](https://gitlab.com/gitlab-org/gitlab/-/issues/31367),
+For now,
+[manual jobs with custom variables don't use the same variable when retried](https://gitlab.com/gitlab-org/gitlab/-/issues/31367),
so if you want to run the same tests multiple times,
specify the same variables in each `custom-parallel` job (up to as
many of the 10 available jobs as you want to run).
@@ -165,7 +165,7 @@ automatically started: it runs the QA smoke suite against the
You can also manually start the `review-qa-all`: it runs the full QA suite
against the [Review App](../review_apps.md).
-**This runs end-to-end tests against a Review App based on
+**This runs end-to-end tests against a Review App based on
[the official GitLab Helm chart](https://gitlab.com/gitlab-org/charts/gitlab/), itself deployed with custom
[Cloud Native components](https://gitlab.com/gitlab-org/build/CNG) built from your merge request's changes.**
@@ -244,7 +244,7 @@ Each type of scheduled pipeline generates a static link for the latest test repo
If you are not [testing code in a merge request](#testing-code-in-merge-requests),
there are two main options for running the tests. If you want to run
the existing tests against a live GitLab instance or against a pre-built Docker image,
-use the [GitLab QA orchestrator](https://gitlab.com/gitlab-org/gitlab-qa/tree/master/README.md). See also
+use the [GitLab QA orchestrator](https://gitlab.com/gitlab-org/gitlab-qa/tree/master/README.md). See also
[examples of the test scenarios you can run via the orchestrator](https://gitlab.com/gitlab-org/gitlab-qa/blob/master/docs/what_tests_can_be_run.md#examples).
On the other hand, if you would like to run against a local development GitLab
@@ -263,7 +263,7 @@ architecture. See the [documentation about it](https://gitlab.com/gitlab-org/git
Once you have decided where to put [test environment orchestration scenarios](https://gitlab.com/gitlab-org/gitlab-qa/tree/master/lib/gitlab/qa/scenario) and
[instance-level scenarios](https://gitlab.com/gitlab-org/gitlab-foss/tree/master/qa/qa/specs/features), take a look at the [GitLab QA README](https://gitlab.com/gitlab-org/gitlab/-/tree/master/qa/README.md),
-the [GitLab QA orchestrator README](https://gitlab.com/gitlab-org/gitlab-qa/tree/master/README.md),
+the [GitLab QA orchestrator README](https://gitlab.com/gitlab-org/gitlab-qa/tree/master/README.md),
and [the already existing instance-level scenarios](https://gitlab.com/gitlab-org/gitlab-foss/tree/master/qa/qa/specs/features).
### Consider **not** writing an end-to-end test
diff --git a/doc/development/testing_guide/index.md b/doc/development/testing_guide/index.md
index cd7c70e2eaa..e50902c4995 100644
--- a/doc/development/testing_guide/index.md
+++ b/doc/development/testing_guide/index.md
@@ -9,7 +9,7 @@ info: To determine the technical writer assigned to the Stage/Group associated w
This document describes various guidelines and best practices for automated
testing of the GitLab project.
-It is meant to be an _extension_ of the
+It is meant to be an _extension_ of the
[Thoughtbot testing style guide](https://github.com/thoughtbot/guides/tree/master/testing-rspec). If
this guide defines a rule that contradicts the Thoughtbot guide, this guide
takes precedence. Some guidelines may be repeated verbatim to stress their
diff --git a/doc/development/testing_guide/testing_migrations_guide.md b/doc/development/testing_guide/testing_migrations_guide.md
index 261a4f4a27e..3006a2230ac 100644
--- a/doc/development/testing_guide/testing_migrations_guide.md
+++ b/doc/development/testing_guide/testing_migrations_guide.md
@@ -317,7 +317,7 @@ To test these you usually have to:
- Verify that the expected jobs were scheduled, with the correct set
of records, the correct batch size, interval, etc.
-The behavior of the background migration itself needs to be verified in a
+The behavior of the background migration itself needs to be verified in a
[separate test for the background migration class](#example-background-migration-test).
This spec tests the
diff --git a/doc/development/workhorse/index.md b/doc/development/workhorse/index.md
index 962124248ef..f210f511954 100644
--- a/doc/development/workhorse/index.md
+++ b/doc/development/workhorse/index.md
@@ -10,7 +10,7 @@ GitLab Workhorse is a smart reverse proxy for GitLab. It handles
"large" HTTP requests such as file downloads, file uploads, Git
push/pull and Git archive downloads.
-Workhorse itself is not a feature, but there are
+Workhorse itself is not a feature, but there are
[several features in GitLab](gitlab_features.md) that would not work efficiently without Workhorse.
The canonical source for Workhorse is
diff --git a/doc/install/azure/index.md b/doc/install/azure/index.md
index 782918dba32..9c42a244f88 100644
--- a/doc/install/azure/index.md
+++ b/doc/install/azure/index.md
@@ -233,7 +233,7 @@ The first thing that appears is the sign-in page. GitLab creates an administrato
The credentials are:
- Username: `root`
-- Password: the password is automatically created, and there are
+- Password: the password is automatically created, and there are
[two ways to find it](https://docs.bitnami.com/azure/faq/get-started/find-credentials/).
After signing in, be sure to immediately [change the password](../../user/profile/index.md#change-your-password).
diff --git a/doc/install/installation.md b/doc/install/installation.md
index 2f6adb06322..5982e354ae5 100644
--- a/doc/install/installation.md
+++ b/doc/install/installation.md
@@ -129,7 +129,7 @@ sudo apt-get install libkrb5-dev
### Git
-From GitLab 13.6, we recommend you use the
+From GitLab 13.6, we recommend you use the
[Git version provided by Gitaly](https://gitlab.com/gitlab-org/gitaly/-/issues/2729)
that:
@@ -239,7 +239,7 @@ sudo make install
GitLab has several daemons written in Go. To install
GitLab we need a Go compiler. The instructions below assume you use 64-bit
-Linux. You can find downloads for other platforms at the
+Linux. You can find downloads for other platforms at the
[Go download page](https://go.dev/dl).
```shell
diff --git a/doc/operations/metrics/embed_grafana.md b/doc/operations/metrics/embed_grafana.md
index 17ff0ff01a3..1a1ac77ce23 100644
--- a/doc/operations/metrics/embed_grafana.md
+++ b/doc/operations/metrics/embed_grafana.md
@@ -14,7 +14,7 @@ embed Grafana panels using either:
## Use Grafana-rendered images
-You can embed live [Grafana](https://docs.gitlab.com/omnibus/settings/grafana.html) panels as
+You can embed live [Grafana](https://docs.gitlab.com/omnibus/settings/grafana.html) panels as
[a direct link](https://grafana.com/docs/grafana/v7.5/sharing/share-panel/#use-direct-link).
Your Grafana instance must:
diff --git a/doc/policy/maintenance.md b/doc/policy/maintenance.md
index e77a0459150..652cf4a2018 100644
--- a/doc/policy/maintenance.md
+++ b/doc/policy/maintenance.md
@@ -16,7 +16,7 @@ Our current policy is:
- Backporting security fixes **to the previous two monthly releases in addition to the current stable release**. (See [security releases](#security-releases).)
In rare cases, release managers may make an exception and backport to more than
-the last two monthly releases. See
+the last two monthly releases. See
[Backporting to older releases](#backporting-to-older-releases) for more information.
## Versioning
diff --git a/doc/raketasks/backup_gitlab.md b/doc/raketasks/backup_gitlab.md
index 4629364ce3d..7966e47f44c 100644
--- a/doc/raketasks/backup_gitlab.md
+++ b/doc/raketasks/backup_gitlab.md
@@ -452,8 +452,8 @@ gitlab_rails['backup_upload_storage_options'] = {
##### SSE-KMS
-To enable SSE-KMS, you'll need the
-[KMS key via its Amazon Resource Name (ARN) in the `arn:aws:kms:region:acct-id:key/key-id` format](https://docs.aws.amazon.com/AmazonS3/latest/userguide/UsingKMSEncryption.html).
+To enable SSE-KMS, you'll need the
+[KMS key via its Amazon Resource Name (ARN) in the `arn:aws:kms:region:acct-id:key/key-id` format](https://docs.aws.amazon.com/AmazonS3/latest/userguide/UsingKMSEncryption.html).
Under the `backup_upload_storage_options` configuration setting, set:
- `server_side_encryption` to `aws:kms`.
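
Putting this together, a rough `/etc/gitlab/gitlab.rb` sketch (assuming the key is passed through a `server_side_encryption_kms_key_id` option; the ARN below is the placeholder format from the link above):

```ruby
gitlab_rails['backup_upload_storage_options'] = {
  'server_side_encryption'            => 'aws:kms',
  'server_side_encryption_kms_key_id' => 'arn:aws:kms:region:acct-id:key/key-id'
}
```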
diff --git a/doc/raketasks/backup_restore.md b/doc/raketasks/backup_restore.md
index 878511b3e14..3192e9ace3b 100644
--- a/doc/raketasks/backup_restore.md
+++ b/doc/raketasks/backup_restore.md
@@ -12,7 +12,7 @@ An application data backup creates an archive file that contains the database,
all repositories and all attachments.
You can only restore a backup to **exactly the same version and type (CE/EE)**
-of GitLab on which it was created. The best way to
+of GitLab on which it was created. The best way to
[migrate your projects from one server to another](#migrate-to-a-new-server) is through a backup and restore.
WARNING:
@@ -190,7 +190,7 @@ tables will [be logged by PostgreSQL](../administration/logs/index.md#postgresql
ERROR: relation "tablename" does not exist at character 123
```
-This happens because the task uses `pg_dump`, which
+This happens because the task uses `pg_dump`, which
[sets a null search path and explicitly includes the schema in every SQL query](https://gitlab.com/gitlab-org/gitlab/-/issues/23211)
to address [CVE-2018-1058](https://www.postgresql.org/about/news/postgresql-103-968-9512-9417-and-9322-released-1834/).
diff --git a/doc/security/information_exclusivity.md b/doc/security/information_exclusivity.md
index 754d5fff843..2eeb436316f 100644
--- a/doc/security/information_exclusivity.md
+++ b/doc/security/information_exclusivity.md
@@ -24,7 +24,7 @@ limitation.
You can take steps to prevent unintentional sharing and information
destruction. This limitation is the reason why only certain people are allowed
to [add users to a project](../user/project/members/index.md)
-and why only a GitLab administrator can
+and why only a GitLab administrator can
[force push a protected branch](../user/project/protected_branches.md).
<!-- ## Troubleshooting
diff --git a/doc/subscriptions/gitlab_com/index.md b/doc/subscriptions/gitlab_com/index.md
index 6b83a00cac1..2e547a16f5e 100644
--- a/doc/subscriptions/gitlab_com/index.md
+++ b/doc/subscriptions/gitlab_com/index.md
@@ -299,7 +299,7 @@ for your personal or group namespace. CI/CD minutes are a **one-time purchase**,
NOTE:
Free namespaces are subject to a 5GB storage and 10GB transfer [soft limit](https://about.gitlab.com/pricing/). Once all storage is available to view in the usage quota workflow, GitLab will automatically enforce the namespace storage limit and the project limit will be removed. This change will be announced separately. The storage and transfer add-on can be purchased to increase the limits.
-Projects have a free storage quota of 10 GB. To exceed this quota you must first
+Projects have a free storage quota of 10 GB. To exceed this quota you must first
[purchase one or more storage subscription units](#purchase-more-storage-and-transfer). Each unit provides 10 GB of additional
storage per namespace. A storage subscription is renewed annually. For more details, see
[Usage Quotas](../../user/usage_quotas.md).
diff --git a/doc/subscriptions/index.md b/doc/subscriptions/index.md
index ed96fbd91ef..27194f43b53 100644
--- a/doc/subscriptions/index.md
+++ b/doc/subscriptions/index.md
@@ -109,7 +109,7 @@ Purchases in the Customers Portal require a credit card on record as a payment m
multiple credit cards to your account, so that purchases for different products are charged to the
correct card.
-If you would like to use an alternative method to pay, please
+If you would like to use an alternative method to pay, please
[contact our Sales team](https://about.gitlab.com/sales/).
To change your payment method:
diff --git a/doc/topics/release_your_application.md b/doc/topics/release_your_application.md
index 61ca1468dca..31fc9b4dbb9 100644
--- a/doc/topics/release_your_application.md
+++ b/doc/topics/release_your_application.md
@@ -30,13 +30,13 @@ to Kubernetes clusters using the [GitLab agent](../user/clusters/agent/install/i
#### GitOps deployments **(PREMIUM)**
-With the [GitLab agent for Kubernetes](../user/clusters/agent/install/index.md), you can perform
-[pull-based deployments of Kubernetes manifests](../user/clusters/agent/gitops.md). This provides a scalable, secure,
+With the [GitLab agent for Kubernetes](../user/clusters/agent/install/index.md), you can perform
+[pull-based deployments of Kubernetes manifests](../user/clusters/agent/gitops.md). This provides a scalable, secure,
and cloud-native approach to manage Kubernetes deployments.
#### Deploy to Kubernetes from GitLab CI/CD
-With the [GitLab agent for Kubernetes](../user/clusters/agent/install/index.md), you can perform
+With the [GitLab agent for Kubernetes](../user/clusters/agent/install/index.md), you can perform
[push-based deployments](../user/clusters/agent/ci_cd_workflow.md) from GitLab CI/CD. The agent provides
a secure and reliable connection between GitLab and your Kubernetes cluster.
diff --git a/doc/update/index.md b/doc/update/index.md
index f7662d3cb9d..8facd86db5b 100644
--- a/doc/update/index.md
+++ b/doc/update/index.md
@@ -41,7 +41,7 @@ There are also instructions when you want to
### Installation from source
-- [Upgrading Community Edition and Enterprise Edition from source](upgrading_from_source.md) -
+- [Upgrading Community Edition and Enterprise Edition from source](upgrading_from_source.md) -
The guidelines for upgrading Community Edition and Enterprise Edition from source.
- [Patch versions](patch_versions.md) guide includes the steps needed for a
patch version, such as 13.2.0 to 13.2.1, and apply to both Community and Enterprise
@@ -328,7 +328,7 @@ sudo -u git -H bundle exec rake gitlab:elastic:list_pending_migrations
### What do you do if your Advanced Search migrations are stuck?
In GitLab 15.0, an Advanced Search migration named `DeleteOrphanedCommit` can be permanently stuck
-in a pending state across upgrades. This issue
+in a pending state across upgrades. This issue
[is corrected in GitLab 15.1](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/89539).
If you are a self-managed customer who uses GitLab 15.0 with Advanced Search, you will experience performance degradation.
@@ -1051,7 +1051,7 @@ In 13.1.0, you must upgrade to either:
Failure to do so results in internal errors in the Gitaly service in some RPCs due
to the use of the new `--end-of-options` Git flag.
-Additionally, in GitLab 13.1.0, the version of
+Additionally, in GitLab 13.1.0, the version of
[Rails was upgraded from 6.0.3 to 6.0.3.1](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/33454).
The Rails upgrade included a change to CSRF token generation which is
not backwards-compatible - GitLab servers with the new Rails version
diff --git a/doc/update/upgrading_from_source.md b/doc/update/upgrading_from_source.md
index 2df11e8f741..4f54e69d8c9 100644
--- a/doc/update/upgrading_from_source.md
+++ b/doc/update/upgrading_from_source.md
@@ -119,7 +119,7 @@ rm go1.17.10.linux-amd64.tar.gz
To check you are running the minimum required Git version, see
[Git versions](../install/installation.md#software-requirements).
-From GitLab 13.6, we recommend you use the
+From GitLab 13.6, we recommend you use the
[Git version provided by Gitaly](https://gitlab.com/gitlab-org/gitaly/-/issues/2729)
that:
diff --git a/doc/update/zero_downtime.md b/doc/update/zero_downtime.md
index 2f6ce5bc75e..29bb1c4bcb4 100644
--- a/doc/update/zero_downtime.md
+++ b/doc/update/zero_downtime.md
@@ -53,7 +53,7 @@ migrating data. Background migrations are only added in the monthly releases.
Certain major/minor releases may require a set of background migrations to be
finished. To guarantee this, such a release processes any remaining jobs
before continuing the upgrading procedure. While this doesn't require downtime
-(if the above conditions are met) we require that you
+(if the above conditions are met) we require that you
[wait for background migrations to complete](index.md#checking-for-background-migrations-before-upgrading)
between each major/minor release upgrade.
The time necessary to complete these migrations can be reduced by
@@ -585,7 +585,7 @@ the update on the **primary** node:
sudo gitlab-rake db:migrate
```
-- After the update is finalized on the primary node, hot reload `puma` and
+- After the update is finalized on the primary node, hot reload `puma` and
restart `sidekiq` and `geo-logcursor` services on **all primary and secondary**
nodes:
diff --git a/doc/user/admin_area/review_abuse_reports.md b/doc/user/admin_area/review_abuse_reports.md
index a5e7fcb1b8e..c5b6b3d86ba 100644
--- a/doc/user/admin_area/review_abuse_reports.md
+++ b/doc/user/admin_area/review_abuse_reports.md
@@ -26,7 +26,7 @@ The notification email address can also be set and retrieved
## Reporting abuse
-To find out more about reporting abuse, see
+To find out more about reporting abuse, see
[abuse reports user documentation](../report_abuse.md).
## Resolving abuse reports
diff --git a/doc/user/application_security/dast/checks/16.7.md b/doc/user/application_security/dast/checks/16.7.md
index 2e6607575db..a052149ee4d 100644
--- a/doc/user/application_security/dast/checks/16.7.md
+++ b/doc/user/application_security/dast/checks/16.7.md
@@ -25,7 +25,7 @@ Only three directives are applicable for the `Strict-Transport-Security` header.
Note that invalid directives, or the `Strict-Transport-Security` header appearing more than once (if the values are
different) is considered invalid.
-Prior to adding to this security configuration to your website, it is recommended you review the hstspreload.org
+Prior to adding this security configuration to your website, it is recommended you review the hstspreload.org
[Deployment Recommendations](https://hstspreload.org/#deployment-recommendations).
## Details
diff --git a/doc/user/application_security/dast/checks/209.1.md b/doc/user/application_security/dast/checks/209.1.md
index 2e4163bdec0..f2713a70afd 100644
--- a/doc/user/application_security/dast/checks/209.1.md
+++ b/doc/user/application_security/dast/checks/209.1.md
@@ -9,17 +9,17 @@ info: To determine the technical writer assigned to the Stage/Group associated w
## Description
The application was found to return error data such as stack traces. Depending on the data contained within the error message,
-this information could be used by an attacker to conduct further attacks. While stack traces are helpful during development
-and debugging, they should not be presented to users when an error occurs.
+this information could be used by an attacker to conduct further attacks. While stack traces are helpful during development
+and debugging, they should not be presented to users when an error occurs.
## Remediation
Applications should handle exception conditions internally and map known failure types to error codes that can be displayed
to a user. These error codes should be customized to the application and returned along with the relevant HTTP error code.
-When an error occurs, the application identifies the error type or class, and displays a numerical value to the
-user. Requests should also be tracked so when a user is presented with an error code, it has a corresponding request ID.
-Support teams can then correlate the HTTP error, the customized error code, and the request ID in the log files to
+When an error occurs, the application identifies the error type or class, and displays a numerical value to the
+user. Requests should also be tracked so when a user is presented with an error code, it has a corresponding request ID.
+Support teams can then correlate the HTTP error, the customized error code, and the request ID in the log files to
determine the root cause of the error without leaking details to the end user.
Example of returning customized errors:
diff --git a/doc/user/application_security/policies/index.md b/doc/user/application_security/policies/index.md
index 53f9c400259..edb9f401b9e 100644
--- a/doc/user/application_security/policies/index.md
+++ b/doc/user/application_security/policies/index.md
@@ -144,6 +144,6 @@ for more information on the product direction of security policies within GitLab
When you create a new security policy or change an existing policy, a new branch is automatically created with the branch name following the pattern `update-policy-<timestamp>`. For example: `update-policy-1659094451`.
-If you have group or instance push rules that do not allow branch name patterns that contain the text `update-policy-<timestamp>`, you will get an error that states `Branch name does not follow the pattern 'update-policy-<timestamp>'`.
+If you have group or instance push rules that do not allow branch name patterns that contain the text `update-policy-<timestamp>`, you will get an error that states `Branch name does not follow the pattern 'update-policy-<timestamp>'`.
The workaround is to amend your group or instance push rules to allow branches following the pattern `update-policy-` followed by an integer timestamp.
diff --git a/doc/user/clusters/agent/ci_cd_workflow.md b/doc/user/clusters/agent/ci_cd_workflow.md
index 16b92eb92a3..88625b89952 100644
--- a/doc/user/clusters/agent/ci_cd_workflow.md
+++ b/doc/user/clusters/agent/ci_cd_workflow.md
@@ -127,7 +127,7 @@ Run `kubectl config get-contexts`.
### Environments with both certificate-based and agent-based connections
-When you deploy to an environment that has both a
+When you deploy to an environment that has both a
[certificate-based cluster](../../infrastructure/clusters/index.md) (deprecated) and an agent connection:
- The certificate-based cluster's context is called `gitlab-deploy`. This context
diff --git a/doc/user/clusters/environments.md b/doc/user/clusters/environments.md
index cf71729b517..96f41531576 100644
--- a/doc/user/clusters/environments.md
+++ b/doc/user/clusters/environments.md
@@ -33,7 +33,7 @@ With cluster environments, you can gain insight into:
![Cluster environments page](img/cluster_environments_table_v12_3.png)
-Access to cluster environments is restricted to
+Access to cluster environments is restricted to
[group maintainers and owners](../permissions.md#group-members-permissions)
## Usage
diff --git a/doc/user/gitlab_com/index.md b/doc/user/gitlab_com/index.md
index 53e459f7a09..2152ca39e5a 100644
--- a/doc/user/gitlab_com/index.md
+++ b/doc/user/gitlab_com/index.md
@@ -336,7 +336,7 @@ documentation.
When a request is rate limited, GitLab responds with a `429` status
code. The client should wait before attempting the request again. There
-are also informational headers with this response detailed in
+are also informational headers with this response detailed in
[rate limiting responses](#rate-limiting-responses).
The following table describes the rate limits for GitLab.com, both before and
@@ -358,8 +358,8 @@ after the limits change in January, 2021:
| **Pipeline creation** requests (for a given **project, user, and commit**) | | **25** requests per minute |
| **Alert integration endpoint** requests (for a given **project**) | | **3600** requests per hour |
-More details are available on the rate limits for
-[protected paths](#protected-paths-throttle) and
+More details are available on the rate limits for
+[protected paths](#protected-paths-throttle) and
[raw endpoints](../../user/admin_area/settings/rate_limits_on_raw_endpoints.md).
GitLab can rate-limit requests at several layers. The rate limits listed here
diff --git a/doc/user/group/iterations/index.md b/doc/user/group/iterations/index.md
index a5102d27302..7a1db41e4f8 100644
--- a/doc/user/group/iterations/index.md
+++ b/doc/user/group/iterations/index.md
@@ -203,7 +203,7 @@ To create an iteration cadence:
1. Select **New iteration cadence**.
1. Enter the title and description of the iteration cadence.
1. To manually manage the iteration cadence, clear the **Enable automatic scheduling** checkbox and skip the next step.
-1. Complete the required fields to use automatic scheduling.
+1. Complete the required fields to use automatic scheduling.
- Select the automation start date of the iteration cadence. Iterations will be scheduled to
begin on the same day of the week as the day of the week of the start date.
- From the **Duration** dropdown list, select how many weeks each iteration should last.
@@ -241,7 +241,7 @@ doesn't delete the eight existing upcoming iterations.
1. Complete the required fields **Duration**, **Upcoming iterations**, and **Automation start date**.
For **Automation start date**, you can select any date that doesn't overlap with the existing open iterations.
If you have upcoming iterations, the automatic scheduling adjusts them appropriately to fit
-your chosen duration.
+your chosen duration.
1. Select **Save changes**.
When you want to manage your iterations cadence manually again, edit your cadence and uncheck the **Enable automatic scheduling** checkbox.
@@ -267,7 +267,7 @@ after the conversion you have the following iterations:
- Monday, April 18 - Sunday, April 24 (upcoming)
- Monday, April 25 - Sunday, May 1 (upcoming)
-Your existing upcoming iteration "Tuesday, April 12 - Friday, April 15"
+Your existing upcoming iteration "Tuesday, April 12 - Friday, April 15"
is changed to "April 18 - Sunday, April 24".
An additional upcoming iteration "April 25 - Sunday, May 1" is scheduled
diff --git a/doc/user/infrastructure/clusters/manage/management_project_applications/certmanager.md b/doc/user/infrastructure/clusters/manage/management_project_applications/certmanager.md
index 51fd626ce0f..5f77b7e402a 100644
--- a/doc/user/infrastructure/clusters/manage/management_project_applications/certmanager.md
+++ b/doc/user/infrastructure/clusters/manage/management_project_applications/certmanager.md
@@ -30,8 +30,8 @@ And update the `applications/cert-manager/helmfile.yaml` with a valid email addr
```
NOTE:
-If your Kubernetes version is earlier than 1.20 and you are
-[migrating from GitLab Managed Apps to a cluster management project](../../../../clusters/migrating_from_gma_to_project_template.md),
+If your Kubernetes version is earlier than 1.20 and you are
+[migrating from GitLab Managed Apps to a cluster management project](../../../../clusters/migrating_from_gma_to_project_template.md),
then you can instead use `- path: applications/cert-manager-legacy/helmfile.yaml` to
take over an existing release of cert-manager v0.10.
diff --git a/doc/user/packages/generic_packages/index.md b/doc/user/packages/generic_packages/index.md
index d4acb14b9ca..eb8cdd39517 100644
--- a/doc/user/packages/generic_packages/index.md
+++ b/doc/user/packages/generic_packages/index.md
@@ -215,7 +215,7 @@ It also demonstrates how to manage a semantic version for the generic package: s
### Internal Server error on large file uploads to S3
-S3-compatible object storage [limits the size of a single PUT request to 5GB](https://docs.aws.amazon.com/AmazonS3/latest/userguide/upload-objects.html). If the `aws_signature_version` is set to `2` in the [object storage connection settings](../../../administration/object_storage.md), attempting to publish a package file larger than the 5GB limit can result in a `HTTP 500: Internal Server Error` response.
+S3-compatible object storage [limits the size of a single PUT request to 5GB](https://docs.aws.amazon.com/AmazonS3/latest/userguide/upload-objects.html). If the `aws_signature_version` is set to `2` in the [object storage connection settings](../../../administration/object_storage.md), attempting to publish a package file larger than the 5GB limit can result in a `HTTP 500: Internal Server Error` response.
If you are receiving `HTTP 500: Internal Server Error` responses when publishing large files to S3, set the `aws_signature_version` to `4`:
diff --git a/doc/user/project/canary_deployments.md b/doc/user/project/canary_deployments.md
index f8494116655..aac704e2cdd 100644
--- a/doc/user/project/canary_deployments.md
+++ b/doc/user/project/canary_deployments.md
@@ -38,8 +38,8 @@ want to make sure the performance stays the same, or improves. Developers need
to be careful when using canaries with user-facing changes, because by default,
requests from the same user are randomly distributed between canary and
non-canary pods, which could result in confusion or even errors. If needed, you
-may want to consider
-[setting `service.spec.sessionAffinity` to `ClientIP` in your Kubernetes service definitions](https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies),
+may want to consider
+[setting `service.spec.sessionAffinity` to `ClientIP` in your Kubernetes service definitions](https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies),
but that is beyond the scope of this document.
## Advanced traffic control with Canary Ingress
diff --git a/doc/user/project/clusters/add_gke_clusters.md b/doc/user/project/clusters/add_gke_clusters.md
index bfaf9aab7b7..1e5a322748d 100644
--- a/doc/user/project/clusters/add_gke_clusters.md
+++ b/doc/user/project/clusters/add_gke_clusters.md
@@ -51,7 +51,7 @@ Note the following:
cluster's pod address IP range is set to `/16` instead of the regular `/14`. `/16` is a CIDR
notation.
- GitLab requires basic authentication enabled and a client certificate issued for the cluster to
- set up an [initial service account](cluster_access.md). In
+ set up an [initial service account](cluster_access.md). In
[GitLab versions 11.10 and later](https://gitlab.com/gitlab-org/gitlab-foss/-/issues/58208), the cluster creation process
explicitly requests GKE to create clusters with basic authentication enabled and a client
certificate.
diff --git a/doc/user/project/deploy_boards.md b/doc/user/project/deploy_boards.md
index 41afbdada6b..63010610605 100644
--- a/doc/user/project/deploy_boards.md
+++ b/doc/user/project/deploy_boards.md
@@ -116,7 +116,7 @@ To display the deploy boards for a specific [environment](../../ci/environments/
Kubernetes.
NOTE:
- Matching based on the Kubernetes `app` label was removed in
+ Matching based on the Kubernetes `app` label was removed in
[GitLab 12.1](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/14020).
To migrate, please apply the required annotations (see above) and
re-deploy your application. If you are using Auto DevOps, this will
diff --git a/doc/user/project/git_attributes.md b/doc/user/project/git_attributes.md
index 90f64b7262c..f2e4b65e3d4 100644
--- a/doc/user/project/git_attributes.md
+++ b/doc/user/project/git_attributes.md
@@ -23,5 +23,5 @@ ignored.
## Syntax Highlighting
The `.gitattributes` file can be used to define which language to use when
-syntax highlighting files and diffs. See
+syntax highlighting files and diffs. See
["Syntax Highlighting"](highlighting.md) for more information.
diff --git a/doc/user/project/import/clearcase.md b/doc/user/project/import/clearcase.md
index d9ad0c57d79..2d9f92c38e4 100644
--- a/doc/user/project/import/clearcase.md
+++ b/doc/user/project/import/clearcase.md
@@ -11,7 +11,7 @@ info: To determine the technical writer assigned to the Stage/Group associated w
tools developed by IBM which also include a centralized version control system
similar to Git.
-A good read of ClearCase's basic concepts is can be found in this
+A good read of ClearCase's basic concepts can be found in this
[StackOverflow post](https://stackoverflow.com/a/645771/974710).
The following table illustrates the main differences between ClearCase and Git:
diff --git a/doc/user/project/integrations/pumble.md b/doc/user/project/integrations/pumble.md
index cd28a7c0048..4d22fdd3fb9 100644
--- a/doc/user/project/integrations/pumble.md
+++ b/doc/user/project/integrations/pumble.md
@@ -25,7 +25,7 @@ notifications:
1. To enable the integration for your group or project:
1. In your group or project, on the left sidebar, select **Settings > Integrations**.
-1. To enable the integration for your instance:
+1. To enable the integration for your instance:
1. On the top bar, select **Menu > Admin**.
1. On the left sidebar, select **Settings > Integrations**.
1. Select the **Pumble** integration.
@@ -36,4 +36,4 @@ notifications:
1. Optional. To test the integration, select **Test settings**.
1. Select **Save changes**.
-The Pumble channel begins to receive all applicable GitLab events.
+The Pumble channel begins to receive all applicable GitLab events.
diff --git a/doc/user/project/members/share_project_with_groups.md b/doc/user/project/members/share_project_with_groups.md
index 3d5b855a9d3..1196fe3c524 100644
--- a/doc/user/project/members/share_project_with_groups.md
+++ b/doc/user/project/members/share_project_with_groups.md
@@ -83,7 +83,7 @@ The following outcomes occur:
## Share project with group lock
-It is possible to prevent projects in a group from
+It is possible to prevent projects in a group from
[sharing a project with another group](../members/share_project_with_groups.md).
This allows for tighter control over project access.
diff --git a/doc/user/project/pages/getting_started/pages_from_scratch.md b/doc/user/project/pages/getting_started/pages_from_scratch.md
index 68a2a6a80ad..79cd841117a 100644
--- a/doc/user/project/pages/getting_started/pages_from_scratch.md
+++ b/doc/user/project/pages/getting_started/pages_from_scratch.md
@@ -420,7 +420,7 @@ Now GitLab CI/CD not only builds the website, but also:
For more information, see the following blog posts.
-- Use GitLab CI/CD `environments` to
+- Use GitLab CI/CD `environments` to
[deploy your web app to staging and production](https://about.gitlab.com/blog/2021/02/05/ci-deployment-and-environments/).
- Learn how to run jobs
[sequentially, in parallel, or build a custom pipeline](https://about.gitlab.com/blog/2016/07/29/the-basics-of-gitlab-ci/).
diff --git a/doc/user/project/push_options.md b/doc/user/project/push_options.md
index d02609cbdc7..3eb333f5785 100644
--- a/doc/user/project/push_options.md
+++ b/doc/user/project/push_options.md
@@ -102,7 +102,7 @@ long Git commands.
### Merge when pipeline succeeds alias
-To set up a Git alias for the
+To set up a Git alias for the
[merge when pipeline succeeds Git push option](#push-options-for-merge-requests):
```shell
diff --git a/doc/user/project/repository/reducing_the_repo_size_using_git.md b/doc/user/project/repository/reducing_the_repo_size_using_git.md
index 344c288b607..f209c7ef137 100644
--- a/doc/user/project/repository/reducing_the_repo_size_using_git.md
+++ b/doc/user/project/repository/reducing_the_repo_size_using_git.md
@@ -46,7 +46,7 @@ To purge files from a GitLab repository:
[`git-sizer`](https://github.com/github/git-sizer#getting-started)
using a supported package manager or from source.
-1. Generate a fresh
+1. Generate a fresh
[export from the project](../settings/import_export.md#export-a-project-and-its-data) and download it.
This project export contains a backup copy of your repository *and* refs
we can use to purge files from your repository.
diff --git a/doc/user/usage_quotas.md b/doc/user/usage_quotas.md
index 5d78b4bb795..5260e0396a7 100644
--- a/doc/user/usage_quotas.md
+++ b/doc/user/usage_quotas.md
@@ -39,9 +39,9 @@ To prevent exceeding the namespace storage quota, you can:
### Namespace storage limit enforcement schedule
-Storage limits for GitLab SaaS Free tier namespaces will not be enforced prior to 2022-10-19. Storage limits for GitLab SaaS Paid tier namespaces will not be enforced for prior to 2023-02-15.
+Storage limits for GitLab SaaS Free tier namespaces will not be enforced prior to 2022-10-19. Storage limits for GitLab SaaS Paid tier namespaces will not be enforced prior to 2023-02-15.
-Impacted users are notified via email and in-app notifications at least 60 days prior to enforcement.
+Impacted users are notified via email and in-app notifications at least 60 days prior to enforcement.
### Project storage limit
diff --git a/lib/api/groups.rb b/lib/api/groups.rb
index 82bbab5d7d4..200e5f7bdfe 100644
--- a/lib/api/groups.rb
+++ b/lib/api/groups.rb
@@ -96,9 +96,9 @@ module API
present options[:with].prepare_relation(projects, options), options
end
- def present_groups(params, groups)
+ def present_groups(params, groups, serializer: Entities::Group)
options = {
- with: Entities::Group,
+ with: serializer,
current_user: current_user,
statistics: params[:statistics] && current_user&.admin?
}
@@ -392,6 +392,21 @@ module API
end
end
+ desc 'Get the groups to where the current group can be transferred to'
+ params do
+ optional :search, type: String, desc: 'Return list of namespaces matching the search criteria'
+ use :pagination
+ end
+ get ':id/transfer_locations', feature_category: :subgroups do
+ authorize! :admin_group, user_group
+ args = declared_params(include_missing: false)
+
+ groups = ::Groups::AcceptingGroupTransfersFinder.new(current_user, user_group, args).execute
+ groups = groups.with_route
+
+ present_groups params, groups, serializer: Entities::PublicGroupDetails
+ end
+
desc 'Transfer a group to a new parent group or promote a subgroup to a root group'
params do
optional :group_id,
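
For illustration, a rough Ruby sketch of calling the new `transfer_locations` endpoint (assumes the standard `/api/v4` prefix, `PRIVATE-TOKEN` authentication, and a hypothetical group ID; the exact fields returned depend on `Entities::PublicGroupDetails`):

```ruby
require 'net/http'
require 'json'
require 'uri'

uri = URI('https://gitlab.example.com/api/v4/groups/42/transfer_locations?search=group')

request = Net::HTTP::Get.new(uri)
request['PRIVATE-TOKEN'] = ENV.fetch('GITLAB_TOKEN')

response = Net::HTTP.start(uri.host, uri.port, use_ssl: true) { |http| http.request(request) }

# Each entry is a candidate parent group for the transfer.
JSON.parse(response.body).each { |group| puts group['name'] }
```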
diff --git a/lib/gitlab/uploads/migration_helper.rb b/lib/gitlab/uploads/migration_helper.rb
index deab2cd43a6..712512d0e02 100644
--- a/lib/gitlab/uploads/migration_helper.rb
+++ b/lib/gitlab/uploads/migration_helper.rb
@@ -5,27 +5,10 @@ module Gitlab
class MigrationHelper
attr_reader :logger
- CATEGORIES = [%w(AvatarUploader Project :avatar),
- %w(AvatarUploader Group :avatar),
- %w(AvatarUploader User :avatar),
- %w(AttachmentUploader Note :attachment),
- %w(AttachmentUploader Appearance :logo),
- %w(AttachmentUploader Appearance :header_logo),
- %w(FaviconUploader Appearance :favicon),
- %w(FileUploader Project),
- %w(PersonalFileUploader Snippet),
- %w(NamespaceFileUploader Snippet),
- %w(DesignManagement::DesignV432x230Uploader DesignManagement::Action :image_v432x230),
- %w(FileUploader MergeRequest)].freeze
-
def initialize(args, logger)
prepare_variables(args, logger)
end
- def self.categories
- CATEGORIES
- end
-
def migrate_to_remote_storage
@to_store = ObjectStorage::Store::REMOTE
@@ -45,17 +28,14 @@ module Gitlab
end
def prepare_variables(args, logger)
- @mounted_as = args.mounted_as&.gsub(':', '')&.to_sym
- @uploader_class = args.uploader_class.constantize
- @model_class = args.model_class.constantize
+ @mounted_as = args.mounted_as&.gsub(':', '')
+ @uploader_class = args.uploader_class
+ @model_class = args.model_class&.constantize
@logger = logger
end
def enqueue_batch(batch, index)
- job = ObjectStorage::MigrateUploadsWorker.enqueue!(batch,
- @model_class,
- @mounted_as,
- @to_store)
+ job = ObjectStorage::MigrateUploadsWorker.enqueue!(batch, @to_store)
logger.info(message: "[Uploads migration] Enqueued upload migration job", index: index, job_id: job)
rescue ObjectStorage::MigrateUploadsWorker::SanityCheckError => e
# continue for the next batch
@@ -66,10 +46,12 @@ module Gitlab
def uploads(store_type = [nil, ObjectStorage::Store::LOCAL])
Upload.class_eval { include EachBatch } unless Upload < EachBatch
- Upload
- .where(store: store_type,
- uploader: @uploader_class.to_s,
- model_type: @model_class.base_class.sti_name)
+ uploads = Upload.where(store: store_type)
+ uploads = uploads.where(uploader: @uploader_class) if @uploader_class.present?
+ uploads = uploads.where(model_type: @model_class.base_class.sti_name) if @model_class.present?
+ uploads = uploads.where(mount_point: @mounted_as) if @mounted_as.present?
+
+ uploads
end
# rubocop:enable CodeReuse/ActiveRecord
end
diff --git a/lib/tasks/gitlab/uploads/migrate.rake b/lib/tasks/gitlab/uploads/migrate.rake
index 80290f95e8e..2a91fd1646c 100644
--- a/lib/tasks/gitlab/uploads/migrate.rake
+++ b/lib/tasks/gitlab/uploads/migrate.rake
@@ -2,15 +2,8 @@
namespace :gitlab do
namespace :uploads do
- namespace :migrate do
- desc "GitLab | Uploads | Migrate all uploaded files to object storage"
- task all: :environment do
- Gitlab::Uploads::MigrationHelper.categories.each do |args|
- Rake::Task["gitlab:uploads:migrate"].invoke(*args)
- Rake::Task["gitlab:uploads:migrate"].reenable
- end
- end
- end
+ desc "GitLab | Uploads | Migrate all uploaded files to object storage"
+ task 'migrate:all' => :migrate
# The following is the actual rake task that migrates uploads of specified
# category to object storage
@@ -19,15 +12,8 @@ namespace :gitlab do
Gitlab::Uploads::MigrationHelper.new(args, Logger.new($stdout)).migrate_to_remote_storage
end
- namespace :migrate_to_local do
- desc "GitLab | Uploads | Migrate all uploaded files to local storage"
- task all: :environment do
- Gitlab::Uploads::MigrationHelper.categories.each do |args|
- Rake::Task["gitlab:uploads:migrate_to_local"].invoke(*args)
- Rake::Task["gitlab:uploads:migrate_to_local"].reenable
- end
- end
- end
+ desc "GitLab | Uploads | Migrate all uploaded files to local storage"
+ task 'migrate_to_local:all' => :migrate_to_local
desc 'GitLab | Uploads | Migrate the uploaded files of specified type to local storage'
task :migrate_to_local, [:uploader_class, :model_class, :mounted_as] => :environment do |_t, args|
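
A sketch of how the simplified tasks can be driven from Ruby (the task names and the `[uploader_class, model_class, mounted_as]` argument order come from the rake file above; run from a context where the Rails environment is loaded, such as a console or runner):

```ruby
Rails.application.load_tasks

# With the optional filters above, the argument-less task migrates every upload,
# so 'migrate:all' can simply depend on it.
Rake::Task['gitlab:uploads:migrate:all'].invoke

# Migrating a single category still works by passing task arguments.
Rake::Task['gitlab:uploads:migrate'].reenable
Rake::Task['gitlab:uploads:migrate'].invoke('FileUploader', 'Project')
```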
diff --git a/locale/gitlab.pot b/locale/gitlab.pot
index 553f2f4cb5f..7dfa44ec153 100644
--- a/locale/gitlab.pot
+++ b/locale/gitlab.pot
@@ -22549,9 +22549,6 @@ msgstr ""
msgid "Job has wrong arguments format."
msgstr ""
-msgid "Job is missing the `model_type` argument."
-msgstr ""
-
msgid "Job is stuck. Check runners."
msgstr ""
@@ -25663,9 +25660,6 @@ msgstr ""
msgid "Most stars"
msgstr ""
-msgid "Mount point %{mounted_as} not found in %{model_class}."
-msgstr ""
-
msgid "Move"
msgstr ""
@@ -25756,12 +25750,6 @@ msgstr ""
msgid "Multiple Prometheus integrations are not supported"
msgstr ""
-msgid "Multiple model types found: %{model_types}"
-msgstr ""
-
-msgid "Multiple uploaders found: %{uploader_types}"
-msgstr ""
-
msgid "Multiplier to apply to polling intervals. Decimal values are supported. Defaults to 1."
msgstr ""
diff --git a/spec/finders/groups/accepting_group_transfers_finder_spec.rb b/spec/finders/groups/accepting_group_transfers_finder_spec.rb
new file mode 100644
index 00000000000..6c1ceb4f7ad
--- /dev/null
+++ b/spec/finders/groups/accepting_group_transfers_finder_spec.rb
@@ -0,0 +1,93 @@
+# frozen_string_literal: true
+
+require 'spec_helper'
+
+RSpec.describe Groups::AcceptingGroupTransfersFinder do
+ let_it_be(:current_user) { create(:user) }
+
+ let_it_be(:great_grandparent_group) do
+ create(:group, name: 'great grandparent group', path: 'great-grandparent-group')
+ end
+
+ let_it_be(:grandparent_group) { create(:group, parent: great_grandparent_group) }
+ let_it_be(:parent_group) { create(:group, parent: grandparent_group) }
+ let_it_be(:child_group) { create(:group, parent: parent_group) }
+ let_it_be(:grandchild_group) { create(:group, parent: child_group) }
+ let_it_be(:group_where_user_has_owner_access) do
+ create(:group, name: 'owner access group', path: 'owner-access-group').tap do |group|
+ group.add_owner(current_user)
+ end
+ end
+
+ let_it_be(:subgroup_of_group_where_user_has_owner_access) do
+ create(:group, parent: group_where_user_has_owner_access)
+ end
+
+ let_it_be(:group_where_user_has_developer_access) do
+ create(:group).tap do |group|
+ group.add_developer(current_user)
+ end
+ end
+
+ let(:params) { {} }
+
+ describe '#execute' do
+ let(:group_to_be_transferred) { parent_group }
+
+ subject(:result) do
+ described_class.new(current_user, group_to_be_transferred, params).execute
+ end
+
+ context 'when the user does not have the rights to transfer the group' do
+ before do
+ group_to_be_transferred.root_ancestor.add_developer(current_user)
+ end
+
+ it 'returns empty result' do
+ expect(result).to be_empty
+ end
+ end
+
+ context 'when the user has the rights to transfer the group' do
+ before do
+ group_to_be_transferred.root_ancestor.add_owner(current_user)
+ end
+
+ it 'does not return empty result' do
+ expect(result).not_to be_empty
+ end
+
+ it 'excludes the descendants of the group to be transferred' do
+ expect(result).not_to include(child_group, grandchild_group)
+ end
+
+ it 'excludes the immediate parent of the group to be transferred' do
+ expect(result).not_to include(grandparent_group)
+ end
+
+ it 'excludes the groups where the user does not have OWNER access' do
+ expect(result).not_to include(group_where_user_has_developer_access)
+ end
+
+ it 'includes ancestors, except immediate parent of the group to be transferred' do
+ expect(result).to include(great_grandparent_group)
+ end
+
+ it 'includes the other groups where the user has OWNER access' do
+ expect(result).to include(group_where_user_has_owner_access)
+ end
+
+ it 'includes the other groups where the user has OWNER access through inherited membership' do
+ expect(result).to include(subgroup_of_group_where_user_has_owner_access)
+ end
+
+ context 'on searching with a specific term' do
+ let(:params) { { search: 'great grandparent group' } }
+
+ it 'includes only the groups where the term matches the group name or path' do
+ expect(result).to contain_exactly(great_grandparent_group)
+ end
+ end
+ end
+ end
+end
diff --git a/spec/graphql/resolvers/issues_resolver_spec.rb b/spec/graphql/resolvers/issues_resolver_spec.rb
index 89e45810033..a74b2a3f18c 100644
--- a/spec/graphql/resolvers/issues_resolver_spec.rb
+++ b/spec/graphql/resolvers/issues_resolver_spec.rb
@@ -311,49 +311,15 @@ RSpec.describe Resolvers::IssuesResolver do
end
context 'when searching issues' do
- it 'returns correct issues' do
- expect(resolve_issues(search: 'foo')).to contain_exactly(issue2)
- end
-
- it 'uses project search optimization' do
- expected_arguments = a_hash_including(
- search: 'foo',
- attempt_project_search_optimizations: true
- )
- expect(IssuesFinder).to receive(:new).with(anything, expected_arguments).and_call_original
-
- resolve_issues(search: 'foo')
- end
-
- context 'with anonymous user' do
- let_it_be(:public_project) { create(:project, :public) }
- let_it_be(:public_issue) { create(:issue, project: public_project, title: 'Test issue') }
-
- context 'with disable_anonymous_search enabled' do
- before do
- stub_feature_flags(disable_anonymous_search: true)
- end
-
- it 'generates an error' do
- error_message = "User must be authenticated to include the `search` argument."
-
- expect_graphql_error_to_be_created(Gitlab::Graphql::Errors::ArgumentError, error_message) do
- resolve(described_class, obj: public_project, args: { search: 'test' }, ctx: { current_user: nil })
- end
- end
- end
-
- context 'with disable_anonymous_search disabled' do
- before do
- stub_feature_flags(disable_anonymous_search: false)
- end
-
- it 'returns correct issues' do
- expect(
- resolve(described_class, obj: public_project, args: { search: 'test' }, ctx: { current_user: nil })
- ).to contain_exactly(public_issue)
- end
- end
+ it_behaves_like 'graphql query for searching issuables' do
+ let_it_be(:parent) { project }
+ let_it_be(:issuable1) { create(:issue, project: project, title: 'first created') }
+ let_it_be(:issuable2) { create(:issue, project: project, title: 'second created', description: 'text 1') }
+ let_it_be(:issuable3) { create(:issue, project: project, title: 'third', description: 'text 2') }
+ let_it_be(:issuable4) { create(:issue, project: project) }
+
+ let_it_be(:finder_class) { IssuesFinder }
+ let_it_be(:optimization_param) { :attempt_project_search_optimizations }
end
end
diff --git a/spec/graphql/resolvers/work_items_resolver_spec.rb b/spec/graphql/resolvers/work_items_resolver_spec.rb
index 29eac0ab46e..ef7cc0daa0c 100644
--- a/spec/graphql/resolvers/work_items_resolver_spec.rb
+++ b/spec/graphql/resolvers/work_items_resolver_spec.rb
@@ -52,49 +52,15 @@ RSpec.describe Resolvers::WorkItemsResolver do
end
context 'when searching items' do
- it 'returns correct items' do
- expect(resolve_items(search: 'foo')).to contain_exactly(item2)
- end
-
- it 'uses project search optimization' do
- expected_arguments = a_hash_including(
- search: 'foo',
- attempt_project_search_optimizations: true
- )
- expect(::WorkItems::WorkItemsFinder).to receive(:new).with(anything, expected_arguments).and_call_original
-
- resolve_items(search: 'foo')
- end
-
- context 'with anonymous user' do
- let_it_be(:public_project) { create(:project, :public) }
- let_it_be(:public_item) { create(:work_item, project: public_project, title: 'Test item') }
-
- context 'with disable_anonymous_search enabled' do
- before do
- stub_feature_flags(disable_anonymous_search: true)
- end
-
- it 'generates an error' do
- error_message = "User must be authenticated to include the `search` argument."
-
- expect_graphql_error_to_be_created(Gitlab::Graphql::Errors::ArgumentError, error_message) do
- resolve(described_class, obj: public_project, args: { search: 'test' }, ctx: { current_user: nil })
- end
- end
- end
-
- context 'with disable_anonymous_search disabled' do
- before do
- stub_feature_flags(disable_anonymous_search: false)
- end
-
- it 'returns correct items' do
- expect(
- resolve(described_class, obj: public_project, args: { search: 'test' }, ctx: { current_user: nil })
- ).to contain_exactly(public_item)
- end
- end
+ it_behaves_like 'graphql query for searching issuables' do
+ let_it_be(:parent) { project }
+ let_it_be(:issuable1) { create(:work_item, project: project, title: 'first created') }
+ let_it_be(:issuable2) { create(:work_item, project: project, title: 'second created', description: 'text 1') }
+ let_it_be(:issuable3) { create(:work_item, project: project, title: 'third', description: 'text 2') }
+ let_it_be(:issuable4) { create(:work_item, project: project) }
+
+ let_it_be(:finder_class) { ::WorkItems::WorkItemsFinder }
+ let_it_be(:optimization_param) { :attempt_project_search_optimizations }
end
end
diff --git a/spec/helpers/commits_helper_spec.rb b/spec/helpers/commits_helper_spec.rb
index b27954de0d4..d2d2e226f2a 100644
--- a/spec/helpers/commits_helper_spec.rb
+++ b/spec/helpers/commits_helper_spec.rb
@@ -329,7 +329,7 @@ RSpec.describe CommitsHelper do
it { is_expected.to include(commit.author) }
it { is_expected.to include(ref) }
- it do
+ specify do
is_expected.to include(
{
merge_request: merge_request.cache_key,
diff --git a/spec/helpers/users/callouts_helper_spec.rb b/spec/helpers/users/callouts_helper_spec.rb
index 2c148aabead..170ae098a2f 100644
--- a/spec/helpers/users/callouts_helper_spec.rb
+++ b/spec/helpers/users/callouts_helper_spec.rb
@@ -241,10 +241,10 @@ RSpec.describe Users::CalloutsHelper do
context 'the web-hook failure callout has been dismissed', :freeze_time do
before do
- create(:namespace_callout,
+ create(:project_callout,
feature_name: described_class::WEB_HOOK_DISABLED,
user: user,
- namespace: project.namespace,
+ project: project,
dismissed_at: 1.week.ago)
end
diff --git a/spec/requests/api/graphql/project/issues_spec.rb b/spec/requests/api/graphql/project/issues_spec.rb
index 596e023a027..d0ed4212349 100644
--- a/spec/requests/api/graphql/project/issues_spec.rb
+++ b/spec/requests/api/graphql/project/issues_spec.rb
@@ -27,14 +27,6 @@ RSpec.describe 'getting an issue list for a project' do
QUERY
end
- let(:query) do
- graphql_query_for(
- 'project',
- { 'fullPath' => project.full_path },
- query_graphql_field('issues', issue_filter_params, fields)
- )
- end
-
it_behaves_like 'a working graphql query' do
before do
post_graphql(query, current_user: current_user)
@@ -89,6 +81,14 @@ RSpec.describe 'getting an issue list for a project' do
end
end
+ context 'when filtering by search' do
+ it_behaves_like 'query with a search term' do
+ let(:issuable_data) { issues_data }
+ let(:user) { current_user }
+ let_it_be(:issuable) { create(:issue, project: project, description: 'bar') }
+ end
+ end
+
context 'when limiting the number of results' do
let(:query) do
<<~GQL
@@ -679,4 +679,12 @@ RSpec.describe 'getting an issue list for a project' do
def issues_ids
graphql_dig_at(issues_data, :node, :id)
end
+
+ def query(params = issue_filter_params)
+ graphql_query_for(
+ 'project',
+ { 'fullPath' => project.full_path },
+ query_graphql_field('issues', params, fields)
+ )
+ end
end
diff --git a/spec/requests/api/graphql/project/work_items_spec.rb b/spec/requests/api/graphql/project/work_items_spec.rb
index 6ef28392b8b..1dd0046f782 100644
--- a/spec/requests/api/graphql/project/work_items_spec.rb
+++ b/spec/requests/api/graphql/project/work_items_spec.rb
@@ -27,14 +27,6 @@ RSpec.describe 'getting an work item list for a project' do
QUERY
end
- let(:query) do
- graphql_query_for(
- 'project',
- { 'fullPath' => project.full_path },
- query_graphql_field('workItems', item_filter_params, fields)
- )
- end
-
it_behaves_like 'a working graphql query' do
before do
post_graphql(query, current_user: current_user)
@@ -83,6 +75,14 @@ RSpec.describe 'getting an work item list for a project' do
end
end
+ context 'when filtering by search' do
+ it_behaves_like 'query with a search term' do
+ let(:issuable_data) { items_data }
+ let(:user) { current_user }
+ let_it_be(:issuable) { create(:work_item, project: project, description: 'bar') }
+ end
+ end
+
describe 'sorting and pagination' do
let(:data_path) { [:project, :work_items] }
@@ -118,4 +118,12 @@ RSpec.describe 'getting an work item list for a project' do
def item_ids
graphql_dig_at(items_data, :node, :id)
end
+
+ def query(params = item_filter_params)
+ graphql_query_for(
+ 'project',
+ { 'fullPath' => project.full_path },
+ query_graphql_field('workItems', params, fields)
+ )
+ end
end
diff --git a/spec/requests/api/groups_spec.rb b/spec/requests/api/groups_spec.rb
index 0be3341dd13..74be97fca2a 100644
--- a/spec/requests/api/groups_spec.rb
+++ b/spec/requests/api/groups_spec.rb
@@ -2029,6 +2029,81 @@ RSpec.describe API::Groups do
end
end
+ describe 'GET /groups/:id/transfer_locations' do
+ let_it_be(:user) { create(:user) }
+ let_it_be(:source_group) { create(:group, :private) }
+
+ let(:params) { {} }
+
+ subject(:request) do
+ get api("/groups/#{source_group.id}/transfer_locations", user), params: params
+ end
+
+ context 'when the user has rights to transfer the group' do
+ let_it_be(:guest_group) { create(:group) }
+ let_it_be(:maintainer_group) { create(:group, name: 'maintainer group', path: 'maintainer-group') }
+ let_it_be(:owner_group_1) { create(:group, name: 'owner group', path: 'owner-group') }
+ let_it_be(:owner_group_2) { create(:group, name: 'gitlab group', path: 'gitlab-group') }
+
+ before do
+ source_group.add_owner(user)
+ guest_group.add_guest(user)
+ maintainer_group.add_maintainer(user)
+ owner_group_1.add_owner(user)
+ owner_group_2.add_owner(user)
+ end
+
+ it 'returns 200' do
+ request
+
+ expect(response).to have_gitlab_http_status(:ok)
+ expect(response).to include_pagination_headers
+ end
+
+ it 'only includes groups where the user has permissions to transfer a group to' do
+ request
+
+ expect(group_ids_from_response).to contain_exactly(owner_group_1.id, owner_group_2.id)
+ end
+
+ context 'with search' do
+ let(:params) { { search: 'gitlab' } }
+
+ it 'includes groups where the user has permissions to transfer a group to, matching the search term' do
+ request
+
+ expect(group_ids_from_response).to contain_exactly(owner_group_2.id)
+ end
+ end
+
+ def group_ids_from_response
+ json_response.map { |group| group['id'] }
+ end
+ end
+
+ context 'when the user does not have permissions to transfer the group' do
+ before do
+ source_group.add_developer(user)
+ end
+
+ it 'returns 403' do
+ request
+
+ expect(response).to have_gitlab_http_status(:forbidden)
+ end
+ end
+
+ context 'for an anonymous user' do
+ let_it_be(:user) { nil }
+
+ it 'returns 404' do
+ request
+
+ expect(response).to have_gitlab_http_status(:not_found)
+ end
+ end
+ end
+
describe 'POST /groups/:id/transfer' do
let_it_be(:user) { create(:user) }
let_it_be_with_reload(:new_parent_group) { create(:group, :private) }
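For context, a minimal sketch of calling the endpoint these specs exercise from plain Ruby, assuming a reachable GitLab instance and a personal access token; the host, token variable, and group ID are placeholders, while the path and the optional `search` parameter come from the request spec above.

require 'net/http'
require 'json'

source_group_id = 42 # placeholder
uri = URI("https://gitlab.example.com/api/v4/groups/#{source_group_id}/transfer_locations?search=gitlab")

request = Net::HTTP::Get.new(uri)
request['PRIVATE-TOKEN'] = ENV.fetch('GITLAB_TOKEN') # placeholder token source

response = Net::HTTP.start(uri.hostname, uri.port, use_ssl: true) { |http| http.request(request) }
group_ids = JSON.parse(response.body).map { |group| group['id'] }
# group_ids: groups the authenticated user may transfer the source group into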
diff --git a/spec/support/shared_examples/graphql/resolvers/issuable_resolvers_shared_examples.rb b/spec/support/shared_examples/graphql/resolvers/issuable_resolvers_shared_examples.rb
new file mode 100644
index 00000000000..feaa8070090
--- /dev/null
+++ b/spec/support/shared_examples/graphql/resolvers/issuable_resolvers_shared_examples.rb
@@ -0,0 +1,95 @@
+# frozen_string_literal: true
+
+# Requires `parent`, `issuable1`, `issuable2`, `issuable3`, `issuable4`,
+# `finder_class` and `optimization_param` bindings.
+RSpec.shared_examples 'graphql query for searching issuables' do
+ it 'uses search optimization' do
+ expected_arguments = a_hash_including(
+ search: 'text',
+ optimization_param => true
+ )
+ expect(finder_class).to receive(:new).with(anything, expected_arguments).and_call_original
+
+ resolve_issuables(search: 'text')
+ end
+
+ it 'filters issuables by title' do
+ issuables = resolve_issuables(search: 'created')
+
+ expect(issuables).to contain_exactly(issuable1, issuable2)
+ end
+
+ it 'filters issuables by description' do
+ issuables = resolve_issuables(search: 'text')
+
+ expect(issuables).to contain_exactly(issuable2, issuable3)
+ end
+
+ context 'with in param' do
+ it 'generates an error if param search is missing' do
+ error_message = "`search` should be present when including the `in` argument"
+
+ expect_graphql_error_to_be_created(Gitlab::Graphql::Errors::ArgumentError, error_message) do
+ resolve_issuables(in: ['title'])
+ end
+ end
+
+ it 'filters issuables by title and description' do
+ issuable4.update!(title: 'fourth text')
+ issuables = resolve_issuables(search: 'text', in: %w[title description])
+
+ expect(issuables).to contain_exactly(issuable2, issuable3, issuable4)
+ end
+
+ it 'filters issuables by description only' do
+ with_text = resolve_issuables(search: 'text', in: ['description'])
+ with_created = resolve_issuables(search: 'created', in: ['description'])
+
+ expect(with_created).to be_empty
+ expect(with_text).to contain_exactly(issuable2, issuable3)
+ end
+
+ it 'filters issuables by title only' do
+ with_text = resolve_issuables(search: 'text', in: ['title'])
+ with_created = resolve_issuables(search: 'created', in: ['title'])
+
+ expect(with_created).to contain_exactly(issuable1, issuable2)
+ expect(with_text).to be_empty
+ end
+ end
+
+ context 'with anonymous user' do
+ let_it_be(:current_user) { nil }
+
+ context 'with disable_anonymous_search as `true`' do
+ before do
+ stub_feature_flags(disable_anonymous_search: true)
+ end
+
+ it 'returns an error' do
+ error_message = "User must be authenticated to include the `search` argument."
+
+ expect_graphql_error_to_be_created(Gitlab::Graphql::Errors::ArgumentError, error_message) do
+ resolve_issuables(search: 'created')
+ end
+ end
+ end
+
+ context 'with disable_anonymous_search as `false`' do
+ before do
+ stub_feature_flags(disable_anonymous_search: false)
+ parent.update!(visibility_level: Gitlab::VisibilityLevel::PUBLIC)
+ end
+
+ it 'filters issuables by search term' do
+ issuables = resolve_issuables(search: 'created')
+
+ expect(issuables).to contain_exactly(issuable1, issuable2)
+ end
+ end
+ end
+
+ def resolve_issuables(args = {}, obj = parent, context = { current_user: current_user })
+ resolve(described_class, obj: obj, args: args, ctx: context, arg_style: :internal)
+ end
+end
diff --git a/spec/support/shared_examples/requests/api/graphql/issuable_search_shared_examples.rb b/spec/support/shared_examples/requests/api/graphql/issuable_search_shared_examples.rb
new file mode 100644
index 00000000000..22805cf7aed
--- /dev/null
+++ b/spec/support/shared_examples/requests/api/graphql/issuable_search_shared_examples.rb
@@ -0,0 +1,14 @@
+# frozen_string_literal: true
+
+# Requires `query(params)`, `user`, `issuable_data` and `issuable` bindings
+RSpec.shared_examples 'query with a search term' do
+ it 'returns only matching issuables' do
+ filter_params = { search: 'bar', in: [:DESCRIPTION] }
+ graphql_query = query(filter_params)
+
+ post_graphql(graphql_query, current_user: user)
+ ids = graphql_dig_at(issuable_data, :node, :id)
+
+ expect(ids).to contain_exactly(issuable.to_global_id.to_s)
+ end
+end
diff --git a/spec/support/shared_examples/tasks/gitlab/uploads/migration_shared_examples.rb b/spec/support/shared_examples/tasks/gitlab/uploads/migration_shared_examples.rb
deleted file mode 100644
index b37a8059574..00000000000
--- a/spec/support/shared_examples/tasks/gitlab/uploads/migration_shared_examples.rb
+++ /dev/null
@@ -1,31 +0,0 @@
-# frozen_string_literal: true
-
-# Expects the calling spec to define:
-# - uploader_class
-# - model_class
-# - mounted_as
-RSpec.shared_examples 'enqueue upload migration jobs in batch' do |batch:|
- def run(task)
- args = [uploader_class.to_s, model_class.to_s, mounted_as].compact
- run_rake_task(task, *args)
- end
-
- it 'migrates local storage to remote object storage' do
- expect(ObjectStorage::MigrateUploadsWorker)
- .to receive(:perform_async).exactly(batch).times
- .and_return("A fake job.")
-
- run('gitlab:uploads:migrate')
- end
-
- it 'migrates remote object storage to local storage' do
- expect(Upload).to receive(:where).exactly(batch + 1).times { Upload.all }
- expect(ObjectStorage::MigrateUploadsWorker)
- .to receive(:perform_async)
- .with(anything, model_class.name, mounted_as, ObjectStorage::Store::LOCAL)
- .exactly(batch).times
- .and_return("A fake job.")
-
- run('gitlab:uploads:migrate_to_local')
- end
-end
diff --git a/spec/tasks/gitlab/uploads/migrate_rake_spec.rb b/spec/tasks/gitlab/uploads/migrate_rake_spec.rb
index e293271ca67..3a368a5011b 100644
--- a/spec/tasks/gitlab/uploads/migrate_rake_spec.rb
+++ b/spec/tasks/gitlab/uploads/migrate_rake_spec.rb
@@ -2,133 +2,93 @@
require 'rake_helper'
-RSpec.describe 'gitlab:uploads:migrate and migrate_to_local rake tasks', :silence_stdout do
- let(:model_class) { nil }
- let(:uploader_class) { nil }
- let(:mounted_as) { nil }
- let(:batch_size) { 3 }
-
+RSpec.describe 'gitlab:uploads:migrate and migrate_to_local rake tasks', :sidekiq_inline, :silence_stdout do
before do
- stub_env('MIGRATION_BATCH_SIZE', batch_size.to_s)
- stub_uploads_object_storage(uploader_class)
+ stub_env('MIGRATION_BATCH_SIZE', 3.to_s)
+ stub_uploads_object_storage(AvatarUploader)
+ stub_uploads_object_storage(FileUploader)
Rake.application.rake_require 'tasks/gitlab/uploads/migrate'
- allow(ObjectStorage::MigrateUploadsWorker).to receive(:perform_async)
+ create_list(:project, 2, :with_avatar)
+ create_list(:group, 2, :with_avatar)
+ create_list(:project, 2) do |model|
+ FileUploader.new(model).store!(fixture_file_upload('spec/fixtures/doc_sample.txt'))
+ end
end
- context "for AvatarUploader" do
- let(:uploader_class) { AvatarUploader }
- let(:mounted_as) { :avatar }
+ let(:total_uploads_count) { 6 }
- context "for Project" do
- let(:model_class) { Project }
- let!(:projects) { create_list(:project, 10, :with_avatar) }
+ it 'migrates all uploads to object storage in batches' do
+ expect(ObjectStorage::MigrateUploadsWorker)
+ .to receive(:perform_async).twice.and_call_original
- it_behaves_like 'enqueue upload migration jobs in batch', batch: 4
- end
+ run_rake_task('gitlab:uploads:migrate:all')
- context "for Group" do
- let(:model_class) { Group }
+ expect(Upload.with_files_stored_locally.count).to eq(0)
+ expect(Upload.with_files_stored_remotely.count).to eq(total_uploads_count)
+ end
- before do
- create_list(:group, 10, :with_avatar)
- end
+ it 'migrates all uploads to local storage in batches' do
+ run_rake_task('gitlab:uploads:migrate')
+ expect(Upload.with_files_stored_remotely.count).to eq(total_uploads_count)
- it_behaves_like 'enqueue upload migration jobs in batch', batch: 4
- end
+ expect(ObjectStorage::MigrateUploadsWorker)
+ .to receive(:perform_async).twice.and_call_original
- context "for User" do
- let(:model_class) { User }
+ run_rake_task('gitlab:uploads:migrate_to_local:all')
- before do
- create_list(:user, 10, :with_avatar)
- end
-
- it_behaves_like 'enqueue upload migration jobs in batch', batch: 4
- end
+ expect(Upload.with_files_stored_remotely.count).to eq(0)
+ expect(Upload.with_files_stored_locally.count).to eq(total_uploads_count)
end
- context "for AttachmentUploader" do
- let(:uploader_class) { AttachmentUploader }
+ shared_examples 'migrate task with filters' do
+ it 'migrates matching uploads to object storage' do
+ run_rake_task('gitlab:uploads:migrate', task_arguments)
- context "for Note" do
- let(:model_class) { Note }
- let(:mounted_as) { :attachment }
+ migrated_count = matching_uploads.with_files_stored_remotely.count
- before do
- create_list(:note, 10, :with_attachment)
- end
-
- it_behaves_like 'enqueue upload migration jobs in batch', batch: 4
+ expect(migrated_count).to eq(matching_uploads.count)
+ expect(Upload.with_files_stored_locally.count).to eq(total_uploads_count - migrated_count)
end
- context "for Appearance" do
- let(:model_class) { Appearance }
- let(:mounted_as) { :logo }
+ it 'migrates matching uploads to local storage' do
+ run_rake_task('gitlab:uploads:migrate')
+ expect(Upload.with_files_stored_remotely.count).to eq(total_uploads_count)
+
+ run_rake_task('gitlab:uploads:migrate_to_local', task_arguments)
- before do
- create(:appearance, :with_logos)
- end
+ migrated_count = matching_uploads.with_files_stored_locally.count
- %i(logo header_logo).each do |mount|
- it_behaves_like 'enqueue upload migration jobs in batch', batch: 1 do
- let(:mounted_as) { mount }
- end
- end
+ expect(migrated_count).to eq(matching_uploads.count)
+ expect(Upload.with_files_stored_remotely.count).to eq(total_uploads_count - migrated_count)
end
end
- context "for FileUploader" do
- let(:uploader_class) { FileUploader }
- let(:model_class) { Project }
+ context 'when uploader_class is given' do
+ let(:task_arguments) { ['FileUploader'] }
+ let(:matching_uploads) { Upload.where(uploader: 'FileUploader') }
- before do
- create_list(:project, 10) do |model|
- uploader_class.new(model)
- .store!(fixture_file_upload('spec/fixtures/doc_sample.txt'))
- end
- end
-
- it_behaves_like 'enqueue upload migration jobs in batch', batch: 4
+ it_behaves_like 'migrate task with filters'
end
- context "for PersonalFileUploader" do
- let(:uploader_class) { PersonalFileUploader }
- let(:model_class) { PersonalSnippet }
-
- before do
- create_list(:personal_snippet, 10) do |model|
- uploader_class.new(model)
- .store!(fixture_file_upload('spec/fixtures/doc_sample.txt'))
- end
- end
+ context 'when model_class is given' do
+ let(:task_arguments) { [nil, 'Project'] }
+ let(:matching_uploads) { Upload.where(model_type: 'Project') }
- it_behaves_like 'enqueue upload migration jobs in batch', batch: 4
+ it_behaves_like 'migrate task with filters'
end
- context "for NamespaceFileUploader" do
- let(:uploader_class) { NamespaceFileUploader }
- let(:model_class) { Snippet }
+ context 'when mounted_as is given' do
+ let(:task_arguments) { [nil, nil, :avatar] }
+ let(:matching_uploads) { Upload.where(mount_point: :avatar) }
- before do
- create_list(:snippet, 10) do |model|
- uploader_class.new(model)
- .store!(fixture_file_upload('spec/fixtures/doc_sample.txt'))
- end
- end
-
- it_behaves_like 'enqueue upload migration jobs in batch', batch: 4
+ it_behaves_like 'migrate task with filters'
end
- context 'for DesignManagement::DesignV432x230Uploader' do
- let(:uploader_class) { DesignManagement::DesignV432x230Uploader }
- let(:model_class) { DesignManagement::Action }
- let(:mounted_as) { :image_v432x230 }
-
- before do
- create_list(:design_action, 10, :with_image_v432x230)
- end
+ context 'when multiple filters are given' do
+ let(:task_arguments) { %w[AvatarUploader Project] }
+ let(:matching_uploads) { Upload.where(uploader: 'AvatarUploader', model_type: 'Project') }
- it_behaves_like 'enqueue upload migration jobs in batch', batch: 4
+ it_behaves_like 'migrate task with filters'
end
end
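A sketch of driving the same tasks from a Rake-aware console, assuming the gitlab:uploads:* tasks are already loaded; the positional arguments (uploader class, model class, mount point) mirror the task_arguments used in the filter contexts above.

# Rake tasks only run once per process unless re-enabled between invocations.
Rake::Task['gitlab:uploads:migrate:all'].invoke                       # every upload, in batches
Rake::Task['gitlab:uploads:migrate'].invoke('FileUploader')           # filter by uploader class
Rake::Task['gitlab:uploads:migrate'].reenable
Rake::Task['gitlab:uploads:migrate'].invoke(nil, 'Project', :avatar)  # filter by model class and mount point
Rake::Task['gitlab:uploads:migrate_to_local:all'].invoke              # reverse direction, unfiltered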
diff --git a/spec/uploaders/workers/object_storage/migrate_uploads_worker_spec.rb b/spec/uploaders/workers/object_storage/migrate_uploads_worker_spec.rb
index fd01a18e810..1746f480c9b 100644
--- a/spec/uploaders/workers/object_storage/migrate_uploads_worker_spec.rb
+++ b/spec/uploaders/workers/object_storage/migrate_uploads_worker_spec.rb
@@ -3,120 +3,62 @@
require 'spec_helper'
RSpec.describe ObjectStorage::MigrateUploadsWorker do
- let(:model_class) { Project }
+ let(:project) { create(:project, :with_avatar) }
let(:uploads) { Upload.all }
- let(:to_store) { ObjectStorage::Store::REMOTE }
- def perform(uploads, store = nil)
- described_class.new.perform(uploads.ids, model_class.to_s, mounted_as, store || to_store)
+ def perform(uploads, store = ObjectStorage::Store::REMOTE)
+ described_class.new.perform(uploads.ids, store)
rescue ObjectStorage::MigrateUploadsWorker::Report::MigrationFailures
# swallow
end
- # Expects the calling spec to define:
- # - model_class
- # - mounted_as
- # - to_store
- RSpec.shared_examples 'uploads migration worker' do
- describe '.enqueue!' do
- def enqueue!
- described_class.enqueue!(uploads, model_class, mounted_as, to_store)
- end
-
- it 'is guarded by .sanity_check!' do
- expect(described_class).to receive(:perform_async)
- expect(described_class).to receive(:sanity_check!)
+ before do
+ stub_uploads_object_storage(AvatarUploader)
+ stub_uploads_object_storage(FileUploader)
- enqueue!
- end
+ FileUploader.new(project).store!(fixture_file_upload('spec/fixtures/doc_sample.txt'))
+ end
- context 'sanity_check! fails' do
- before do
- expect(described_class).to receive(:sanity_check!).and_raise(described_class::SanityCheckError)
- end
+ describe '#perform' do
+ it 'migrates files to remote storage' do
+ expect(Gitlab::AppLogger).to receive(:info).with(%r{Migrated 2/2 files})
- it 'does not enqueue a job' do
- expect(described_class).not_to receive(:perform_async)
+ perform(uploads)
- expect { enqueue! }.to raise_error(described_class::SanityCheckError)
- end
- end
+ expect(Upload.where(store: ObjectStorage::Store::LOCAL).count).to eq(0)
+ expect(Upload.where(store: ObjectStorage::Store::REMOTE).count).to eq(2)
end
- describe '.sanity_check!' do
- shared_examples 'raises a SanityCheckError' do |expected_message|
- let(:mount_point) { nil }
-
- it do
- expect { described_class.sanity_check!(uploads, model_class, mount_point) }
- .to raise_error(described_class::SanityCheckError).with_message(expected_message)
- end
+ context 'reversed' do
+ before do
+ perform(uploads)
end
- context 'uploader types mismatch' do
- let!(:outlier) { create(:upload, uploader: 'GitlabUploader') }
+ it 'migrates files to local storage' do
+ expect(Upload.where(store: ObjectStorage::Store::REMOTE).count).to eq(2)
- include_examples 'raises a SanityCheckError', /Multiple uploaders found/
- end
+ perform(uploads, ObjectStorage::Store::LOCAL)
- context 'mount point not found' do
- include_examples 'raises a SanityCheckError', /Mount point [a-z:]+ not found in/ do
- let(:mount_point) { :potato }
- end
+ expect(Upload.where(store: ObjectStorage::Store::LOCAL).count).to eq(2)
+ expect(Upload.where(store: ObjectStorage::Store::REMOTE).count).to eq(0)
end
end
- describe '#perform' do
- it 'migrates files to remote storage' do
- expect(Gitlab::AppLogger).to receive(:info).with(%r{Migrated 1/1 files})
-
- perform(uploads)
-
- expect(Upload.where(store: ObjectStorage::Store::LOCAL).count).to eq(0)
- end
-
- context 'reversed' do
- let(:to_store) { ObjectStorage::Store::LOCAL }
-
- before do
- perform(uploads, ObjectStorage::Store::REMOTE)
- end
-
- it 'migrates files to local storage' do
- expect(Upload.where(store: ObjectStorage::Store::REMOTE).count).to eq(1)
-
- perform(uploads)
-
- expect(Upload.where(store: ObjectStorage::Store::LOCAL).count).to eq(1)
- end
+ context 'migration is unsuccessful' do
+ before do
+ allow_any_instance_of(ObjectStorage::Concern)
+ .to receive(:migrate!).and_raise(CarrierWave::UploadError, 'I am a teapot.')
end
- context 'migration is unsuccessful' do
- before do
- allow_any_instance_of(ObjectStorage::Concern)
- .to receive(:migrate!).and_raise(CarrierWave::UploadError, 'I am a teapot.')
- end
-
- it 'does not migrate files to remote storage' do
- expect(Gitlab::AppLogger).to receive(:warn).with(/Error .* I am a teapot/)
+ it 'does not migrate files to remote storage' do
+ expect(Gitlab::AppLogger).to receive(:warn).with(/Error .* I am a teapot/)
- perform(uploads)
+ perform(uploads)
- expect(Upload.where(store: ObjectStorage::Store::LOCAL).count).to eq(1)
- end
+ expect(Upload.where(store: ObjectStorage::Store::LOCAL).count).to eq(2)
+ expect(Upload.where(store: ObjectStorage::Store::REMOTE).count).to eq(0)
end
end
- end
-
- context "for AvatarUploader" do
- let!(:project_with_avatar) { create(:project, :with_avatar) }
- let(:mounted_as) { :avatar }
-
- before do
- stub_uploads_object_storage(AvatarUploader)
- end
-
- it_behaves_like "uploads migration worker"
describe "limits N+1 queries" do
it "to N*5" do
@@ -127,46 +69,18 @@ RSpec.describe ObjectStorage::MigrateUploadsWorker do
expect { perform(Upload.all) }.not_to exceed_query_limit(query_count).with_threshold(5)
end
end
- end
-
- context "for FileUploader" do
- let!(:project_with_file) { create(:project) }
- let(:secret) { SecureRandom.hex }
- let(:mounted_as) { nil }
-
- def upload_file(project)
- uploader = FileUploader.new(project)
- uploader.store!(fixture_file_upload('spec/fixtures/doc_sample.txt'))
- end
-
- before do
- stub_uploads_object_storage(FileUploader)
-
- upload_file(project_with_file)
- end
-
- it_behaves_like "uploads migration worker"
-
- describe "limits N+1 queries" do
- it "to N*5" do
- query_count = ActiveRecord::QueryRecorder.new { perform(uploads) }
- upload_file(create(:project))
+ it 'handles legacy argument format' do
+ described_class.new.perform(uploads.ids, 'Project', :avatar, ObjectStorage::Store::REMOTE)
- expect { perform(Upload.all) }.not_to exceed_query_limit(query_count).with_threshold(5)
- end
+ expect(Upload.where(store: ObjectStorage::Store::LOCAL).count).to eq(0)
+ expect(Upload.where(store: ObjectStorage::Store::REMOTE).count).to eq(2)
end
- end
- context 'for DesignManagement::DesignV432x230Uploader' do
- let(:model_class) { DesignManagement::Action }
- let!(:design_action) { create(:design_action, :with_image_v432x230) }
- let(:mounted_as) { :image_v432x230 }
+ it 'logs an error when number of arguments is incorrect' do
+ expect(Gitlab::AppLogger).to receive(:warn).with(/Job has wrong arguments format/)
- before do
- stub_uploads_object_storage(DesignManagement::DesignV432x230Uploader)
+ described_class.new.perform(uploads.ids, 'Project', ObjectStorage::Store::REMOTE)
end
-
- it_behaves_like 'uploads migration worker'
end
end
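A sketch of the two argument formats the worker now accepts, based on the expectations above; `upload_ids` is assumed to be an array of Upload IDs, and synchronous `new.perform` is used here only to mirror the spec.

upload_ids = Upload.with_files_stored_locally.ids

# Current format: upload IDs plus the target store.
ObjectStorage::MigrateUploadsWorker.new.perform(upload_ids, ObjectStorage::Store::REMOTE)

# Legacy format, still handled per the spec above:
# upload IDs, model class name, mount point, target store.
ObjectStorage::MigrateUploadsWorker.new.perform(upload_ids, 'Project', :avatar, ObjectStorage::Store::REMOTE)

Keeping the legacy signature presumably lets jobs enqueued with the old arguments complete after the worker is upgraded, while anything else (such as the three-argument call tested above) is logged as a wrong arguments format.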