Welcome to mirror list, hosted at ThFree Co, Russian Federation.

gitlab.com/gitlab-org/gitlab-foss.git - Unnamed repository; edit this file 'description' to name the repository.
summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorGitLab Bot <gitlab-bot@gitlab.com>2023-08-03 18:09:37 +0300
committerGitLab Bot <gitlab-bot@gitlab.com>2023-08-03 18:09:37 +0300
commit388e0fbbd00e04a10e3ac1084945aa18a781c40c (patch)
treeff8dff4f52d2432f37726d92f2efb8957a8609b7
parentaeee636c18f82107ec7a489f33c944c65ad5f34e (diff)
Add latest changes from gitlab-org/gitlab@master
-rw-r--r--app/assets/javascripts/search/sidebar/components/archived_filter/index.vue10
-rw-r--r--app/assets/javascripts/sessions/new/components/email_verification.vue103
-rw-r--r--app/assets/javascripts/sessions/new/components/update_email.vue130
-rw-r--r--app/assets/javascripts/sessions/new/constants.js10
-rw-r--r--app/assets/javascripts/sessions/new/index.js11
-rw-r--r--app/controllers/concerns/verifies_with_email.rb24
-rw-r--r--app/helpers/sessions_helper.rb15
-rw-r--r--app/helpers/sidebars_helper.rb4
-rw-r--r--app/models/application_setting.rb4
-rw-r--r--app/models/ci/runner_manager.rb3
-rw-r--r--app/models/commit_collection.rb8
-rw-r--r--app/models/merge_request.rb4
-rw-r--r--app/models/user.rb1
-rw-r--r--app/services/users/email_verification/update_email_service.rb76
-rw-r--r--app/views/import/gitea/new.html.haml15
-rw-r--r--app/views/import/gitea/status.html.haml4
-rw-r--r--app/views/layouts/_page.html.haml2
-rw-r--r--app/views/shared/nav/_your_work_scope_header.html.haml2
-rw-r--r--config/feature_flags/development/keep_merge_commits_for_approvals.yml8
-rw-r--r--config/routes/user.rb1
-rw-r--r--db/migrate/20230710094027_add_protected_paths_for_get_request_to_application_settings.rb26
-rw-r--r--db/migrate/20230711151845_add_email_reset_offered_at_to_user_details.rb9
-rw-r--r--db/migrate/20230728193736_add_has_merge_request_to_vulnerability_reads.rb13
-rw-r--r--db/post_migrate/20230725035942_create_sync_index_for_ci_pipline_variables_pipeline_id.rb17
-rw-r--r--db/schema_migrations/202307100940271
-rw-r--r--db/schema_migrations/202307111518451
-rw-r--r--db/schema_migrations/202307250359421
-rw-r--r--db/schema_migrations/202307281937361
-rw-r--r--db/structure.sql6
-rw-r--r--doc/administration/reference_architectures/50k_users.md2
-rw-r--r--doc/architecture/blueprints/cells/cells-feature-admin-area.md31
-rw-r--r--doc/architecture/blueprints/cells/cells-feature-backups.md29
-rw-r--r--doc/architecture/blueprints/cells/cells-feature-ci-runners.md161
-rw-r--r--doc/architecture/blueprints/cells/cells-feature-container-registry.md72
-rw-r--r--doc/architecture/blueprints/cells/cells-feature-contributions-forks.md127
-rw-r--r--doc/architecture/blueprints/cells/cells-feature-data-migration.md100
-rw-r--r--doc/architecture/blueprints/cells/cells-feature-database-sequences.md69
-rw-r--r--doc/architecture/blueprints/cells/cells-feature-git-access.md38
-rw-r--r--doc/architecture/blueprints/cells/cells-feature-global-search.md23
-rw-r--r--doc/architecture/blueprints/cells/cells-feature-graphql.md28
-rw-r--r--doc/architecture/blueprints/cells/cells-feature-organizations.md35
-rw-r--r--doc/architecture/blueprints/cells/cells-feature-router-endpoints-classification.md21
-rw-r--r--doc/architecture/blueprints/cells/cells-feature-schema-changes.md36
-rw-r--r--doc/architecture/blueprints/cells/cells-feature-secrets.md26
-rw-r--r--doc/architecture/blueprints/cells/index.md198
-rw-r--r--doc/development/secure_coding_guidelines.md2
-rw-r--r--doc/tutorials/create_register_first_runner/index.md167
-rw-r--r--doc/tutorials/website_project_with_analytics/index.md162
-rw-r--r--doc/user/project/import/gitea.md2
-rw-r--r--doc/user/project/repository/push_rules.md4
-rw-r--r--doc/user/usage_quotas.md2
-rw-r--r--locale/gitlab.pot39
-rw-r--r--spec/features/users/email_verification_on_login_spec.rb73
-rw-r--r--spec/frontend/search/sidebar/components/archived_filter_spec.js16
-rw-r--r--spec/frontend/sessions/new/components/email_verification_spec.js50
-rw-r--r--spec/frontend/sessions/new/components/update_email_spec.js184
-rw-r--r--spec/helpers/sessions_helper_spec.rb55
-rw-r--r--spec/helpers/sidebars_helper_spec.rb4
-rw-r--r--spec/models/application_setting_spec.rb4
-rw-r--r--spec/models/ci/runner_manager_spec.rb17
-rw-r--r--spec/models/commit_collection_spec.rb20
-rw-r--r--spec/models/merge_request_spec.rb23
-rw-r--r--spec/models/user_spec.rb3
-rw-r--r--spec/requests/verifies_with_email_spec.rb113
-rw-r--r--spec/services/users/email_verification/update_email_service_spec.rb119
65 files changed, 1851 insertions, 714 deletions
diff --git a/app/assets/javascripts/search/sidebar/components/archived_filter/index.vue b/app/assets/javascripts/search/sidebar/components/archived_filter/index.vue
index dc92d7bfd58..250b3541bee 100644
--- a/app/assets/javascripts/search/sidebar/components/archived_filter/index.vue
+++ b/app/assets/javascripts/search/sidebar/components/archived_filter/index.vue
@@ -2,6 +2,7 @@
import { GlFormCheckboxGroup, GlFormCheckbox } from '@gitlab/ui';
import { mapState, mapActions } from 'vuex';
import Tracking from '~/tracking';
+import { parseBoolean } from '~/lib/utils/common_utils';
import { archivedFilterData, TRACKING_NAMESPACE, TRACKING_LABEL_CHECKBOX } from './data';
@@ -15,11 +16,12 @@ export default {
...mapState(['urlQuery']),
selectedFilter: {
get() {
- return [Boolean(this.urlQuery?.include_archived)];
+ return [parseBoolean(this.urlQuery?.include_archived)];
},
- set([value = '']) {
- this.setQuery({ key: archivedFilterData.filterParam, value: `${value}` });
- this.trackSelectCheckbox(value);
+ set(value) {
+ const newValue = value?.pop() ?? false;
+ this.setQuery({ key: archivedFilterData.filterParam, value: newValue?.toString() });
+ this.trackSelectCheckbox(newValue);
},
},
},
diff --git a/app/assets/javascripts/sessions/new/components/email_verification.vue b/app/assets/javascripts/sessions/new/components/email_verification.vue
index 87385b91c42..6a67c25b58f 100644
--- a/app/assets/javascripts/sessions/new/components/email_verification.vue
+++ b/app/assets/javascripts/sessions/new/components/email_verification.vue
@@ -12,10 +12,12 @@ import {
I18N_RESEND_LINK,
I18N_EMAIL_RESEND_SUCCESS,
I18N_GENERIC_ERROR,
+ I18N_UPDATE_EMAIL,
VERIFICATION_CODE_REGEX,
SUCCESS_RESPONSE,
FAILURE_RESPONSE,
} from '../constants';
+import UpdateEmail from './update_email.vue';
export default {
name: 'EmailVerification',
@@ -25,6 +27,7 @@ export default {
GlFormGroup,
GlFormInput,
GlButton,
+ UpdateEmail,
},
props: {
obfuscatedEmail: {
@@ -39,12 +42,22 @@ export default {
type: String,
required: true,
},
+ isOfferEmailReset: {
+ type: Boolean,
+ required: true,
+ },
+ updateEmailPath: {
+ type: String,
+ required: true,
+ },
},
data() {
return {
+ email: this.obfuscatedEmail,
verificationCode: '',
submitted: false,
verifyError: '',
+ showUpdateEmail: false,
};
},
computed: {
@@ -126,49 +139,73 @@ export default {
this.submitted = false;
this.$refs.input.$el.focus();
},
+ updateEmail() {
+ this.showUpdateEmail = true;
+ },
+ verifyToken(email = '') {
+ this.showUpdateEmail = false;
+ if (email.length) this.email = email;
+ this.$nextTick(this.resetForm);
+ },
},
i18n: {
explanation: I18N_EXPLANATION,
inputLabel: I18N_INPUT_LABEL,
submitButton: I18N_SUBMIT_BUTTON,
resendLink: I18N_RESEND_LINK,
+ updateEmail: I18N_UPDATE_EMAIL,
},
};
</script>
<template>
- <gl-form @submit.prevent="verify">
- <section class="gl-mb-5">
- <gl-sprintf :message="$options.i18n.explanation">
- <template #email>
- <strong>{{ obfuscatedEmail }}</strong>
- </template>
- </gl-sprintf>
- </section>
- <gl-form-group
- :label="$options.i18n.inputLabel"
- label-for="verification-code"
- :state="inputValidation.state"
- :invalid-feedback="inputValidation.message"
- >
- <gl-form-input
- id="verification-code"
- ref="input"
- v-model="verificationCode"
- autofocus
- autocomplete="one-time-code"
- inputmode="numeric"
- maxlength="6"
+ <div>
+ <update-email
+ v-if="showUpdateEmail"
+ :update-email-path="updateEmailPath"
+ @verifyToken="verifyToken"
+ />
+ <gl-form v-else @submit.prevent="verify">
+ <section class="gl-mb-5">
+ <gl-sprintf :message="$options.i18n.explanation">
+ <template #email>
+ <strong>{{ email }}</strong>
+ </template>
+ </gl-sprintf>
+ </section>
+ <gl-form-group
+ :label="$options.i18n.inputLabel"
+ label-for="verification-code"
:state="inputValidation.state"
- />
- </gl-form-group>
- <section class="gl-mt-5">
- <gl-button block variant="confirm" type="submit" :disabled="!inputValidation.state">{{
- $options.i18n.submitButton
- }}</gl-button>
- <gl-button block variant="link" class="gl-mt-3 gl-h-7" @click="resend">{{
- $options.i18n.resendLink
- }}</gl-button>
- </section>
- </gl-form>
+ :invalid-feedback="inputValidation.message"
+ >
+ <gl-form-input
+ id="verification-code"
+ ref="input"
+ v-model="verificationCode"
+ autofocus
+ autocomplete="one-time-code"
+ inputmode="numeric"
+ maxlength="6"
+ :state="inputValidation.state"
+ />
+ </gl-form-group>
+ <section class="gl-mt-5">
+ <gl-button block variant="confirm" type="submit" :disabled="!inputValidation.state">{{
+ $options.i18n.submitButton
+ }}</gl-button>
+ <gl-button block variant="link" class="gl-mt-3 gl-h-7" @click="resend">{{
+ $options.i18n.resendLink
+ }}</gl-button>
+ <gl-button
+ v-if="isOfferEmailReset"
+ block
+ variant="link"
+ class="gl-mt-3 gl-h-7"
+ @click="updateEmail"
+ >{{ $options.i18n.updateEmail }}</gl-button
+ >
+ </section>
+ </gl-form>
+ </div>
</template>
diff --git a/app/assets/javascripts/sessions/new/components/update_email.vue b/app/assets/javascripts/sessions/new/components/update_email.vue
new file mode 100644
index 00000000000..f63176e5513
--- /dev/null
+++ b/app/assets/javascripts/sessions/new/components/update_email.vue
@@ -0,0 +1,130 @@
+<script>
+import { GlForm, GlFormGroup, GlFormInput, GlButton } from '@gitlab/ui';
+import { createAlert, VARIANT_SUCCESS } from '~/alert';
+import { isEmail } from '~/lib/utils/forms';
+import axios from '~/lib/utils/axios_utils';
+import {
+ I18N_EMAIL,
+ I18N_UPDATE_EMAIL,
+ I18N_CANCEL,
+ I18N_EMAIL_INVALID,
+ I18N_UPDATE_EMAIL_SUCCESS,
+ I18N_GENERIC_ERROR,
+ SUCCESS_RESPONSE,
+ FAILURE_RESPONSE,
+} from '../constants';
+
+export default {
+ name: 'UpdateEmail',
+ components: {
+ GlForm,
+ GlFormGroup,
+ GlFormInput,
+ GlButton,
+ },
+ props: {
+ updateEmailPath: {
+ type: String,
+ required: true,
+ },
+ },
+ data() {
+ return {
+ email: '',
+ submitted: false,
+ verifyError: '',
+ };
+ },
+ computed: {
+ inputValidation() {
+ return {
+ state: !(this.submitted && this.invalidFeedback),
+ message: this.invalidFeedback,
+ };
+ },
+ invalidFeedback() {
+ if (!this.submitted) {
+ return '';
+ }
+
+ if (!isEmail(this.email)) {
+ return I18N_EMAIL_INVALID;
+ }
+
+ return this.verifyError;
+ },
+ },
+ watch: {
+ email() {
+ this.verifyError = '';
+ },
+ },
+ methods: {
+ updateEmail() {
+ this.submitted = true;
+
+ if (!this.inputValidation.state) return;
+
+ axios
+ .patch(this.updateEmailPath, { user: { email: this.email } })
+ .then(this.handleResponse)
+ .catch(this.handleError);
+ },
+ handleResponse(response) {
+ if (response.data.status === undefined) {
+ this.handleError();
+ } else if (response.data.status === SUCCESS_RESPONSE) {
+ this.handleSuccess();
+ } else if (response.data.status === FAILURE_RESPONSE) {
+ this.verifyError = response.data.message;
+ }
+ },
+ handleSuccess() {
+ createAlert({
+ message: I18N_UPDATE_EMAIL_SUCCESS,
+ variant: VARIANT_SUCCESS,
+ });
+ this.$emit('verifyToken', this.email);
+ },
+ handleError(error) {
+ createAlert({
+ message: I18N_GENERIC_ERROR,
+ captureError: true,
+ error,
+ });
+ },
+ },
+ i18n: {
+ email: I18N_EMAIL,
+ updateEmail: I18N_UPDATE_EMAIL,
+ cancel: I18N_CANCEL,
+ },
+};
+</script>
+
+<template>
+ <gl-form novalidate @submit.prevent="updateEmail">
+ <gl-form-group
+ :label="$options.i18n.email"
+ label-for="update-email"
+ :state="inputValidation.state"
+ :invalid-feedback="inputValidation.message"
+ >
+ <gl-form-input
+ id="update-email"
+ v-model="email"
+ type="email"
+ autofocus
+ :state="inputValidation.state"
+ />
+ </gl-form-group>
+ <section class="gl-mt-5">
+ <gl-button block variant="confirm" type="submit" :disabled="!inputValidation.state">{{
+ $options.i18n.updateEmail
+ }}</gl-button>
+ <gl-button block variant="link" class="gl-mt-3 gl-h-7" @click="$emit('verifyToken')">{{
+ $options.i18n.cancel
+ }}</gl-button>
+ </section>
+ </gl-form>
+</template>
diff --git a/app/assets/javascripts/sessions/new/constants.js b/app/assets/javascripts/sessions/new/constants.js
index 203a8aee1c4..dec96f78232 100644
--- a/app/assets/javascripts/sessions/new/constants.js
+++ b/app/assets/javascripts/sessions/new/constants.js
@@ -1,4 +1,4 @@
-import { s__ } from '~/locale';
+import { s__, __ } from '~/locale';
export const I18N_EXPLANATION = s__(
"IdentityVerification|For added security, you'll need to verify your identity. We've sent a verification code to %{email}",
@@ -13,6 +13,14 @@ export const I18N_GENERIC_ERROR = s__(
'IdentityVerification|Something went wrong. Please try again.',
);
+export const I18N_EMAIL = __('Email');
+export const I18N_UPDATE_EMAIL = s__('IdentityVerification|Update email');
+export const I18N_CANCEL = __('Cancel');
+export const I18N_EMAIL_INVALID = s__('IdentityVerification|Please enter a valid email address.');
+export const I18N_UPDATE_EMAIL_SUCCESS = s__(
+ 'IdentityVerification|A new code has been sent to your updated email address.',
+);
+
export const VERIFICATION_CODE_REGEX = /^\d{6}$/;
export const SUCCESS_RESPONSE = 'success';
export const FAILURE_RESPONSE = 'failure';
diff --git a/app/assets/javascripts/sessions/new/index.js b/app/assets/javascripts/sessions/new/index.js
index 51022a281e3..bf126b0e202 100644
--- a/app/assets/javascripts/sessions/new/index.js
+++ b/app/assets/javascripts/sessions/new/index.js
@@ -1,4 +1,5 @@
import Vue from 'vue';
+import { parseBoolean } from '~/lib/utils/common_utils';
import EmailVerification from './components/email_verification.vue';
export default () => {
@@ -8,14 +9,20 @@ export default () => {
return null;
}
- const { obfuscatedEmail, verifyPath, resendPath } = el.dataset;
+ const { obfuscatedEmail, verifyPath, resendPath, offerEmailReset, updateEmailPath } = el.dataset;
return new Vue({
el,
name: 'EmailVerificationRoot',
render(createElement) {
return createElement(EmailVerification, {
- props: { obfuscatedEmail, verifyPath, resendPath },
+ props: {
+ obfuscatedEmail,
+ verifyPath,
+ resendPath,
+ isOfferEmailReset: parseBoolean(offerEmailReset),
+ updateEmailPath,
+ },
});
},
});
diff --git a/app/controllers/concerns/verifies_with_email.rb b/app/controllers/concerns/verifies_with_email.rb
index f52903fe7e9..6affd7bb4cc 100644
--- a/app/controllers/concerns/verifies_with_email.rb
+++ b/app/controllers/concerns/verifies_with_email.rb
@@ -58,6 +58,21 @@ module VerifiesWithEmail
end
end
+ def update_email
+ return unless user = find_verification_user
+
+ log_verification(user, :email_update_requested)
+ result = Users::EmailVerification::UpdateEmailService.new(user: user).execute(email: email_params[:email])
+
+ if result[:status] == :success
+ send_verification_instructions(user)
+ else
+ handle_verification_failure(user, result[:reason], result[:message])
+ end
+
+ render json: result
+ end
+
def successful_verification
session.delete(:verification_user_id)
@redirect_url = after_sign_in_path_for(current_user) # rubocop:disable Gitlab/ModuleWithInstanceVariables
@@ -88,7 +103,8 @@ module VerifiesWithEmail
def send_verification_instructions_email(user, token)
return unless user.can?(:receive_notifications)
- Notify.verification_instructions_email(user.email, token: token).deliver_later
+ email = verification_email(user)
+ Notify.verification_instructions_email(email, token: token).deliver_later
log_verification(user, :instructions_sent)
end
@@ -129,6 +145,8 @@ module VerifiesWithEmail
end
def handle_verification_success(user)
+ user.confirm if unconfirmed_verification_email?(user)
+ user.email_reset_offered_at = Time.current if user.email_reset_offered_at.nil?
user.unlock_access!
log_verification(user, :successful)
@@ -157,6 +175,10 @@ module VerifiesWithEmail
params.require(:user).permit(:verification_token)
end
+ def email_params
+ params.require(:user).permit(:email)
+ end
+
def log_verification(user, event, reason = nil)
Gitlab::AppLogger.info(
message: 'Email Verification',
diff --git a/app/helpers/sessions_helper.rb b/app/helpers/sessions_helper.rb
index d5b642994f1..cf5cc92587f 100644
--- a/app/helpers/sessions_helper.rb
+++ b/app/helpers/sessions_helper.rb
@@ -49,11 +49,22 @@ module SessionsHelper
Gitlab::CurrentSettings.remember_me_enabled?
end
+ def unconfirmed_verification_email?(user)
+ token_valid_from = ::Users::EmailVerification::ValidateTokenService::TOKEN_VALID_FOR_MINUTES.minutes.ago
+ user.email_reset_offered_at.nil? && user.pending_reconfirmation? && user.confirmation_sent_at >= token_valid_from
+ end
+
+ def verification_email(user)
+ unconfirmed_verification_email?(user) ? user.unconfirmed_email : user.email
+ end
+
def verification_data(user)
{
- obfuscated_email: obfuscated_email(user.email),
+ obfuscated_email: obfuscated_email(verification_email(user)),
verify_path: session_path(:user),
- resend_path: users_resend_verification_code_path
+ resend_path: users_resend_verification_code_path,
+ offer_email_reset: user.email_reset_offered_at.nil?.to_s,
+ update_email_path: users_update_email_path
}
end
end
diff --git a/app/helpers/sidebars_helper.rb b/app/helpers/sidebars_helper.rb
index 0329a3e136b..35363d07051 100644
--- a/app/helpers/sidebars_helper.rb
+++ b/app/helpers/sidebars_helper.rb
@@ -74,7 +74,7 @@ module SidebarsHelper
username: user.username,
avatar_url: user.avatar_url,
has_link_to_profile: current_user_menu?(:profile),
- link_to_profile: user_url(user),
+ link_to_profile: user_path(user),
logo_url: current_appearance&.header_logo_path,
status: user_status_menu_data(user),
settings: {
@@ -103,7 +103,7 @@ module SidebarsHelper
current_context: super_sidebar_current_context(project: project, group: group),
context_switcher_links: context_switcher_links,
pinned_items: user.pinned_nav_items[panel_type] || super_sidebar_default_pins(panel_type),
- update_pins_url: pins_url,
+ update_pins_url: pins_path,
is_impersonating: impersonating?,
stop_impersonation_path: admin_impersonation_path,
shortcut_links: shortcut_links(user, project: project)
diff --git a/app/models/application_setting.rb b/app/models/application_setting.rb
index 35574a23d41..44c6e4d949f 100644
--- a/app/models/application_setting.rb
+++ b/app/models/application_setting.rb
@@ -416,6 +416,10 @@ class ApplicationSetting < MainClusterwide::ApplicationRecord
length: { maximum: 100, message: N_('is too long (maximum is 100 entries)') },
allow_nil: false
+ validates :protected_paths_for_get_request,
+ length: { maximum: 100, message: N_('is too long (maximum is 100 entries)') },
+ allow_nil: false
+
validates :push_event_hooks_limit,
numericality: { greater_than_or_equal_to: 0 }
diff --git a/app/models/ci/runner_manager.rb b/app/models/ci/runner_manager.rb
index 3a3f95a8c69..1c06c786b9d 100644
--- a/app/models/ci/runner_manager.rb
+++ b/app/models/ci/runner_manager.rb
@@ -14,7 +14,8 @@ module Ci
belongs_to :runner
- has_many :runner_manager_builds, inverse_of: :runner_manager, class_name: 'Ci::RunnerManagerBuild'
+ has_many :runner_manager_builds, inverse_of: :runner_manager, foreign_key: :runner_machine_id,
+ class_name: 'Ci::RunnerManagerBuild'
has_many :builds, through: :runner_manager_builds, class_name: 'Ci::Build'
belongs_to :runner_version, inverse_of: :runner_managers, primary_key: :version, foreign_key: :version,
class_name: 'Ci::RunnerVersion'
diff --git a/app/models/commit_collection.rb b/app/models/commit_collection.rb
index edc60a757d2..993e1af20d5 100644
--- a/app/models/commit_collection.rb
+++ b/app/models/commit_collection.rb
@@ -24,8 +24,12 @@ class CommitCollection
commits.each(&block)
end
- def committers
- emails = without_merge_commits.filter_map(&:committer_email).uniq
+ def committers(with_merge_commits: false)
+ emails = if with_merge_commits
+ commits.filter_map(&:committer_email).uniq
+ else
+ without_merge_commits.filter_map(&:committer_email).uniq
+ end
User.by_any_email(emails)
end
diff --git a/app/models/merge_request.rb b/app/models/merge_request.rb
index cc5152df12f..ab63a4dbb47 100644
--- a/app/models/merge_request.rb
+++ b/app/models/merge_request.rb
@@ -656,8 +656,8 @@ class MergeRequest < ApplicationRecord
[:assignees, :reviewers] + super
end
- def committers
- @committers ||= commits.committers
+ def committers(with_merge_commits: false)
+ @committers ||= commits.committers(with_merge_commits: with_merge_commits)
end
# Verifies if title has changed not taking into account Draft prefix
diff --git a/app/models/user.rb b/app/models/user.rb
index f36a58b1cdf..a55908c5e23 100644
--- a/app/models/user.rb
+++ b/app/models/user.rb
@@ -403,6 +403,7 @@ class User < MainClusterwide::ApplicationRecord
delegate :location, :location=, to: :user_detail, allow_nil: true
delegate :organization, :organization=, to: :user_detail, allow_nil: true
delegate :discord, :discord=, to: :user_detail, allow_nil: true
+ delegate :email_reset_offered_at, :email_reset_offered_at=, to: :user_detail, allow_nil: true
accepts_nested_attributes_for :user_preference, update_only: true
accepts_nested_attributes_for :user_detail, update_only: true
diff --git a/app/services/users/email_verification/update_email_service.rb b/app/services/users/email_verification/update_email_service.rb
new file mode 100644
index 00000000000..3f9b90b2960
--- /dev/null
+++ b/app/services/users/email_verification/update_email_service.rb
@@ -0,0 +1,76 @@
+# frozen_string_literal: true
+
+module Users
+ module EmailVerification
+ class UpdateEmailService
+ include ActionView::Helpers::DateHelper
+
+ RATE_LIMIT = :email_verification_code_send
+
+ def initialize(user:)
+ @user = user
+ end
+
+ def execute(email:)
+ return failure(:rate_limited) if rate_limited?
+ return failure(:already_offered) if already_offered?
+ return failure(:no_change) if no_change?(email)
+ return failure(:validation_error) unless update_email
+
+ success
+ end
+
+ private
+
+ attr_reader :user
+
+ def rate_limited?
+ Gitlab::ApplicationRateLimiter.throttled?(RATE_LIMIT, scope: user)
+ end
+
+ def already_offered?
+ user.email_reset_offered_at.present?
+ end
+
+ def no_change?(email)
+ user.email = email
+ !user.will_save_change_to_email?
+ end
+
+ def update_email
+ user.skip_confirmation_notification!
+ user.save
+ end
+
+ def success
+ { status: :success }
+ end
+
+ def failure(reason)
+ {
+ status: :failure,
+ reason: reason,
+ message: failure_message(reason)
+ }
+ end
+
+ def failure_message(reason)
+ case reason
+ when :rate_limited
+ interval = distance_of_time_in_words(Gitlab::ApplicationRateLimiter.rate_limits[RATE_LIMIT][:interval])
+ format(
+ s_("IdentityVerification|You've reached the maximum amount of tries. Wait %{interval} and try again."),
+ interval: interval
+ )
+ when :already_offered
+ s_('IdentityVerification|Email update is only offered once.')
+ when :no_change
+ s_('IdentityVerification|A code has already been sent to this email address. ' \
+ 'Check your spam folder or enter another email address.')
+ when :validation_error
+ user.errors.full_messages.join(' ')
+ end
+ end
+ end
+ end
+end
diff --git a/app/views/import/gitea/new.html.haml b/app/views/import/gitea/new.html.haml
index 4a293bb6f4e..f76e9f3f6ed 100644
--- a/app/views/import/gitea/new.html.haml
+++ b/app/views/import/gitea/new.html.haml
@@ -1,24 +1,25 @@
-- page_title _("Gitea Import")
+- page_title _("Gitea import")
- header_title _("New project"), new_project_path
- add_to_breadcrumbs s_('ProjectsNew|Import project'), new_project_path(anchor: 'import_project')
%h1.page-title.gl-font-size-h-display
= custom_icon('gitea_logo')
- = _('Import Projects from Gitea')
+ = _('Import projects from Gitea')
%p
- - link_to_personal_token = link_to(_('Personal Access Token'), 'https://docs.gitea.io/en-us/api-usage/#authentication-via-the-api')
- = _('To get started, please enter your Gitea Host URL and a %{link_to_personal_token}.').html_safe % { link_to_personal_token: link_to_personal_token }
+ - link_to_personal_token = link_to(_('personal access token'), 'https://docs.gitea.io/en-us/api-usage/#authentication-via-the-api')
+ = _('To get started, please enter your Gitea host URL and a %{link_to_personal_token}.').html_safe % { link_to_personal_token: link_to_personal_token }
= form_tag personal_access_token_import_gitea_path do
= hidden_field_tag(:namespace_id, params[:namespace_id])
.form-group.row
- = label_tag :gitea_host_url, _('Gitea Host URL'), class: 'col-form-label col-sm-2'
+ = label_tag :gitea_host_url, _('Gitea host URL'), class: 'col-form-label col-sm-2'
.col-sm-4
= text_field_tag :gitea_host_url, nil, placeholder: 'https://gitea.com', class: 'form-control gl-form-input'
.form-group.row
- = label_tag :personal_access_token, _('Personal Access Token'), class: 'col-form-label col-sm-2'
+ = label_tag :personal_access_token, _('Personal access token'), class: 'col-form-label col-sm-2'
.col-sm-4
= text_field_tag :personal_access_token, nil, class: 'form-control gl-form-input'
.form-actions
- = submit_tag _('List Your Gitea Repositories'), class: 'gl-button btn btn-confirm'
+ = render Pajamas::ButtonComponent.new(type: :submit, variant: :confirm) do
+ = _('List your Gitea repositories')
diff --git a/app/views/import/gitea/status.html.haml b/app/views/import/gitea/status.html.haml
index c717d4848f4..2dde642d8f0 100644
--- a/app/views/import/gitea/status.html.haml
+++ b/app/views/import/gitea/status.html.haml
@@ -1,6 +1,6 @@
-- page_title _("Gitea Import")
+- page_title _("Gitea import")
%h1.page-title.gl-font-size-h-display
= custom_icon('gitea_logo')
- = _('Import Projects from Gitea')
+ = _('Import projects from Gitea')
= render 'import/githubish_status', provider: 'gitea', default_namespace: @namespace
diff --git a/app/views/layouts/_page.html.haml b/app/views/layouts/_page.html.haml
index 3bb59db32aa..2df77c57b4b 100644
--- a/app/views/layouts/_page.html.haml
+++ b/app/views/layouts/_page.html.haml
@@ -7,7 +7,7 @@
- sidebar_panel = super_sidebar_nav_panel(nav: nav, user: current_user, group: group, project: @project, current_ref: current_ref, ref_type: @ref_type, viewed_user: @user, organization: @organization)
- sidebar_data = super_sidebar_context(current_user, group: group, project: @project, panel: sidebar_panel, panel_type: nav).to_json
- %aside.js-super-sidebar.super-sidebar.super-sidebar-loading{ data: { root_path: root_path, sidebar: sidebar_data, toggle_new_nav_endpoint: profile_preferences_url, force_desktop_expanded_sidebar: @force_desktop_expanded_sidebar.to_s, command_palette: command_palette_data(project: @project).to_json } }
+ %aside.js-super-sidebar.super-sidebar.super-sidebar-loading{ data: { root_path: root_path, sidebar: sidebar_data, toggle_new_nav_endpoint: profile_preferences_path, force_desktop_expanded_sidebar: @force_desktop_expanded_sidebar.to_s, command_palette: command_palette_data(project: @project).to_json } }
- if display_whats_new?
#whats-new-app{ data: { version_digest: whats_new_version_digest } }
diff --git a/app/views/shared/nav/_your_work_scope_header.html.haml b/app/views/shared/nav/_your_work_scope_header.html.haml
index 86172fb14ed..cdd0be3c682 100644
--- a/app/views/shared/nav/_your_work_scope_header.html.haml
+++ b/app/views/shared/nav/_your_work_scope_header.html.haml
@@ -1,5 +1,5 @@
%li.context-header
- = link_to root_url, title: _('Your work'), class: 'has-tooltip', data: { container: 'body', placement: 'right' } do
+ = link_to root_path, title: _('Your work'), class: 'has-tooltip', data: { container: 'body', placement: 'right' } do
%span.avatar-container.icon-avatar.rect-avatar.s32
= sprite_icon('work', size: 18)
%span.sidebar-context-title
diff --git a/config/feature_flags/development/keep_merge_commits_for_approvals.yml b/config/feature_flags/development/keep_merge_commits_for_approvals.yml
new file mode 100644
index 00000000000..a4791219dff
--- /dev/null
+++ b/config/feature_flags/development/keep_merge_commits_for_approvals.yml
@@ -0,0 +1,8 @@
+---
+name: keep_merge_commits_for_approvals
+introduced_by_url: https://gitlab.com/gitlab-org/gitlab/-/merge_requests/127744
+rollout_issue_url: https://gitlab.com/gitlab-org/gitlab/-/issues/419921
+milestone: '16.3'
+type: development
+group: group::code review
+default_enabled: false
diff --git a/config/routes/user.rb b/config/routes/user.rb
index 5723421cad2..c66f4872b21 100644
--- a/config/routes/user.rb
+++ b/config/routes/user.rb
@@ -55,6 +55,7 @@ devise_scope :user do
get '/users/almost_there' => 'confirmations#almost_there'
post '/users/resend_verification_code', to: 'sessions#resend_verification_code'
get '/users/successful_verification', to: 'sessions#successful_verification'
+ patch '/users/update_email', to: 'sessions#update_email'
# Redirect on GitHub authorization request errors. E.g. it could happen when user:
# 1. cancel authorization the GitLab OAuth app via GitHub to import GitHub repos
diff --git a/db/migrate/20230710094027_add_protected_paths_for_get_request_to_application_settings.rb b/db/migrate/20230710094027_add_protected_paths_for_get_request_to_application_settings.rb
new file mode 100644
index 00000000000..8f69a1cfdee
--- /dev/null
+++ b/db/migrate/20230710094027_add_protected_paths_for_get_request_to_application_settings.rb
@@ -0,0 +1,26 @@
+# frozen_string_literal: true
+
+class AddProtectedPathsForGetRequestToApplicationSettings < Gitlab::Database::Migration[2.1]
+ CONSTRAINT_NAME = 'app_settings_protected_paths_max_length'
+
+ disable_ddl_transaction!
+
+ def up
+ with_lock_retries do
+ add_column :application_settings, :protected_paths_for_get_request,
+ :text,
+ array: true,
+ default: [],
+ null: false,
+ if_not_exists: true
+ end
+
+ add_check_constraint :application_settings, 'CARDINALITY(protected_paths_for_get_request) <= 100', CONSTRAINT_NAME
+ end
+
+ def down
+ with_lock_retries do
+ remove_column :application_settings, :protected_paths_for_get_request, if_exists: true
+ end
+ end
+end
diff --git a/db/migrate/20230711151845_add_email_reset_offered_at_to_user_details.rb b/db/migrate/20230711151845_add_email_reset_offered_at_to_user_details.rb
new file mode 100644
index 00000000000..45b6a7fd57f
--- /dev/null
+++ b/db/migrate/20230711151845_add_email_reset_offered_at_to_user_details.rb
@@ -0,0 +1,9 @@
+# frozen_string_literal: true
+
+class AddEmailResetOfferedAtToUserDetails < Gitlab::Database::Migration[2.1]
+ enable_lock_retries!
+
+ def change
+ add_column :user_details, :email_reset_offered_at, :datetime_with_timezone
+ end
+end
diff --git a/db/migrate/20230728193736_add_has_merge_request_to_vulnerability_reads.rb b/db/migrate/20230728193736_add_has_merge_request_to_vulnerability_reads.rb
new file mode 100644
index 00000000000..aec120f3c80
--- /dev/null
+++ b/db/migrate/20230728193736_add_has_merge_request_to_vulnerability_reads.rb
@@ -0,0 +1,13 @@
+# frozen_string_literal: true
+
+class AddHasMergeRequestToVulnerabilityReads < Gitlab::Database::Migration[2.1]
+ enable_lock_retries!
+
+ def up
+ add_column :vulnerability_reads, :has_merge_request, :boolean, default: false, if_not_exists: true
+ end
+
+ def down
+ remove_column :vulnerability_reads, :has_merge_request, if_exists: true
+ end
+end
diff --git a/db/post_migrate/20230725035942_create_sync_index_for_ci_pipline_variables_pipeline_id.rb b/db/post_migrate/20230725035942_create_sync_index_for_ci_pipline_variables_pipeline_id.rb
new file mode 100644
index 00000000000..9b2207fd10f
--- /dev/null
+++ b/db/post_migrate/20230725035942_create_sync_index_for_ci_pipline_variables_pipeline_id.rb
@@ -0,0 +1,17 @@
+# frozen_string_literal: true
+
+class CreateSyncIndexForCiPiplineVariablesPipelineId < Gitlab::Database::Migration[2.1]
+ disable_ddl_transaction!
+
+ TABLE_NAME = :ci_pipeline_variables
+ INDEX_NAME = 'index_ci_pipeline_variables_on_pipeline_id_bigint_and_key'
+ COLUMNS = [:pipeline_id_convert_to_bigint, :key]
+
+ def up
+ add_concurrent_index TABLE_NAME, COLUMNS, unique: true, name: INDEX_NAME
+ end
+
+ def down
+ remove_concurrent_index_by_name TABLE_NAME, INDEX_NAME
+ end
+end
diff --git a/db/schema_migrations/20230710094027 b/db/schema_migrations/20230710094027
new file mode 100644
index 00000000000..ecbfffa8df6
--- /dev/null
+++ b/db/schema_migrations/20230710094027
@@ -0,0 +1 @@
+1b0168b871414523c2da47b811336660c51819512b54c0d8728386156028615a \ No newline at end of file
diff --git a/db/schema_migrations/20230711151845 b/db/schema_migrations/20230711151845
new file mode 100644
index 00000000000..1a96738833c
--- /dev/null
+++ b/db/schema_migrations/20230711151845
@@ -0,0 +1 @@
+4285dfe98db13ffe7f97a69f95e14c584b32cb20526766a8179012c616ef3d0a \ No newline at end of file
diff --git a/db/schema_migrations/20230725035942 b/db/schema_migrations/20230725035942
new file mode 100644
index 00000000000..4584dd37211
--- /dev/null
+++ b/db/schema_migrations/20230725035942
@@ -0,0 +1 @@
+9447f1376419aa2ab7df07a86ee18cd9f9de223a874f24c9b7177235712e768c \ No newline at end of file
diff --git a/db/schema_migrations/20230728193736 b/db/schema_migrations/20230728193736
new file mode 100644
index 00000000000..5b02838e541
--- /dev/null
+++ b/db/schema_migrations/20230728193736
@@ -0,0 +1 @@
+95c35da7b042c627c8abe1f23cc6287a034206eee4a716f213055dba0da26934 \ No newline at end of file
diff --git a/db/structure.sql b/db/structure.sql
index 2e66ac6efa3..f83499f1442 100644
--- a/db/structure.sql
+++ b/db/structure.sql
@@ -11860,6 +11860,7 @@ CREATE TABLE application_settings (
package_registry_allow_anyone_to_pull_option boolean DEFAULT true NOT NULL,
bulk_import_max_download_file_size bigint DEFAULT 5120 NOT NULL,
max_import_remote_file_size bigint DEFAULT 10240 NOT NULL,
+ protected_paths_for_get_request text[] DEFAULT '{}'::text[] NOT NULL,
CONSTRAINT app_settings_container_reg_cleanup_tags_max_list_size_positive CHECK ((container_registry_cleanup_tags_service_max_list_size >= 0)),
CONSTRAINT app_settings_container_registry_pre_import_tags_rate_positive CHECK ((container_registry_pre_import_tags_rate >= (0)::numeric)),
CONSTRAINT app_settings_dep_proxy_ttl_policies_worker_capacity_positive CHECK ((dependency_proxy_ttl_group_policy_worker_capacity >= 0)),
@@ -11870,6 +11871,7 @@ CREATE TABLE application_settings (
CONSTRAINT app_settings_max_terraform_state_size_bytes_check CHECK ((max_terraform_state_size_bytes >= 0)),
CONSTRAINT app_settings_p_cleanup_package_file_worker_capacity_positive CHECK ((packages_cleanup_package_file_worker_capacity >= 0)),
CONSTRAINT app_settings_pkg_registry_cleanup_pol_worker_capacity_gte_zero CHECK ((package_registry_cleanup_policies_worker_capacity >= 0)),
+ CONSTRAINT app_settings_protected_paths_max_length CHECK ((cardinality(protected_paths_for_get_request) <= 100)),
CONSTRAINT app_settings_registry_exp_policies_worker_capacity_positive CHECK ((container_registry_expiration_policies_worker_capacity >= 0)),
CONSTRAINT app_settings_registry_repair_worker_max_concurrency_positive CHECK ((container_registry_data_repair_detail_worker_max_concurrency >= 0)),
CONSTRAINT app_settings_yaml_max_depth_positive CHECK ((max_yaml_depth > 0)),
@@ -23823,6 +23825,7 @@ CREATE TABLE user_details (
discord text DEFAULT ''::text NOT NULL,
enterprise_group_id bigint,
enterprise_group_associated_at timestamp with time zone,
+ email_reset_offered_at timestamp with time zone,
CONSTRAINT check_245664af82 CHECK ((char_length(webauthn_xid) <= 100)),
CONSTRAINT check_444573ee52 CHECK ((char_length(skype) <= 500)),
CONSTRAINT check_466a25be35 CHECK ((char_length(twitter) <= 500)),
@@ -24596,6 +24599,7 @@ CREATE TABLE vulnerability_reads (
casted_cluster_agent_id bigint,
namespace_id bigint,
dismissal_reason smallint,
+ has_merge_request boolean DEFAULT false,
CONSTRAINT check_380451bdbe CHECK ((char_length(location_image) <= 2048)),
CONSTRAINT check_a105eb825a CHECK ((char_length(cluster_agent_id) <= 10))
);
@@ -30916,6 +30920,8 @@ CREATE INDEX index_ci_pipeline_schedules_on_project_id ON ci_pipeline_schedules
CREATE UNIQUE INDEX index_ci_pipeline_variables_on_pipeline_id_and_key ON ci_pipeline_variables USING btree (pipeline_id, key);
+CREATE UNIQUE INDEX index_ci_pipeline_variables_on_pipeline_id_bigint_and_key ON ci_pipeline_variables USING btree (pipeline_id_convert_to_bigint, key);
+
CREATE INDEX index_ci_pipelines_config_on_pipeline_id ON ci_pipelines_config USING btree (pipeline_id);
CREATE INDEX index_ci_pipelines_for_ondemand_dast_scans ON ci_pipelines USING btree (id) WHERE (source = 13);
diff --git a/doc/administration/reference_architectures/50k_users.md b/doc/administration/reference_architectures/50k_users.md
index 4c815a84070..b2f01266b14 100644
--- a/doc/administration/reference_architectures/50k_users.md
+++ b/doc/administration/reference_architectures/50k_users.md
@@ -2283,7 +2283,7 @@ the overall makeup as desired as long as the minimum CPU and Memory requirements
| Service Node Group | Nodes | Configuration | GCP | AWS | Min Allocatable CPUs and Memory |
|---------------------|-------|-------------------------|-----------------|--------------|---------------------------------|
-| Webservice | 16 | 32 vCPU, 28.8 GB memory | `n1-highcpu-32` | `m5.8xlarge` | 510 vCPU, 472 GB memory |
+| Webservice | 16 | 32 vCPU, 28.8 GB memory | `n1-highcpu-32` | `c5.9xlarge` | 510 vCPU, 472 GB memory |
| Sidekiq | 4 | 4 vCPU, 15 GB memory | `n1-standard-4` | `m5.xlarge` | 15.5 vCPU, 50 GB memory |
| Supporting services | 2 | 4 vCPU, 15 GB memory | `n1-standard-4` | `m5.xlarge` | 7.75 vCPU, 25 GB memory |
diff --git a/doc/architecture/blueprints/cells/cells-feature-admin-area.md b/doc/architecture/blueprints/cells/cells-feature-admin-area.md
index 31d5388d40b..d788e9873a8 100644
--- a/doc/architecture/blueprints/cells/cells-feature-admin-area.md
+++ b/doc/architecture/blueprints/cells/cells-feature-admin-area.md
@@ -15,21 +15,16 @@ we can document the reasons for not choosing this approach.
# Cells: Admin Area
-In our Cells architecture proposal we plan to share all admin related tables in
-GitLab. This allows simpler management of all Cells in one interface and reduces
-the risk of settings diverging in different Cells. This introduces challenges
-with admin pages that allow you to manage data that will be spread across all
-Cells.
+In our Cells architecture proposal we plan to share all admin related tables in GitLab.
+This allows for simpler management of all Cells in one interface and reduces the risk of settings diverging in different Cells.
+This introduces challenges with Admin Area pages that allow you to manage data that will be spread across all Cells.
## 1. Definition
-There are consequences for admin pages that contain data that spans "the whole
-instance" as the Admin pages may be served by any Cell or possibly just 1 cell.
-There are already many parts of the Admin interface that will have data that
-spans many cells. For example lists of all Groups, Projects, Topics, Jobs,
-Analytics, Applications and more. There are also administrative monitoring
-capabilities in the Admin page that will span many cells such as the "Background
-Jobs" and "Background Migrations" pages.
+There are consequences for Admin Area pages that contain data that span "the whole instance" as the Admin Area pages may be served by any Cell or possibly just one Cell.
+There are already many parts of the Admin Area that will have data that span many Cells.
+For example lists of all Groups, Projects, Topics, Jobs, Analytics, Applications and more.
+There are also administrative monitoring capabilities in the Admin Area that will span many Cells such as the "Background Jobs" and "Background Migrations" pages.
## 2. Data flow
@@ -38,18 +33,18 @@ Jobs" and "Background Migrations" pages.
We will need to decide how to handle these exceptions with a few possible
options:
-1. Move all these pages out into a dedicated per-cell Admin section. Probably
+1. Move all these pages out into a dedicated per-Cell admin section. Probably
the URL will need to be routable to a single Cell like `/cells/<cell_id>/admin`,
- then we can display this data per Cell. These pages will be distinct from
- other Admin pages which control settings that are shared across all Cells. We
+ then we can display these data per Cell. These pages will be distinct from
+ other Admin Area pages which control settings that are shared across all Cells. We
will also need to consider how this impacts self-managed customers and
- whether, or not, this should be visible for single-cell instances of GitLab.
+ whether, or not, this should be visible for single-Cell instances of GitLab.
1. Build some aggregation interfaces for this data so that it can be fetched
from all Cells and presented in a single UI. This may be beneficial to an
administrator that needs to see and filter all data at a glance, especially
when they don't know which Cell the data is on. The downside, however, is
- that building this kind of aggregation is very tricky when all the Cells are
- designed to be totally independent, and it does also enforce more strict
+ that building this kind of aggregation is very tricky when all Cells are
+ designed to be totally independent, and it does also enforce stricter
requirements on compatibility between Cells.
## 4. Evaluation
diff --git a/doc/architecture/blueprints/cells/cells-feature-backups.md b/doc/architecture/blueprints/cells/cells-feature-backups.md
index b5d5d7afdcf..3d20d6e2caa 100644
--- a/doc/architecture/blueprints/cells/cells-feature-backups.md
+++ b/doc/architecture/blueprints/cells/cells-feature-backups.md
@@ -15,47 +15,38 @@ we can document the reasons for not choosing this approach.
# Cells: Backups
-Each cells will take its own backups, and consequently have its own isolated
-backup / restore procedure.
+Each Cell will take its own backups, and consequently have its own isolated backup/restore procedure.
## 1. Definition
-GitLab Backup takes a backup of the PostgreSQL database used by the application,
-and also Git repository data.
+GitLab backup takes a backup of the PostgreSQL database used by the application, and also Git repository data.
## 2. Data flow
-Each cell has a number of application databases to back up (for example, `main`, and `ci`).
-
-Additionally, there may be cluster-wide metadata tables (for example, `users` table)
-which is directly accessible via PostgreSQL.
+Each Cell has a number of application databases to back up (for example, `main`, and `ci`).
+Additionally, there may be cluster-wide metadata tables (for example, `users` table) which is directly accessible via PostgreSQL.
## 3. Proposal
### 3.1. Cluster-wide metadata
-It is currently unknown how cluster-wide metadata tables will be accessible. We
-may choose to have cluster-wide metadata tables backed up separately, or have
-each cell back up its copy of cluster-wide metdata tables.
+It is currently unknown how cluster-wide metadata tables will be accessible.
+We may choose to have cluster-wide metadata tables backed up separately, or have each Cell back up its copy of cluster-wide metadata tables.
### 3.2 Consistency
#### 3.2.1 Take backups independently
-As each cell will communicate with each other via API, and there will be no joins
-to the users table, it should be acceptable for each cell to take a backup
-independently of each other.
+As each Cell will communicate with each other via API, and there will be no joins to the `users` table, it should be acceptable for each Cell to take a backup independently of each other.
#### 3.2.2 Enforce snapshots
-We can require that each cell take a snapshot for the PostgreSQL databases at
-around the same time to allow for a consistent-enough backup.
+We can require that each Cell take a snapshot for the PostgreSQL databases at around the same time to allow for a consistent enough backup.
## 4. Evaluation
-As the number of cells increases, it will likely not be feasible to take a
-snapshot at the same time for all cells. Hence taking backups independently is
-the better option.
+As the number of Cells increases, it will likely not be feasible to take a snapshot at the same time for all Cells.
+Hence taking backups independently is the better option.
## 4.1. Pros
diff --git a/doc/architecture/blueprints/cells/cells-feature-ci-runners.md b/doc/architecture/blueprints/cells/cells-feature-ci-runners.md
index 8a6790ae49f..4e7cea5bfd5 100644
--- a/doc/architecture/blueprints/cells/cells-feature-ci-runners.md
+++ b/doc/architecture/blueprints/cells/cells-feature-ci-runners.md
@@ -15,156 +15,129 @@ we can document the reasons for not choosing this approach.
# Cells: CI Runners
-GitLab in order to execute CI jobs [GitLab Runner](https://gitlab.com/gitlab-org/gitlab-runner/),
-very often managed by customer in their infrastructure.
-
-All CI jobs created as part of CI pipeline are run in a context of project
-it poses a challenge how to manage GitLab Runners.
+GitLab executes CI jobs via [GitLab Runner](https://gitlab.com/gitlab-org/gitlab-runner/), very often managed by customers in their infrastructure.
+All CI jobs created as part of the CI pipeline are run in the context of a Project.
+This poses a challenge as to how to manage GitLab Runners.
## 1. Definition
There are 3 different types of runners:
-- instance-wide: runners that are registered globally with specific tags (selection criteria)
-- group runners: runners that execute jobs from a given top-level group or subprojects of that group
-- project runners: runners that execute jobs from projects or many projects: some runners might
- have projects assigned from projects in different top-level groups.
+- Instance-wide: Runners that are registered globally with specific tags (selection criteria)
+- Group runners: Runners that execute jobs from a given top-level Group or Projects in that Group
+- Project runners: Runners that execute jobs from one Project or many Projects: some runners might
+ have Projects assigned from Projects in different top-level Groups.
-This alongside with existing data structure where `ci_runners` is a table describing
-all types of runners poses a challenge how the `ci_runners` should be managed in a Cells environment.
+This, alongside the existing data structure where `ci_runners` is a table describing all types of runners, poses a challenge as to how the `ci_runners` should be managed in a Cells environment.
## 2. Data flow
-GitLab Runners use a set of globally scoped endpoints to:
+GitLab runners use a set of globally scoped endpoints to:
-- registration of a new runner via registration token `https://gitlab.com/api/v4/runners`
+- Register a new runner via registration token `https://gitlab.com/api/v4/runners`
([subject for removal](../runner_tokens/index.md)) (`registration token`)
-- creation of a new runner in the context of a user `https://gitlab.com/api/v4/user/runners` (`runner token`)
-- requests jobs via an authenticated `https://gitlab.com/api/v4/jobs/request` endpoint (`runner token`)
-- upload job status via `https://gitlab.com/api/v4/jobs/:job_id` (`build token`)
-- upload trace via `https://gitlab.com/api/v4/jobs/:job_id/trace` (`build token`)
-- download and upload artifacts via `https://gitlab.com/api/v4/jobs/:job_id/artifacts` (`build token`)
+- Create a new runner in the context of a user `https://gitlab.com/api/v4/user/runners` (`runner token`)
+- Request jobs via an authenticated `https://gitlab.com/api/v4/jobs/request` endpoint (`runner token`)
+- Upload job status via `https://gitlab.com/api/v4/jobs/:job_id` (`build token`)
+- Upload trace via `https://gitlab.com/api/v4/jobs/:job_id/trace` (`build token`)
+- Download and upload artifacts via `https://gitlab.com/api/v4/jobs/:job_id/artifacts` (`build token`)
Currently three types of authentication tokens are used:
-- runner registration token ([subject for removal](../runner_tokens/index.md))
-- runner token representing an registered runner in a system with specific configuration (`tags`, `locked`, etc.)
-- build token representing an ephemeral token giving a limited access to updating a specific
- job, uploading artifacts, downloading dependent artifacts, downloading and uploading
- container registry images
+- Runner registration token ([subject for removal](../runner_tokens/index.md))
+- Runner token representing a registered runner in a system with specific configuration (`tags`, `locked`, etc.)
+- Build token representing an ephemeral token giving limited access to updating a specific job, uploading artifacts, downloading dependent artifacts, downloading and uploading container registry images
-Each of those endpoints do receive an authentication token via header (`JOB-TOKEN` for `/trace`)
-or body parameter (`token` all other endpoints).
+Each of those endpoints receives an authentication token via header (`JOB-TOKEN` for `/trace`) or body parameter (`token` for all other endpoints).
-Since the CI pipeline would be created in a context of a specific Cell it would be required
-that pick of a build would have to be processed by that particular Cell. This requires
-that build picking depending on a solution would have to be either:
+Since the CI pipeline would be created in the context of a specific Cell, it would be required that the picking of a build be processed by that particular Cell.
+This requires that build picking depending on a solution would have to be either:
-- routed to correct Cell for a first time
-- be made to be two phase: request build from global pool, claim build on a specific Cell using a Cell specific URL
+- Routed to the correct Cell for the first time
+- Be two-phased: Request build from global pool, claim build on a specific Cell using a Cell specific URL
## 3. Proposal
-This section describes various proposals. Reader should consider that those
-proposals do describe solutions for different problems. Many or some aspects
-of those proposals might be the solution to the stated problem.
-
### 3.1. Authentication tokens
-Even though the paths for CI Runners are not routable they can be made routable with
-those two possible solutions:
+Even though the paths for CI runners are not routable, they can be made routable with these two possible solutions:
- The `https://gitlab.com/api/v4/jobs/request` uses a long polling mechanism with
- a ticketing mechanism (based on `X-GitLab-Last-Update` header). Runner when first
- starts sends a request to GitLab to which GitLab responds with either a build to pick
+ a ticketing mechanism (based on `X-GitLab-Last-Update` header). When the runner first
+ starts, it sends a request to GitLab to which GitLab responds with either a build to pick
by runner. This value is completely controlled by GitLab. This allows GitLab
- to use JWT or any other means to encode `cell` identifier that could be easily
+ to use JWT or any other means to encode a `cell` identifier that could be easily
decodable by Router.
-- The majority of communication (in terms of volume) is using `build token` making it
- the easiest target to change since GitLab is sole owner of the token that Runner later
- uses for specific job. There were prior discussions about not storing `build token`
- but rather using `JWT` token with defined scopes. Such token could encode the `cell`
- to which router could easily route all requests.
+- The majority of communication (in terms of volume) is using `build token`, making it
+ the easiest target to change since GitLab is the sole owner of the token that the runner later
+ uses for a specific job. There were prior discussions about not storing the `build token`
+ but rather using a `JWT` token with defined scopes. Such a token could encode the `cell`
+ to which the Router could route all requests.
### 3.2. Request body
-- The most of used endpoints pass authentication token in request body. It might be desired
- to use HTTP Headers as an easier way to access this information by Router without
+- The most used endpoints pass the authentication token in the request body. It might be desired
+ to use HTTP headers as an easier way to access this information by Router without
a need to proxy requests.
-### 3.3. Instance-wide are Cell local
+### 3.3. Instance-wide are Cell-local
We can pick a design where all runners are always registered and local to a given Cell:
-- Each Cell has it's own set of instance-wide runners that are updated at it's own pace
-- The project runners can only be linked to projects from the same organization
- creating strong isolation.
+- Each Cell has its own set of instance-wide runners that are updated at its own pace
+- The Project runners can only be linked to Projects from the same Organization, creating strong isolation.
- In this model the `ci_runners` table is local to the Cell.
-- In this model we would require the above endpoints to be scoped to a Cell in some way
- or made routable. It might be via prefixing them, adding additional Cell parameter,
- or providing much more robust way to decode runner token and match it to Cell.
-- If routable token is used, we could move away from cryptographic random stored in
- database to rather prefer to use JWT tokens that would encode
-- The Admin Area showing registered Runners would have to be scoped to a Cell
-
-This model might be desired since it provides strong isolation guarantees.
-This model does significantly increase maintenance overhead since each Cell is managed
-separately.
+- In this model we would require the above endpoints to be scoped to a Cell in some way, or be made routable. It might be via prefixing them, adding additional Cell parameters, or providing much more robust ways to decode runner tokens and match it to a Cell.
+- If a routable token is used, we could move away from cryptographic random stored in database to rather prefer to use JWT tokens.
+- The Admin Area showing registered runners would have to be scoped to a Cell.
-This model may require adjustments to runner tags feature so that projects have consistent runner experience across cells.
+This model might be desired because it provides strong isolation guarantees.
+This model does significantly increase maintenance overhead because each Cell is managed separately.
+This model may require adjustments to the runner tags feature so that Projects have a consistent runner experience across Cells.
### 3.4. Instance-wide are cluster-wide
-Contrary to proposal where all runners are Cell local, we can consider that runners
+Contrary to the proposal where all runners are Cell-local, we can consider that runners
are global, or just instance-wide runners are global.
-However, this requires significant overhaul of system and to change the following aspects:
+However, this requires significant overhaul of the system and we would have to change the following aspects:
-- `ci_runners` table would likely have to be split decomposed into `ci_instance_runners`, ...
-- all interfaces would have to be adopted to use correct table
-- build queuing would have to be reworked to be two phase where each Cell would know of all pending
- and running builds, but the actual claim of a build would happen against a Cell containing data
-- likely `ci_pending_builds` and `ci_running_builds` would have to be made `cluster-wide` tables
- increasing likelihood of creating hotspots in a system related to CI queueing
+- The `ci_runners` table would likely have to be decomposed into `ci_instance_runners`, ...
+- All interfaces would have to be adopted to use the correct table.
+- Build queuing would have to be reworked to be two-phased where each Cell would know of all pending and running builds, but the actual claim of a build would happen against a Cell containing data.
+- It is likely that `ci_pending_builds` and `ci_running_builds` would have to be made `cluster-wide` tables, increasing the likelihood of creating hotspots in a system related to CI queueing.
-This model makes it complex to implement from engineering side. Does make some data being shared
-between Cells. Creates hotspots / scalability issues in a system (ex. during abuse) that
-might impact experience of organizations on other Cells.
+This model is complex to implement from an engineering perspective.
+Some data are shared between Cells.
+It creates hotspots/scalability issues in a system that might impact the experience of Organizations on other Cells, for instance during abuse.
### 3.5. GitLab CI Daemon
-Another potential solution to explore is to have a dedicated service responsible for builds queueing
-owning it's database and working in a model of either sharded or celled service. There were prior
-discussions about [CI/CD Daemon](https://gitlab.com/gitlab-org/gitlab/-/issues/19435).
+Another potential solution to explore is to have a dedicated service responsible for builds queueing, owning its database and working in a model of either sharded or Cell-ed service.
+There were prior discussions about [CI/CD Daemon](https://gitlab.com/gitlab-org/gitlab/-/issues/19435).
-If the service would be sharded:
+If the service is sharded:
-- depending on a model if runners are cluster-wide or cell-local this service would have to fetch
- data from all Cells
-- if the sharded service would be used we could adapt a model of either sharing database containing
- `ci_pending_builds/ci_running_builds` with the service
-- if the sharded service would be used we could consider a push model where each Cell pushes to CI/CD Daemon
- builds that should be picked by Runner
-- the sharded service would be aware which Cell is responsible for processing the given build and could
- route processing requests to designated Cell
+- Depending on the model, if runners are cluster-wide or Cell-local, this service would have to fetch data from all Cells.
+- If the sharded service is used we could adapt a model of sharing a database containing `ci_pending_builds/ci_running_builds` with the service.
+- If the sharded service is used we could consider a push model where each Cell pushes to CI/CD Daemon builds that should be picked by runner.
+- The sharded service would be aware which Cell is responsible for processing the given build and could route processing requests to the designated Cell.
-If the service would be celled:
+If the service is Cell-ed:
-- all expectations of routable endpoints are still valid
+- All expectations of routable endpoints are still valid.
-In general usage of CI Daemon does not help significantly with the stated problem. However, this offers
-a few upsides related to more efficient processing and decoupling model: push model and it opens a way
-to offer stateful communication with GitLab Runners (ex. gRPC or Websockets).
+In general usage of CI Daemon does not help significantly with the stated problem.
+However, this offers a few upsides related to more efficient processing and decoupling model: push model and it opens a way to offer stateful communication with GitLab runners (ex. gRPC or Websockets).
## 4. Evaluation
-Considering all solutions it appears that solution giving the most promise is:
+Considering all options it appears that the most promising solution is to:
-- use "instance-wide are Cell local"
-- refine endpoints to have routable identities (either via specific paths, or better tokens)
+- Use [Instance-wide are Cell-local](#33-instance-wide-are-cell-local)
+- Refine endpoints to have routable identities (either via specific paths, or better tokens)
-Other potential upsides is to get rid of `ci_builds.token` and rather use a `JWT token`
-that can much better and easier encode wider set of scopes allowed by CI runner.
+Another potential upside is to get rid of `ci_builds.token` and rather use a `JWT token` that can much better and easier encode a wider set of scopes allowed by CI runner.
## 4.1. Pros
diff --git a/doc/architecture/blueprints/cells/cells-feature-container-registry.md b/doc/architecture/blueprints/cells/cells-feature-container-registry.md
index a5761808941..25af65a8700 100644
--- a/doc/architecture/blueprints/cells/cells-feature-container-registry.md
+++ b/doc/architecture/blueprints/cells/cells-feature-container-registry.md
@@ -15,46 +15,37 @@ we can document the reasons for not choosing this approach.
# Cells: Container Registry
-GitLab Container Registry is a feature allowing to store Docker Container Images
-in GitLab. You can read about GitLab integration [here](../../../user/packages/container_registry/index.md).
+GitLab [Container Registry](../../../user/packages/container_registry/index.md) is a feature that allows you to store Docker container images in GitLab.
## 1. Definition
-GitLab Container Registry is a complex service requiring usage of PostgreSQL, Redis
-and Object Storage dependencies. Right now there's undergoing work to introduce
-[Container Registry Metadata](../container_registry_metadata_database/index.md)
-to optimize data storage and image retention policies of Container Registry.
+GitLab Container Registry is a complex service requiring usage of PostgreSQL, Redis and Object Storage dependencies.
+Right now there is ongoing work to introduce [Container Registry Metadata](../container_registry_metadata_database/index.md) to optimize data storage and image retention policies of Container Registry.
-GitLab Container Registry is serving as a container for stored data,
-but on it's own does not authenticate `docker login`. The `docker login`
-is executed with user credentials (can be `personal access token`)
-or CI build credentials (ephemeral `ci_builds.token`).
+GitLab Container Registry is serving as a container for stored data, but on its own does not authenticate `docker login`.
+The `docker login` is executed with user credentials (can be `personal access token`) or CI build credentials (ephemeral `ci_builds.token`).
-Container Registry uses data deduplication. It means that the same blob
-(image layer) that is shared between many projects is stored only once.
+Container Registry uses data deduplication.
+It means that the same blob (image layer) that is shared between many Projects is stored only once.
Each layer is hashed by `sha256`.
-The `docker login` does request JWT time-limited authentication token that
-is signed by GitLab, but validated by Container Registry service. The JWT
-token does store all authorized scopes (`container repository images`)
-and operation types (`push` or `pull`). A single JWT authentication token
-can be have many authorized scopes. This allows container registry and client
-to mount existing blobs from another scopes. GitLab responds only with
-authorized scopes. Then it is up to GitLab Container Registry to validate
-if the given operation can be performed.
+The `docker login` does request a JWT time-limited authentication token that is signed by GitLab, but validated by Container Registry service.
+The JWT token does store all authorized scopes (`container repository images`) and operation types (`push` or `pull`).
+A single JWT authentication token can have many authorized scopes.
+This allows Container Registry and client to mount existing blobs from other scopes.
+GitLab responds only with authorized scopes.
+Then it is up to GitLab Container Registry to validate if the given operation can be performed.
-The GitLab.com pages are always scoped to project. Each project can have many
-container registry images attached.
+The GitLab.com pages are always scoped to a Project.
+Each Project can have many container registry images attached.
-Currently in case of GitLab.com the actual registry service is served
-via `https://registry.gitlab.com`.
+Currently, on GitLab.com the actual registry service is served via `https://registry.gitlab.com`.
The main identifiable problems are:
-- the authentication request (`https://gitlab.com/jwt/auth`) that is processed by GitLab.com
-- the `https://registry.gitlab.com` that is run by external service and uses it's own data store
-- the data deduplication, the Cells architecture with registry run in a Cell would reduce
- efficiency of data storage
+- The authentication request (`https://gitlab.com/jwt/auth`) that is processed by GitLab.com.
+- The `https://registry.gitlab.com` that is run by an external service and uses its own data store.
+- Data deduplication. The Cells architecture with registry run in a Cell would reduce efficiency of data storage.
## 2. Data flow
@@ -99,33 +90,24 @@ curl \
### 3.1. Shard Container Registry separately to Cells architecture
-Due to it's architecture it extensive architecture and in general highly scalable
-horizontal architecture it should be evaluated if the GitLab Container Registry
-should be run not in Cell, but in a Cluster and be scaled independently.
-
+Due to its extensive and in general highly scalable horizontal architecture, it should be evaluated if the GitLab Container Registry should be run not in a Cell, but in a Cluster and be scaled independently.
This might be easier, but would definitely not offer the same amount of data isolation.
### 3.2. Run Container Registry within a Cell
-It appears that except `/jwt/auth` which would likely have to be processed by Router
-(to decode `scope`) the container registry could be run as a local service of a Cell.
-
-The actual data at least in case of GitLab.com is not forwarded via registry,
-but rather served directly from Object Storage / CDN.
+It appears that except `/jwt/auth` which would likely have to be processed by Router (to decode `scope`) the Container Registry could be run as a local service of a Cell.
+The actual data at least in case of GitLab.com is not forwarded via registry, but rather served directly from Object Storage / CDN.
Its design encodes container repository image in a URL that is easily routable.
-It appears that we could re-use the same stateless Router service in front of Container Registry
-to serve manifests and blobs redirect.
+It appears that we could re-use the same stateless Router service in front of Container Registry to serve manifests and blobs redirect.
-The only downside is increased complexity of managing standalone registry for each Cell,
-but this might be desired approach.
+The only downside is increased complexity of managing a standalone registry for each Cell, but this might be the desired approach.
## 4. Evaluation
-There do not seem any theoretical problems with running GitLab Container Registry in a Cell.
-Service seems that can be easily made routable to work well.
-
-The practical complexities are around managing complex service from infrastructure side.
+There do not seem to be any theoretical problems with running GitLab Container Registry in a Cell.
+It seems that the service can be easily made routable to work well.
+The practical complexities are around managing a complex service from an infrastructure side.
## 4.1. Pros
diff --git a/doc/architecture/blueprints/cells/cells-feature-contributions-forks.md b/doc/architecture/blueprints/cells/cells-feature-contributions-forks.md
index 8a67383c5e4..8e144386908 100644
--- a/doc/architecture/blueprints/cells/cells-feature-contributions-forks.md
+++ b/doc/architecture/blueprints/cells/cells-feature-contributions-forks.md
@@ -15,37 +15,33 @@ we can document the reasons for not choosing this approach.
# Cells: Contributions: Forks
-[Forking workflow](../../../user/project/repository/forking_workflow.md) allows users
-to copy existing project sources into their own namespace of choice (personal or group).
+The [Forking workflow](../../../user/project/repository/forking_workflow.md) allows users to copy existing Project sources into their own namespace of choice (Personal or Group).
## 1. Definition
-[Forking workflow](../../../user/project/repository/forking_workflow.md) is common workflow
-with various usage patterns:
+The [Forking workflow](../../../user/project/repository/forking_workflow.md) is a common workflow with various usage patterns:
-- allows users to contribute back to upstream project
-- persist repositories into their personal namespace
-- copy to make changes and release as modified project
+- It allows users to contribute back to upstream Project.
+- It persists repositories into their Personal Namespace.
+- Users can copy to make changes and release as modified Project.
-Forks allow users not having write access to parent project to make changes. The forking workflow
-is especially important for the Open Source community which is able to contribute back
-to public projects. However, it is equally important in some companies which prefer the strong split
-of responsibilities and tighter access control. The access to project is restricted
-to designated list of developers.
+Forks allow users not having write access to a parent Project to make changes.
+The forking workflow is especially important for the open source community to contribute back to public Projects.
+However, it is equally important in some companies that prefer a strong split of responsibilities and tighter access control.
+The access to a Project is restricted to a designated list of developers.
Forks enable:
-- tighter control of who can modify the upstream project
-- split of the responsibilities: parent project might use CI configuration connecting to production systems
-- run CI pipelines in context of fork in much more restrictive environment
-- consider all forks to be unveted which reduces risks of leaking secrets, or any other information
- tied with the project
+- Tighter control of who can modify the upstream Project.
+- Split of responsibilities: Parent Project might use CI configuration connecting to production systems.
+- To run CI pipelines in the context of a fork in a much more restrictive environment.
+- To consider all forks to be unvetted which reduces risks of leaking secrets, or any other information tied to the Project.
-The forking model is problematic in Cells architecture for following reasons:
+The forking model is problematic in a Cells architecture for the following reasons:
-- Forks are clones of existing repositories, forks could be created across different organizations, Cells and Gitaly shards.
-- User can create merge request and contribute back to upstream project, this upstream project might in a different organization and Cell.
-- The merge request CI pipeline is to executed in a context of source project, but presented in a context of target project.
+- Forks are clones of existing repositories. Forks could be created across different Organizations, Cells and Gitaly shards.
+- Users can create merge requests and contribute back to an upstream Project. This upstream Project might be in a different Organization and Cell.
+- The merge request CI pipeline is executed in the context of the source Project, but presented in the context of the target Project.
## 2. Data flow
@@ -53,66 +49,55 @@ The forking model is problematic in Cells architecture for following reasons:
### 3.1. Intra-Cluster forks
-This proposal makes us to implement forks as a intra-ClusterCell forks where communication is done via API
-between all trusted Cells of a cluster:
-
-- Forks when created, they are created always in context of user choice of group.
-- Forks are isolated from Organization.
-- Organization or group owner could disable forking across organizations or forking in general.
-- When a Merge Request is created it is created in context of target project, referencing
- external project on another Cell.
-- To target project the merge reference is transfered that is used for presenting information
- in context of target project.
-- CI pipeline is fetched in context of source project as it-is today, the result is fetched into
- Merge Request of target project.
-- The Cell holding target project internally uses GraphQL to fetch status of source project
- and include in context of the information for merge request.
+This proposal implements forks as intra-Cluster forks where communication is done via API between all trusted Cells of a cluster:
+
+- Forks are created always in the context of a user's choice of Group.
+- Forks are isolated from the Organization.
+- Organization or Group owner could disable forking across Organizations, or forking in general.
+- A merge request is created in the context of the target Project, referencing the external Project on another Cell.
+- The merge reference is transferred to the target Project and is used for presenting information in the context of the target Project.
+- CI pipeline is fetched in the context of the source Project as it is today, the result is fetched into the merge request of the target Project.
+- The Cell holding the target Project internally uses GraphQL to fetch the status of the source Project and includes it in the context of the information for the merge request.
Upsides:
-- All existing forks continue to work as-is, as they are treated as intra-Cluster forks.
+- All existing forks continue to work as they are, as they are treated as intra-Cluster forks.
Downsides:
-- The purpose of Organizations is to provide strong isolation between organizations
- allowing to fork across does break security boundaries.
-- However, this is no different to ability of users today to clone repository to local computer
- and push it to any repository of choice.
-- Access control of source project can be lower than those of target project. System today
- requires that in order to contribute back the access level needs to be the same for fork and upstream.
-
-### 3.2. Forks are created in a personal namespace of the current organization
-
-Instead of creating projects across organizations, the forks are created in a user personal namespace
-tied with the organization. Example:
-
-- Each user that is part of organization receives their personal namespace. For example for `GitLab Inc.`
- it could be `gitlab.com/organization/gitlab-inc/@ayufan`.
-- The user has to fork into it's own personal namespace of the organization.
-- The user has that many personal namespaces as many organizations it belongs to.
-- The personal namespace behaves similar to currently offered personal namespace.
-- The user can manage and create projects within a personal namespace.
-- The organization can prevent or disable usage of personal namespaces disallowing forks.
-- All current forks are migrated into personal namespace of user in Organization.
-- All forks are part of to the organization.
-- The forks are not federated features.
-- The personal namespace and forked project do not share configuration with parent project.
-
-### 3.3. Forks are created as internal projects under current project
-
-Instead of creating projects across organizations, the forks are attachments to existing projects.
-Each user forking a project receives their unique project. Example:
-
-- For project: `gitlab.com/gitlab-org/gitlab`, forks would be created in `gitlab.com/gitlab-org/gitlab/@kamil-gitlab`.
-- Forks are created in a context of current organization, they do not cross organization boundaries
- and are managed by the organization.
+- The purpose of Organizations is to provide strong isolation between Organizations. Allowing to fork across does break security boundaries.
+- However, this is no different to the ability of users today to clone a repository to a local computer and push it to any repository of choice.
+- Access control of source Project can be lower than those of target Project. Today, the system requires that in order to contribute back, the access level needs to be the same for fork and upstream.
+
+### 3.2. Forks are created in a Personal Namespace of the current Organization
+
+Instead of creating Projects across Organizations, forks are created in a user's Personal Namespace tied to the Organization. Example:
+
+- Each user that is part of an Organization receives their Personal Namespace. For example for `GitLab Inc.` it could be `gitlab.com/organization/gitlab-inc/@ayufan`.
+- The user has to fork into their own Personal Namespace of the Organization.
+- The user has as many Personal Namespaces as Organizations they belong to.
+- The Personal Namespace behaves similar to the currently offered Personal Namespace.
+- The user can manage and create Projects within a Personal Namespace.
+- The Organization can prevent or disable usage of Personal Namespaces, disallowing forks.
+- All current forks are migrated into the Personal Namespace of the user in an Organization.
+- All forks are part of the Organization.
+- Forks are not federated features.
+- The Personal Namespace and forked Project do not share configuration with the parent Project.
+
+### 3.3. Forks are created as internal Projects under current Projects
+
+Instead of creating Projects across Organizations, forks are attachments to existing Projects.
+Each user forking a Project receives their unique Project. Example:
+
+- For Project: `gitlab.com/gitlab-org/gitlab`, forks would be created in `gitlab.com/gitlab-org/gitlab/@kamil-gitlab`.
+- Forks are created in the context of the current Organization, they do not cross Organization boundaries and are managed by the Organization.
- Tied to the user (or any other user-provided name of the fork).
-- The forks are not federated features.
+- Forks are not federated features.
Downsides:
-- Does not answer how to handle and migrate all exisiting forks.
-- Might share current group / project settings - breaking some security boundaries.
+- Does not answer how to handle and migrate all existing forks.
+- Might share current Group/Project settings, which could be breaking some security boundaries.
## 4. Evaluation
diff --git a/doc/architecture/blueprints/cells/cells-feature-data-migration.md b/doc/architecture/blueprints/cells/cells-feature-data-migration.md
index ef0865b4081..9ff661ddf68 100644
--- a/doc/architecture/blueprints/cells/cells-feature-data-migration.md
+++ b/doc/architecture/blueprints/cells/cells-feature-data-migration.md
@@ -6,15 +6,6 @@ description: 'Cells: Data migration'
<!-- vale gitlab.FutureTense = NO -->
-DISCLAIMER:
-This page may contain information related to upcoming products, features and
-functionality. It is important to note that the information presented is for
-informational purposes only, so please do not rely on the information for
-purchasing or planning purposes. Just like with all projects, the items
-mentioned on the page are subject to change or delay, and the development,
-release, and timing of any products, features, or functionality remain at the
-sole discretion of GitLab Inc.
-
This document is a work-in-progress and represents a very early state of the
Cells design. Significant aspects are not documented, though we expect to add
them in the future. This is one possible architecture for Cells, and we intend to
@@ -24,26 +15,18 @@ we can document the reasons for not choosing this approach.
# Cells: Data migration
-It is essential for Cells architecture to provide a way to migrate data out of big Cells
-into smaller ones. This describes various approaches to provide this type of split.
-
-We also need to handle for cases where data is already violating the expected
-isolation constraints of Cells (ie. references cannot span multiple
-organizations). We know that existing features like linked issues allowed users
-to link issues across any projects regardless of their hierarchy. There are many
-similar features. All of this data will need to be migrated in some way before
-it can be split across different cells. This may mean some data needs to be
-deleted, or the feature changed and modelled slightly differently before we can
-properly split or migrate the organizations between cells.
-
-Having schema deviations across different Cells, which is a necessary
-consequence of different databases, will also impact our ability to migrate
-data between cells. Different schemas impact our ability to reliably replicate
-data across cells and especially impact our ability to validate that the data is
-correctly replicated. It might force us to only be able to move data between
-cells when the schemas are all in sync (slowing down deployments and the
-rebalancing process) or possibly only migrate from newer to older schemas which
-would be complex.
+It is essential for a Cells architecture to provide a way to migrate data out of big Cells into smaller ones.
+This document describes various approaches to provide this type of split.
+
+We also need to handle cases where data is already violating the expected isolation constraints of Cells, for example references cannot span multiple Organizations.
+We know that existing features like linked issues allowed users to link issues across any Projects regardless of their hierarchy.
+There are many similar features.
+All of this data will need to be migrated in some way before it can be split across different Cells.
+This may mean some data needs to be deleted, or the feature needs to be changed and modelled slightly differently before we can properly split or migrate Organizations between Cells.
+
+Having schema deviations across different Cells, which is a necessary consequence of different databases, will also impact our ability to migrate data between Cells.
+Different schemas impact our ability to reliably replicate data across Cells and especially impact our ability to validate that the data is correctly replicated.
+It might force us to only be able to move data between Cells when the schemas are all in sync (slowing down deployments and the rebalancing process) or possibly only migrate from newer to older schemas which would be complex.
## 1. Definition
@@ -53,34 +36,27 @@ would be complex.
### 3.1. Split large Cells
-A single Cell can only be divided into many Cells. This is based on principle
-that it is easier to create exact clone of an existing Cell in many replicas
-out of which some will be made authoritative once migrated. Keeping those
-replicas up-to date with Cell 0 is also much easier due to pre-existing
-replication solutions that can replicate the whole systems: Geo, PostgreSQL
-physical replication, etc.
+A single Cell can only be divided into many Cells.
+This is based on the principle that it is easier to create an exact clone of an existing Cell in many replicas out of which some will be made authoritative once migrated.
+Keeping those replicas up-to-date with Cell 0 is also much easier due to pre-existing replication solutions that can replicate the whole systems: Geo, PostgreSQL physical replication, etc.
-1. All data of an organization needs to not be divided across many Cells.
+1. All data of an Organization must not be divided across many Cells.
1. Split should be doable online.
1. New Cells cannot contain pre-existing data.
1. N Cells contain exact replica of Cell 0.
1. The data of Cell 0 is live replicated to as many Cells it needs to be split.
-1. Once consensus is achieved between Cell 0 and N-Cells the organizations to be migrated away
- are marked as read-only cluster-wide.
-1. The `routes` is updated on for all organizations to be split to indicate an authoritative
- Cell holding the most recent data, like `gitlab-org` on `cell-100`.
-1. The data for `gitlab-org` on Cell 0, and on other non-authoritative N-Cells are dormant
- and will be removed in the future.
-1. All accesses to `gitlab-org` on a given Cell are validated about `cell_id` of `routes`
- to ensure that given Cell is authoritative to handle the data.
+1. Once consensus is achieved between Cell 0 and N-Cells, the Organizations to be migrated away are marked as read-only cluster-wide.
+1. The `routes` entry is updated for all Organizations to be split to indicate an authoritative Cell holding the most recent data, like `gitlab-org` on `cell-100`.
+1. The data for `gitlab-org` on Cell 0, and on other non-authoritative N-Cells are dormant and will be removed in the future.
+1. All accesses to `gitlab-org` on a given Cell are validated against the `cell_id` of `routes` to ensure that the given Cell is authoritative to handle the data.
#### More challenges of this proposal
1. There is no streaming replication capability for Elasticsearch, but you could
snapshot the whole Elasticsearch index and recreate, but this takes hours.
- It could be handled by pausing Elasticsearch indexing on the initial cell during
+ It could be handled by pausing Elasticsearch indexing on the initial Cell during
the migration as indexing downtime is not a big issue, but this still needs
- to be coordinated with the migration process
+ to be coordinated with the migration process.
1. Syncing Redis, Gitaly, CI Postgres, Main Postgres, registry Postgres, other
new data stores snapshots in an online system would likely lead to gaps
without a long downtime. You need to choose a sync point and at the sync
@@ -88,39 +64,31 @@ physical replication, etc.
there are to migrate at the same time the longer the write downtime for the
failover. We would also need to find a reliable place in the application to
actually block updates to all these systems with a high degree of
- confidence. In the past we've only been confident by shutting down all rails
- services because any rails process could write directly to any of these at
+ confidence. In the past we've only been confident by shutting down all Rails
+ services because any Rails process could write directly to any of these at
any time due to async workloads or other surprising code paths.
1. How to efficiently delete all the orphaned data. Locating all `ci_builds`
- associated with half the organizations would be very expensive if we have to
+ associated with half the Organizations would be very expensive if we have to
do joins. We haven't yet determined if we'd want to store an `organization_id`
column on every table, but this is the kind of thing it would be helpful for.
-### 3.2. Migrate organization from an existing Cell
-
-This is different to split, as we intend to perform logical and selective replication
-of data belonging to a single organization.
+### 3.2. Migrate Organization from an existing Cell
-Today this type of selective replication is only implemented by Gitaly where we can migrate
-Git repository from a single Gitaly node to another with minimal downtime.
+This is different to split, as we intend to perform logical and selective replication of data belonging to a single Organization.
+Today this type of selective replication is only implemented by Gitaly where we can migrate Git repository from a single Gitaly node to another with minimal downtime.
-In this model we would require identifying all resources belonging to a given organization:
-database rows, object storage files, Git repositories, etc. and selectively copy them over
-to another (likely) existing Cell importing data into it. Ideally ensuring that we can
-perform logical replication live of all changed data, but change similarly to split
-which Cell is authoritative for this organization.
+In this model we would require identifying all resources belonging to a given Organization: database rows, object storage files, Git repositories, etc. and selectively copy them over to another (likely) existing Cell importing data into it.
+Ideally we would ensure that we can perform live logical replication of all changed data, but, similarly to a split, change which Cell is authoritative for this Organization.
-1. It is hard to identify all resources belonging to organization.
-1. It requires either downtime for organization or a robust system to identify
- live changes made.
-1. It likely will require a full database structure analysis (more robust than project import/export)
- to perform selective PostgreSQL logical replication.
+1. It is hard to identify all resources belonging to an Organization.
+1. It requires either downtime for the Organization or a robust system to identify live changes made.
+1. It likely will require a full database structure analysis (more robust than Project import/export) to perform selective PostgreSQL logical replication.
#### More challenges of this proposal
1. Logical replication is still not performant enough to keep up with our
scale. Even if we could use logical replication we still don't have an
- efficient way to filter data related to a single organization without
+ efficient way to filter data related to a single Organization without
joining all the way to the `organizations` table which will slow down
logical replication dramatically.
diff --git a/doc/architecture/blueprints/cells/cells-feature-database-sequences.md b/doc/architecture/blueprints/cells/cells-feature-database-sequences.md
index 43301a2b57f..2aeaaed7d64 100644
--- a/doc/architecture/blueprints/cells/cells-feature-database-sequences.md
+++ b/doc/architecture/blueprints/cells/cells-feature-database-sequences.md
@@ -6,15 +6,6 @@ description: 'Cells: Database Sequences'
<!-- vale gitlab.FutureTense = NO -->
-DISCLAIMER:
-This page may contain information related to upcoming products, features and
-functionality. It is important to note that the information presented is for
-informational purposes only, so please do not rely on the information for
-purchasing or planning purposes. Just like with all projects, the items
-mentioned on the page are subject to change or delay, and the development,
-release, and timing of any products, features, or functionality remain at the
-sole discretion of GitLab Inc.
-
This document is a work-in-progress and represents a very early state of the
Cells design. Significant aspects are not documented, though we expect to add
them in the future. This is one possible architecture for Cells, and we intend to
@@ -24,16 +15,10 @@ we can document the reasons for not choosing this approach.
# Cells: Database Sequences
-GitLab today ensures that every database row create has unique ID, allowing
-to access Merge Request, CI Job or Project by a known global ID.
-
-Cells will use many distinct and not connected databases, each of them having
-a separate IDs for most of entities.
-
-At a minimum, any ID referenced by `gitlab_main_clusterwide` table to a `gitlab_main_cell` table record will need to be unique across the cluster to avoid ambiguous references.
-
-Further to required global IDs, it might also be desirable to retain globally unique IDs for all database rows
-to allow migrating resources between Cells in the future.
+GitLab today ensures that every database row created has a unique ID, allowing access to a merge request, CI Job or Project by a known global ID.
+Cells will use many distinct and not connected databases, each of them having a separate ID for most entities.
+At a minimum, any ID referenced between a Cell and the shared schema will need to be unique across the cluster to avoid ambiguous references.
+Further to required global IDs, it might also be desirable to retain globally unique IDs for all database rows to allow migrating resources between Cells in the future.
## 1. Definition
@@ -41,54 +26,46 @@ to allow migrating resources between Cells in the future.
## 3. Proposal
-This are some preliminary ideas how we can retain unique IDs across the system.
+These are some preliminary ideas for how we can retain unique IDs across the system.
### 3.1. UUID
-Instead of using incremental sequences use UUID (128 bit) that is stored in database.
+Instead of using incremental sequences, use UUID (128 bit) that is stored in the database.
-- This might break existing IDs and requires adding UUID column for all existing tables.
+- This might break existing IDs and requires adding a UUID column for all existing tables.
- This makes all indexes larger as it requires storing 128 bit instead of 32/64 bit in index.
### 3.2. Use Cell index encoded in ID
-Since significant number of tables already use 64 bit ID numbers we could use MSB to encode
-Cell ID effectively enabling
+Because a significant number of tables already use 64 bit ID numbers we could use MSB to encode the Cell ID:
-- This might limit amount of Cells that can be enabled in system, as we might decide to only
- allocate 1024 possible Cell numbers.
-- This might make IDs to be migratable between Cells, since even if entity from Cell 1 is migrated to Cell 100
- this ID would still be unique.
-- If resources are migrated the ID itself will not be enough to decode Cell number and we would need
- lookup table.
+- This might limit the number of Cells that can be enabled in a system, as we might decide to only allocate 1024 possible Cell numbers.
+- This would make it possible to migrate IDs between Cells, because even if an entity from Cell 1 is migrated to Cell 100 this ID would still be unique.
+- If resources are migrated the ID itself will not be enough to decode the Cell number and we would need a lookup table.
- This requires updating all IDs to 32 bits.
### 3.3. Allocate sequence ranges from central place
-Each Cell might receive its own range of the sequences as they are consumed from a centrally managed place.
-Once Cell consumes all IDs assigned for a given table it would be replenished and a next range would be allocated.
+Each Cell might receive its own range of sequences as they are consumed from a centrally managed place.
+Once a Cell consumes all IDs assigned for a given table it would be replenished and a next range would be allocated.
Ranges would be tracked to provide a faster lookup table if a random access pattern is required.
-- This might make IDs to be migratable between Cells, since even if entity from Cell 1 is migrated to Cell 100
- this ID would still be unique.
-- If resources are migrated the ID itself will not be enough to decode Cell number and we would need
- much more robust lookup table as we could be breaking previously assigned sequence ranges.
+- This might make IDs migratable between Cells, because even if an entity from Cell 1 is migrated to Cell 100 this ID would still be unique.
+- If resources are migrated the ID itself will not be enough to decode the Cell number and we would need a much more robust lookup table as we could be breaking previously assigned sequence ranges.
- This does not require updating all IDs to 64 bits.
-- This adds some performance penalty to all `INSERT` statements in Postgres or at least from Rails as we need to check for the sequence number and potentially wait for our range to be refreshed from the ID server
+- This adds some performance penalty to all `INSERT` statements in Postgres or at least from Rails as we need to check for the sequence number and potentially wait for our range to be refreshed from the ID server.
- The available range will need to be stored and incremented in a centralized place so that concurrent transactions cannot possibly get the same value.
### 3.4. Define only some tables to require unique IDs
-Maybe this is acceptable only for some tables to have a globally unique IDs. It could be projects, groups
-and other top-level entities. All other tables like `merge_requests` would only offer Cell-local ID,
-but when referenced outside it would rather use IID (an ID that is monotonic in context of a given resource, like project).
+Maybe it is acceptable for only some tables to have globally unique IDs. It could be Projects, Groups and other top-level entities.
+All other tables like `merge_requests` would only offer a Cell-local ID, but when referenced outside it would rather use an IID (an ID that is monotonic in context of a given resource, like a Project).
-- This makes the ID 10000 for `merge_requests` be present on all Cells, which might be sometimes confusing
- as for uniqueness of the resource.
-- This might make random access by ID (if ever needed) be impossible without using composite key, like: `project_id+merge_request_id`.
-- This would require us to implement a transformation/generation of new ID if we need to migrate records to another cell. This can lead to very difficult migration processes when these IDs are also used as foreign keys for other records being migrated.
-- If IDs need to change when moving between cells this means that any links to records by ID would no longer work even if those links included the `project_id`.
-- If we plan to allow these ids to not be unique and change the unique constraint to be based on a composite key then we'd need to update all foreign key references to be based on the composite key
+- This makes the ID 10000 for `merge_requests` be present on all Cells, which might be sometimes confusing regarding the uniqueness of the resource.
+- This might make random access by ID (if ever needed) impossible without using a composite key, like: `project_id+merge_request_id`.
+- This would require us to implement a transformation/generation of new ID if we need to migrate records to another Cell. This can lead to very difficult migration processes when these IDs are also used as foreign keys for other records being migrated.
+- If IDs need to change when moving between Cells this means that any links to records by ID would no longer work even if those links included the `project_id`.
+- If we plan to allow these IDs to not be unique and change the unique constraint to be based on a composite key then we'd need to update all foreign key references to be based on the composite key.
## 4. Evaluation
diff --git a/doc/architecture/blueprints/cells/cells-feature-git-access.md b/doc/architecture/blueprints/cells/cells-feature-git-access.md
index 70b3f136904..611b4db5f43 100644
--- a/doc/architecture/blueprints/cells/cells-feature-git-access.md
+++ b/doc/architecture/blueprints/cells/cells-feature-git-access.md
@@ -15,35 +15,30 @@ we can document the reasons for not choosing this approach.
# Cells: Git Access
-This document describes impact of Cells architecture on all Git access (over HTTPS and SSH)
-patterns providing explanation of how potentially those features should be changed
-to work well with Cells.
+This document describes the impact of the Cells architecture on all Git access (over HTTPS and SSH) patterns, providing an explanation of how those features should potentially be changed to work well with Cells.
## 1. Definition
-Git access is done through out the application. It can be an operation performed by the system
-(read Git repository) or by user (create a new file via Web IDE, `git clone` or `git push` via command line).
+Git access is done throughout the application.
+It can be an operation performed by the system (read Git repository) or by a user (create a new file via Web IDE, `git clone` or `git push` via command line).
+The Cells architecture defines that all Git repositories will be local to the Cell, so no repository could be shared with another Cell.
-The Cells architecture defines that all Git repositories will be local to the Cell,
-so no repository could be shared with another Cell.
-
-The Cells architecture will require that any Git operation done can only be handled by a Cell holding
-the data. It means that any operation either via Web interface, API, or GraphQL needs to be routed
-to the correct Cell. It means that any `git clone` or `git push` operation can only be performed
-in a context of a Cell.
+The Cells architecture will require that any Git operation can only be handled by a Cell holding the data.
+It means that any operation either via Web interface, API, or GraphQL needs to be routed to the correct Cell.
+It means that any `git clone` or `git push` operation can only be performed in the context of a Cell.
## 2. Data flow
-The are various operations performed today by the GitLab on a Git repository. This describes
-the data flow how they behave today to better represent the impact.
+There are various operations performed today by GitLab on a Git repository.
+This describes the data flow of how they behave today to better represent the impact.
-It appears that Git access does require changes only to a few endpoints that are scoped to project.
+It appears that Git access does require changes only to a few endpoints that are scoped to a Project.
There appear to be different types of repositories:
- Project: assigned to Group
- Wiki: additional repository assigned to Project
- Design: similar to Wiki, additional repository assigned to Project
-- Snippet: creates a virtual project to hold repository, likely tied to the User
+- Snippet: creates a virtual Project to hold repository, likely tied to the User
### 2.1. Git clone over HTTPS
@@ -131,9 +126,8 @@ sequenceDiagram
## 3. Proposal
-The Cells stateless router proposal requires that any ambiguous path (that is not routable)
-will be made to be routable. It means that at least the following paths will have to be updated
-do introduce a routable entity (project, group, or organization).
+The Cells stateless router proposal requires that any ambiguous path (that is not routable) will be made routable.
+It means that at least the following paths will have to be updated to introduce a routable entity (Project, Group, or Organization).
Change:
@@ -150,9 +144,7 @@ Where:
## 4. Evaluation
Supporting Git repositories if a Cell can access only its own repositories does not appear to be complex.
-
-The one major complication is supporting snippets, but this likely falls in the same category as for the approach
-to support user's personal namespaces.
+The one major complication is supporting snippets, but this likely falls in the same category as for the approach to support a user's Personal Namespace.
## 4.1. Pros
@@ -161,4 +153,4 @@ to support user's personal namespaces.
## 4.2. Cons
1. The sharing of repositories objects is limited to the given Cell and Gitaly node.
-1. The across-Cells forks are likely impossible to be supported (discover: how this work today across different Gitaly node).
+1. Cross-Cells forks are likely impossible to support (discover: How this works today across different Gitaly nodes).
diff --git a/doc/architecture/blueprints/cells/cells-feature-global-search.md b/doc/architecture/blueprints/cells/cells-feature-global-search.md
index c1e2b93bc2d..475db381ff5 100644
--- a/doc/architecture/blueprints/cells/cells-feature-global-search.md
+++ b/doc/architecture/blueprints/cells/cells-feature-global-search.md
@@ -6,15 +6,6 @@ description: 'Cells: Global search'
<!-- vale gitlab.FutureTense = NO -->
-DISCLAIMER:
-This page may contain information related to upcoming products, features and
-functionality. It is important to note that the information presented is for
-informational purposes only, so please do not rely on the information for
-purchasing or planning purposes. Just like with all projects, the items
-mentioned on the page are subject to change or delay, and the development,
-release, and timing of any products, features, or functionality remain at the
-sole discretion of GitLab Inc.
-
This document is a work-in-progress and represents a very early state of the
Cells design. Significant aspects are not documented, though we expect to add
them in the future. This is one possible architecture for Cells, and we intend to
@@ -24,12 +15,9 @@ we can document the reasons for not choosing this approach.
# Cells: Global search
-When we introduce multiple Cells we intend to isolate all services related to
-those Cells. This will include Elasticsearch which means our current global
-search functionality will not work. It may be possible to implement aggregated
-search across all cells, but it is unlikely to be performant to do fan-out
-searches across all cells especially once you start to do pagination which
-requires setting the correct offset and page number for each search.
+When we introduce multiple Cells we intend to isolate all services related to those Cells.
+This will include Elasticsearch which means our current global search functionality will not work.
+It may be possible to implement aggregated search across all Cells, but it is unlikely to be performant to do fan-out searches across all Cells especially once you start to do pagination which requires setting the correct offset and page number for each search.
## 1. Definition
@@ -37,9 +25,8 @@ requires setting the correct offset and page number for each search.
## 3. Proposal
-Likely first versions of Cells will simply not support global searches and then
-we may later consider if building global searches to support popular use cases
-is worthwhile.
+Likely the first versions of Cells will not support global searches.
+Later, we may consider if building global searches to support popular use cases is worthwhile.
## 4. Evaluation
diff --git a/doc/architecture/blueprints/cells/cells-feature-graphql.md b/doc/architecture/blueprints/cells/cells-feature-graphql.md
index d936a1b81ba..e8850dfbee3 100644
--- a/doc/architecture/blueprints/cells/cells-feature-graphql.md
+++ b/doc/architecture/blueprints/cells/cells-feature-graphql.md
@@ -6,15 +6,6 @@ description: 'Cells: GraphQL'
<!-- vale gitlab.FutureTense = NO -->
-DISCLAIMER:
-This page may contain information related to upcoming products, features and
-functionality. It is important to note that the information presented is for
-informational purposes only, so please do not rely on the information for
-purchasing or planning purposes. Just like with all projects, the items
-mentioned on the page are subject to change or delay, and the development,
-release, and timing of any products, features, or functionality remain at the
-sole discretion of GitLab Inc.
-
This document is a work-in-progress and represents a very early state of the
Cells design. Significant aspects are not documented, though we expect to add
them in the future. This is one possible architecture for Cells, and we intend to
@@ -25,9 +16,8 @@ we can document the reasons for not choosing this approach.
# Cells: GraphQL
GitLab extensively uses GraphQL to perform efficient data query operations.
-GraphQL due to it's nature is not directly routable. The way how GitLab uses
-it calls the `/api/graphql` endpoint, and only query or mutation of body request
-might define where the data can be accessed.
+GraphQL, due to its nature, is not directly routable.
+The way GitLab uses it calls the `/api/graphql` endpoint, and only the query or mutation of the body request might define where the data can be accessed.
## 1. Definition
@@ -35,21 +25,19 @@ might define where the data can be accessed.
## 3. Proposal
-There are at least two main ways to implement GraphQL in Cells architecture.
+There are at least two main ways to implement GraphQL in a Cells architecture.
### 3.1. GraphQL routable by endpoint
Change `/api/graphql` to `/api/organization/<organization>/graphql`.
-- This breaks all existing usages of `/api/graphql` endpoint
- since the API URI is changed.
+- This breaks all existing usages of `/api/graphql` endpoint because the API URI is changed.
### 3.2. GraphQL routable by body
As part of router parse GraphQL body to find a routable entity, like `project`.
-- This still makes the GraphQL query be executed only in context of a given Cell
- and not allowing the data to be merged.
+- This still makes the GraphQL query be executed only in the context of a given Cell, without allowing the data to be merged.
```json
# Good example
@@ -71,11 +59,9 @@ As part of router parse GraphQL body to find a routable entity, like `project`.
### 3.3. Merging GraphQL Proxy
-Implement as part of router GraphQL Proxy which can parse body
-and merge results from many Cells.
+Implement as part of router GraphQL Proxy which can parse body and merge results from many Cells.
-- This might make pagination hard to achieve, or we might assume that
- we execute many queries of which results are merged across all Cells.
+- This might make pagination hard to achieve, or we might assume that we execute many queries of which results are merged across all Cells.
```json
{
diff --git a/doc/architecture/blueprints/cells/cells-feature-organizations.md b/doc/architecture/blueprints/cells/cells-feature-organizations.md
index 03178d9e6ce..e66144237be 100644
--- a/doc/architecture/blueprints/cells/cells-feature-organizations.md
+++ b/doc/architecture/blueprints/cells/cells-feature-organizations.md
@@ -6,15 +6,6 @@ description: 'Cells: Organizations'
<!-- vale gitlab.FutureTense = NO -->
-DISCLAIMER:
-This page may contain information related to upcoming products, features and
-functionality. It is important to note that the information presented is for
-informational purposes only, so please do not rely on the information for
-purchasing or planning purposes. Just like with all projects, the items
-mentioned on the page are subject to change or delay, and the development,
-release, and timing of any products, features, or functionality remain at the
-sole discretion of GitLab Inc.
-
This document is a work-in-progress and represents a very early state of the
Cells design. Significant aspects are not documented, though we expect to add
them in the future. This is one possible architecture for Cells, and we intend to
@@ -24,29 +15,21 @@ we can document the reasons for not choosing this approach.
# Cells: Organizations
-One of the major designs of Cells architecture is strong isolation between Groups.
-Organizations as described by this blueprint provides a way to have plausible UX
-for joining together many Groups that are isolated from the rest of systems.
+One of the major designs of a Cells architecture is strong isolation between Groups.
+Organizations as described by the [Organization blueprint](../organization/index.md) provide a way to have plausible UX for joining together many Groups that are isolated from the rest of the system.
## 1. Definition
-Cells do require that all groups and projects of a single organization can
-only be stored on a single Cell since a Cell can only access data that it holds locally
-and has very limited capabilities to read information from other Cells.
+Cells do require that all Groups and Projects of a single Organization can only be stored on a single Cell because a Cell can only access data that it holds locally and has very limited capabilities to read information from other Cells.
-Cells with Organizations do require strong isolation between organizations.
+Cells with Organizations do require strong isolation between Organizations.
-It will have significant implications on various user-facing features,
-like Todos, dropdowns allowing to select projects, references to other issues
-or projects, or any other social functions present at GitLab. Today those functions
-were able to reference anything in the whole system. With the introduction of
-organizations such will be forbidden.
+It will have significant implications on various user-facing features, like Todos, dropdowns allowing to select Projects, references to other issues or Projects, or any other social functions present at GitLab.
+Today those functions were able to reference anything in the whole system.
+With the introduction of Organizations this will be forbidden.
-This problem definition aims to answer effort and implications required to add
-strong isolation between organizations to the system. Including features affected
-and their data processing flow. The purpose is to ensure that our solution when
-implemented consistently avoids data leakage between organizations residing on
-a single Cell.
+This problem definition aims to answer effort and implications required to add strong isolation between Organizations to the system, including features affected and their data processing flow.
+The purpose is to ensure that our solution when implemented consistently avoids data leakage between Organizations residing on a single Cell.
## 2. Data flow
diff --git a/doc/architecture/blueprints/cells/cells-feature-router-endpoints-classification.md b/doc/architecture/blueprints/cells/cells-feature-router-endpoints-classification.md
index 7c2974ca258..d403d6ff963 100644
--- a/doc/architecture/blueprints/cells/cells-feature-router-endpoints-classification.md
+++ b/doc/architecture/blueprints/cells/cells-feature-router-endpoints-classification.md
@@ -6,15 +6,6 @@ description: 'Cells: Router Endpoints Classification'
<!-- vale gitlab.FutureTense = NO -->
-DISCLAIMER:
-This page may contain information related to upcoming products, features and
-functionality. It is important to note that the information presented is for
-informational purposes only, so please do not rely on the information for
-purchasing or planning purposes. Just like with all projects, the items
-mentioned on the page are subject to change or delay, and the development,
-release, and timing of any products, features, or functionality remain at the
-sole discretion of GitLab Inc.
-
This document is a work-in-progress and represents a very early state of the
Cells design. Significant aspects are not documented, though we expect to add
them in the future. This is one possible architecture for Cells, and we intend to
@@ -24,15 +15,11 @@ we can document the reasons for not choosing this approach.
# Cells: Router Endpoints Classification
-Classification of all endpoints is essential to properly route request
-hitting load balancer of a GitLab installation to a Cell that can serve it.
-
-Each Cell should be able to decode each request and classify for which Cell
-it belongs to.
+Classification of all endpoints is essential to properly route requests hitting the load balancer of a GitLab installation to a Cell that can serve it.
+Each Cell should be able to decode each request and classify which Cell it belongs to.
-GitLab currently implements hundreds of endpoints. This document tries
-to describe various techniques that can be implemented to allow the Rails
-to provide this information efficiently.
+GitLab currently implements hundreds of endpoints.
+This document tries to describe various techniques that can be implemented to allow the Rails to provide this information efficiently.
## 1. Definition
diff --git a/doc/architecture/blueprints/cells/cells-feature-schema-changes.md b/doc/architecture/blueprints/cells/cells-feature-schema-changes.md
index d712b24a8a0..dd0f6c0705c 100644
--- a/doc/architecture/blueprints/cells/cells-feature-schema-changes.md
+++ b/doc/architecture/blueprints/cells/cells-feature-schema-changes.md
@@ -6,15 +6,6 @@ description: 'Cells: Schema changes'
<!-- vale gitlab.FutureTense = NO -->
-DISCLAIMER:
-This page may contain information related to upcoming products, features and
-functionality. It is important to note that the information presented is for
-informational purposes only, so please do not rely on the information for
-purchasing or planning purposes. Just like with all projects, the items
-mentioned on the page are subject to change or delay, and the development,
-release, and timing of any products, features, or functionality remain at the
-sole discretion of GitLab Inc.
-
This document is a work-in-progress and represents a very early state of the
Cells design. Significant aspects are not documented, though we expect to add
them in the future. This is one possible architecture for Cells, and we intend to
@@ -24,24 +15,15 @@ we can document the reasons for not choosing this approach.
# Cells: Schema changes
-When we introduce multiple Cells that own their own databases this will
-complicate the process of making schema changes to Postgres and Elasticsearch.
-Today we already need to be careful to make changes comply with our zero
-downtime deployments. For example,
-[when removing a column we need to make changes over 3 separate deployments](../../../development/database/avoiding_downtime_in_migrations.md#dropping-columns).
-We have tooling like `post_migrate` that helps with these kinds of changes to
-reduce the number of merge requests needed, but these will be complicated when
-we are dealing with deploying multiple rails applications that will be at
-different versions at any one time. This problem will be particularly tricky to
-solve for shared databases like our plan to share the `users` related tables
-among all Cells.
-
-A key benefit of Cells may be that it allows us to run different
-customers on different versions of GitLab. We may choose to update our own cell
-before all our customers giving us even more flexibility than our current
-canary architecture. But doing this means that schema changes need to have even
-more versions of backward compatibility support which could slow down
-development as we need extra steps to make schema changes.
+When we introduce multiple Cells that own their own databases this will complicate the process of making schema changes to Postgres and Elasticsearch.
+Today we already need to be careful to make changes comply with our zero downtime deployments.
+For example, [when removing a column we need to make changes over 3 separate deployments](../../../development/database/avoiding_downtime_in_migrations.md#dropping-columns).
+We have tooling like `post_migrate` that helps with these kinds of changes to reduce the number of merge requests needed, but these will be complicated when we are dealing with deploying multiple Rails applications that will be at different versions at any one time.
+This problem will be particularly tricky to solve for shared databases like our plan to share the `users` related tables among all Cells.
+
+A key benefit of Cells may be that it allows us to run different customers on different versions of GitLab.
+We may choose to update our own Cell before all our customers giving us even more flexibility than our current canary architecture.
+But doing this means that schema changes need to have even more versions of backward compatibility support which could slow down development as we need extra steps to make schema changes.
## 1. Definition
diff --git a/doc/architecture/blueprints/cells/cells-feature-secrets.md b/doc/architecture/blueprints/cells/cells-feature-secrets.md
index 50ccf926b4d..681c229711d 100644
--- a/doc/architecture/blueprints/cells/cells-feature-secrets.md
+++ b/doc/architecture/blueprints/cells/cells-feature-secrets.md
@@ -15,32 +15,26 @@ we can document the reasons for not choosing this approach.
# Cells: Secrets
-Where possible, each cell should have its own distinct set of secrets.
-However, there will be some secrets that will be required to be the same for all
-cells in the cluster
+Where possible, each Cell should have its own distinct set of secrets.
+However, there will be some secrets that will be required to be the same for all Cells in the cluster.
## 1. Definition
-GitLab has a lot of
-[secrets](https://docs.gitlab.com/charts/installation/secrets.html) that needs
-to be configured.
-
-Some secrets are for inter-component communication, for example, `GitLab Shell secret`,
-and used only within a cell.
-
+GitLab has a lot of [secrets](https://docs.gitlab.com/charts/installation/secrets.html) that need to be configured.
+Some secrets are for inter-component communication, for example, `GitLab Shell secret`, and used only within a Cell.
Some secrets are used for features, for example, `ci_jwt_signing_key`.
## 2. Data flow
## 3. Proposal
-1. Secrets used for features will need to be consistent across all cells, so that the UX is consistent.
+1. Secrets used for features will need to be consistent across all Cells, so that the UX is consistent.
1. This is especially true for the `db_key_base` secret which is used for
- encrypting data at rest in the database - so that projects that are
- transferred to another cell will continue to work. We do not want to have
- to re-encrypt such rows when we move projects/groups between cells.
-1. Secrets which are used for intra-cell communication only should be uniquely generated
- per-cell.
+ encrypting data at rest in the database - so that Projects that are
+ transferred to another Cell will continue to work. We do not want to have
+ to re-encrypt such rows when we move Projects/Groups between Cells.
+1. Secrets which are used for intra-Cell communication only should be uniquely generated
+ per Cell.
## 4. Evaluation
diff --git a/doc/architecture/blueprints/cells/index.md b/doc/architecture/blueprints/cells/index.md
index 53b7758dbd3..60628580e28 100644
--- a/doc/architecture/blueprints/cells/index.md
+++ b/doc/architecture/blueprints/cells/index.md
@@ -14,7 +14,7 @@ participating-stages: []
This document is a work-in-progress and represents a very early state of the Cells design. Significant aspects are not documented, though we expect to add them in the future.
-Cells is a new architecture for our Software as a Service platform. This architecture is horizontally-scalable, resilient, and provides a more consistent user experience. It may also provide additional features in the future, such as data residency control (regions) and federated features.
+Cells is a new architecture for our software as a service platform. This architecture is horizontally scalable, resilient, and provides a more consistent user experience. It may also provide additional features in the future, such as data residency control (regions) and federated features.
For more information about Cells, see also:
@@ -28,8 +28,7 @@ We can't ship the entire Cells architecture in one go - it is too large.
Instead, we are defining key work streams required by the project.
Not all objectives need to be fulfilled to reach production readiness.
-It is expected that some objectives will not be completed for General Availability (GA),
-but will be enough to run Cells in production.
+It is expected that some objectives will not be completed for General Availability (GA), but will be enough to run Cells in production.
### 1. Data access layer
@@ -54,7 +53,7 @@ Under this objective the following steps are expected:
1. **Cluster-unique identifiers**
- Every object has a unique identifier that can be used to access data across the cluster. The IDs for allocated projects, issues and any other objects are cluster-unique.
+ Every object has a unique identifier that can be used to access data across the cluster. The IDs for allocated Projects, issues and any other objects are cluster-unique.
1. **Cluster-wide deletions**
@@ -62,7 +61,7 @@ Under this objective the following steps are expected:
1. **Data access layer**
- Ensure that a stable data-access (versioned) layer that allows to share cluster-wide data is implemented.
+ Ensure that a stable data access (versioned) layer is implemented that allows to share cluster-wide data.
1. **Database migration**
@@ -70,48 +69,38 @@ Under this objective the following steps are expected:
### 2. Essential workflows
-To make Cells viable we require to define and support
-essential workflows before we can consider the Cells
-to be of Beta quality. Essential workflows are meant
-to cover the majority of application functionality
-that makes the product mostly useable, but with some caveats.
+To make Cells viable we need to define and support essential workflows before we can consider Cells to be of Beta quality.
+Essential workflows are meant to cover the majority of application functionality that makes the product mostly useable, but with some caveats.
The current approach is to define workflows from top to bottom.
The order defines the presumed priority of the items.
-This list is not exhaustive as we would be expecting
-other teams to help and fix their workflows after
-the initial phase, in which we fix the fundamental ones.
-
-To consider a project ready for the Beta phase, it is expected
-that all features defined below are supported by Cells.
-In the cases listed below, the workflows define a set of tables
-to be properly attributed to the feature. In some cases,
-a table with an ambiguous usage has to be broken down.
-For example: `uploads` are used to store user avatars,
-as well as uploaded attachments for comments. It would be expected
-that `uploads` is split into `uploads` (describing group/project-level attachments)
-and `global_uploads` (describing, for example, user avatars).
-
-Except for initial 2-3 quarters this work is highly parallel.
-It would be expected that **group::tenant scale** would help other
-teams to fix their feature set to work with Cells. The first 2-3 quarters
-would be required to define a general split of data and build required tooling.
+This list is not exhaustive as we would be expecting other teams to help and fix their workflows after the initial phase, in which we fix the fundamental ones.
+
+To consider a project ready for the Beta phase, it is expected that all features defined below are supported by Cells.
+In the cases listed below, the workflows define a set of tables to be properly attributed to the feature.
+In some cases, a table with an ambiguous usage has to be broken down.
+For example: `uploads` are used to store user avatars, as well as uploaded attachments for comments.
+It would be expected that `uploads` is split into `uploads` (describing Group/Project-level attachments) and `global_uploads` (describing, for example, user avatars).
+
+Except for the initial 2-3 quarters this work is highly parallel.
+It is expected that **group::tenant scale** will help other teams to fix their feature set to work with Cells.
+The first 2-3 quarters are required to define a general split of data and build the required tooling.
1. **Instance-wide settings are shared across cluster.**
- The Admin Area section for most part is shared across a cluster.
+ The Admin Area section for the most part is shared across a cluster.
1. **User accounts are shared across cluster.**
The purpose is to make `users` cluster-wide.
-1. **User can create group.**
+1. **User can create Group.**
- The purpose is to perform a targeted decomposition of `users` and `namespaces`, because the `namespaces` will be stored locally in the Cell.
+ The purpose is to perform a targeted decomposition of `users` and `namespaces`, because `namespaces` will be stored locally in the Cell.
-1. **User can create project.**
+1. **User can create Project.**
- The purpose is to perform a targeted decomposition of `users` and `projects`, because the `projects` will be stored locally in the Cell.
+ The purpose is to perform a targeted decomposition of `users` and `projects`, because `projects` will be stored locally in the Cell.
1. **User can change profile avatar that is shared in cluster.**
@@ -119,7 +108,7 @@ would be required to define a general split of data and build required tooling.
1. **User can push to Git repository.**
- The purpose is to ensure that essential joins from the projects table are properly attributed to be
+ The purpose is to ensure that essential joins from the Projects table are properly attributed to be
Cell-local, and as a result the essential Git workflow is supported.
1. **User can run CI pipeline.**
@@ -130,26 +119,26 @@ would be required to define a general split of data and build required tooling.
The purpose is to ensure that `issues` and `merge requests` are properly attributed to be `Cell-local`.
-1. **User can manage group and project members.**
+1. **User can manage Group and Project members.**
The `members` table is properly attributed to be either `Cell-local` or `cluster-wide`.
1. **User can manage instance-wide runners.**
- The purpose is to scope all CI Runners to be Cell-local. Instance-wide runners in fact become Cell-local runners. The expectation is to provide a user interface view and manage all runners per Cell, instead of per cluster.
+ The purpose is to scope all CI runners to be Cell-local. Instance-wide runners in fact become Cell-local runners. The expectation is to provide a user interface view and manage all runners per Cell, instead of per cluster.
-1. **User is part of organization and can only see information from the organization.**
+1. **User is part of Organization and can only see information from the Organization.**
- The purpose is to have many organizations per Cell, but never have a single organization spanning across many Cells. This is required to ensure that information shown within an organization is isolated, and does not require fetching information from other Cells.
+ The purpose is to have many Organizations per Cell, but never have a single Organization spanning across many Cells. This is required to ensure that information shown within an Organization is isolated, and does not require fetching information from other Cells.
### 3. Additional workflows
Some of these additional workflows might need to be supported, depending on the group decision.
This list is not exhaustive of work needed to be done.
-1. **User can use all group-level features.**
-1. **User can use all project-level features.**
-1. **User can share groups with other groups in an organization.**
+1. **User can use all Group-level features.**
+1. **User can use all Project-level features.**
+1. **User can share Groups with other Groups in an Organization.**
1. **User can create system webhook.**
1. **User can upload and manage packages.**
1. **User can manage security detection features.**
@@ -158,13 +147,11 @@ This list is not exhaustive of work needed to be done.
### 4. Routing layer
-The routing layer is meant to offer a consistent user experience where all Cells are presented
-under a single domain (for example, `gitlab.com`), instead of
-having to navigate to separate domains.
+The routing layer is meant to offer a consistent user experience where all Cells are presented under a single domain (for example, `gitlab.com`), instead of having to navigate to separate domains.
-The user will able to use `https://gitlab.com` to access Cell-enabled GitLab. Depending
-on the URL access, it will be transparently proxied to the correct Cell that can serve this particular
-information. For example:
+The user will be able to use `https://gitlab.com` to access Cell-enabled GitLab.
+Depending on the URL access, it will be transparently proxied to the correct Cell that can serve this particular information.
+For example:
- All requests going to `https://gitlab.com/users/sign_in` are randomly distributed to all Cells.
- All requests going to `https://gitlab.com/gitlab-org/gitlab/-/tree/master` are always directed to Cell 5, for example.
@@ -173,9 +160,8 @@ information. For example:
1. **Technology.**
We decide what technology the routing service is written in.
- The choice is dependent on the best performing language, and the expected way
- and place of deployment of the routing layer. If it is required to make
- the service multi-cloud it might be required to deploy it to the CDN provider.
+ The choice is dependent on the best performing language, and the expected way and place of deployment of the routing layer.
+ If it is required to make the service multi-cloud it might be required to deploy it to the CDN provider.
Then the service needs to be written using a technology compatible with the CDN provider.
1. **Cell discovery.**
@@ -184,35 +170,29 @@ information. For example:
1. **Router endpoints classification.**
- The stateless routing service will fetch and cache information about endpoints
- from one of the Cells. We need to implement a protocol that will allow us to
- accurately describe the incoming request (its fingerprint), so it can be classified
- by one of the Cells, and the results of that can be cached. We also need to implement
- a mechanism for negative cache and cache eviction.
+ The stateless routing service will fetch and cache information about endpoints from one of the Cells.
+ We need to implement a protocol that will allow us to accurately describe the incoming request (its fingerprint), so it can be classified by one of the Cells, and the results of that can be cached.
+ We also need to implement a mechanism for negative cache and cache eviction.
1. **GraphQL and other ambiguous endpoints.**
- Most endpoints have a unique sharding key: the organization, which directly
- or indirectly (via a group or project) can be used to classify endpoints.
- Some endpoints are ambiguous in their usage (they don't encode the sharding key),
- or the sharding key is stored deep in the payload. In these cases, we need to decide how to handle endpoints like `/api/graphql`.
+ Most endpoints have a unique sharding key: the Organization, which directly or indirectly (via a Group or Project) can be used to classify endpoints.
+ Some endpoints are ambiguous in their usage (they don't encode the sharding key), or the sharding key is stored deep in the payload.
+ In these cases, we need to decide how to handle endpoints like `/api/graphql`.
### 5. Cell deployment
-We will run many Cells. To manage them easier, we need to have consistent
-deployment procedures for Cells, including a way to deploy, manage, migrate,
-and monitor.
+We will run many Cells.
+To manage them more easily, we need to have consistent deployment procedures for Cells, including a way to deploy, manage, migrate, and monitor.
-We are very likely to use tooling made for [GitLab Dedicated](https://about.gitlab.com/dedicated/)
-with its control planes.
+We are very likely to use tooling made for [GitLab Dedicated](https://about.gitlab.com/dedicated/) with its control planes.
1. **Extend GitLab Dedicated to support GCP.**
1. TBD
### 6. Migration
-When we reach production and are able to store new organizations on new Cells, we need
-to be able to divide big Cells into many smaller ones.
+When we reach production and are able to store new Organizations on new Cells, we need to be able to divide big Cells into many smaller ones.
1. **Use GitLab Geo to clone Cells.**
@@ -220,14 +200,13 @@ to be able to divide big Cells into many smaller ones.
1. **Split Cells by cloning them.**
- Once Cell is cloned we change routing information for organizations.
- Organization will encode `cell_id`. When we update `cell_id` it will automatically
- make the given Cell to be authoritative to handle the traffic for the given organization.
+ Once a Cell is cloned we change the routing information for Organizations.
+ Organizations will encode a `cell_id`.
+ When we update the `cell_id` it will automatically make the given Cell authoritative to handle traffic for the given Organization.
1. **Delete redundant data from previous Cells.**
- Since the organization is now stored on many Cells, once we change `cell_id`
- we will have to remove data from all other Cells based on `organization_id`.
+ Since the Organization is now stored on many Cells, once we change `cell_id` we will have to remove data from all other Cells based on `organization_id`.
## Availability of the feature
@@ -237,11 +216,10 @@ We are following the [Support for Experiment, Beta, and Generally Available feat
Expectations:
-- We can deploy a Cell on staging or another testing environment by using a separate domain (ex. `cell2.staging.gitlab.com`)
- using [Cell deployment](#5-cell-deployment) tooling.
-- User can create organization, group and project, and run some of the [essential workflows](#2-essential-workflows).
+- We can deploy a Cell on staging or another testing environment by using a separate domain (for example `cell2.staging.gitlab.com`) using [Cell deployment](#5-cell-deployment) tooling.
+- User can create Organization, Group and Project, and run some of the [essential workflows](#2-essential-workflows).
- It is not expected to be able to run a router to serve all requests under a single domain.
-- We expect data-loss of data stored on additional Cells.
+- We expect data loss of data stored on additional Cells.
- We expect to tear down and create many new Cells to validate tooling.
### 2. Beta
@@ -250,7 +228,7 @@ Expectations:
- We can run many Cells under a single domain (ex. `staging.gitlab.com`).
- All features defined in [essential workflows](#2-essential-workflows) are supported.
-- Not all aspects of [Routing layer](#4-routing-layer) are finalized.
+- Not all aspects of the [routing layer](#4-routing-layer) are finalized.
- We expect additional Cells to be stable with minimal data loss.
### 3. GA
@@ -259,53 +237,49 @@ Expectations:
- We can run many Cells under a single domain (for example, `staging.gitlab.com`).
- All features defined in [essential workflows](#2-essential-workflows) are supported.
-- All features of [routing layer](#4-routing-layer) are supported.
-- Most of [additional workflows](#3-additional-workflows) are supported.
-- We don't expect to support any of [migration](#6-migration) aspects.
+- All features of the [routing layer](#4-routing-layer) are supported.
+- Most of the [additional workflows](#3-additional-workflows) are supported.
+- We don't expect to support any of the [migration](#6-migration) aspects.
### 4. Post GA
Expectations:
- We support all [additional workflows](#3-additional-workflows).
-- We can [migrate](#6-migration) existing organizations onto new Cells.
+- We can [migrate](#6-migration) existing Organizations onto new Cells.
## Iteration plan
-The delivered iterations will focus on solving particular steps of a given
-key work stream.
-
-It is expected that initial iterations will rather
-be slow, because they require substantially more
-changes to prepare the codebase for data split.
+The delivered iterations will focus on solving particular steps of a given key work stream.
+It is expected that initial iterations will be rather slow, because they require substantially more changes to prepare the codebase for data split.
One iteration describes one quarter's worth of work.
-1. [Iteration 1](https://gitlab.com/groups/gitlab-org/-/epics/9667) - FY24Q1
+1. [Iteration 1](https://gitlab.com/groups/gitlab-org/-/epics/9667) - FY24Q1 - Complete
 - Data access layer: Initial Admin Area settings are shared across the cluster.
 - Essential workflows: Allow sharing cluster-wide data with a database-level data access layer
-1. [Iteration 2](https://gitlab.com/groups/gitlab-org/-/epics/9813) - FY24Q2
+1. [Iteration 2](https://gitlab.com/groups/gitlab-org/-/epics/9813) - FY24Q2 - In progress
 - Essential workflows: User accounts are shared across the cluster.
- - Essential workflows: User can create group.
+ - Essential workflows: User can create Group.
-1. [Iteration 3](https://gitlab.com/groups/gitlab-org/-/epics/10997) - FY24Q3
+1. [Iteration 3](https://gitlab.com/groups/gitlab-org/-/epics/10997) - FY24Q3 - Planned
- - Essential workflows: User can create project.
- - Essential workflows: User can push to Git repository.
- - Cell deployment: Extend GitLab Dedicated to support GCP
+ - Essential workflows: User can create Project.
- Routing: Technology.
1. [Iteration 4](https://gitlab.com/groups/gitlab-org/-/epics/10998) - FY24Q4
+ - Essential workflows: User can push to Git repository.
- Essential workflows: User can run CI pipeline.
- Essential workflows: User can create issue, merge request, and merge it after it is green.
- Data access layer: Evaluate the efficiency of database-level access vs. API-oriented access layer
- Data access layer: Cluster-unique identifiers.
- Routing: Cell discovery.
- Routing: Router endpoints classification.
+ - Cell deployment: Extend GitLab Dedicated to support GCP
1. Iteration 5 - FY25Q1
@@ -313,17 +287,16 @@ One iteration describes one quarter's worth of work.
## Technical Proposals
-The Cells architecture do have long lasting implications to data processing, location, scalability and the GitLab architecture.
+The Cells architecture has long-lasting implications for data processing, location, scalability, and the GitLab architecture.
This section links all different technical proposals that are being evaluated.
- [Stateless Router That Uses a Cache to Pick Cell and Is Redirected When Wrong Cell Is Reached](proposal-stateless-router-with-buffering-requests.md)
-
- [Stateless Router That Uses a Cache to Pick Cell and pre-flight `/api/v4/cells/learn`](proposal-stateless-router-with-routes-learning.md)
## Impacted features
The Cells architecture will impact many features requiring some of them to be rewritten, or changed significantly.
-This is the list of known affected features with the proposed solutions.
+This is the list of known affected features with proposed solutions.
- [Cells: Admin Area](cells-feature-admin-area.md)
- [Cells: Agent for Kubernetes](cells-feature-agent-for-kubernetes.md)
@@ -352,28 +325,23 @@ This is the list of known affected features with the proposed solutions.
### What's the difference between Cells architecture and GitLab Dedicated?
-The new Cells architecture is meant to scale GitLab.com. And the way to achieve this is by moving
-organizations into cells, but different organizations can still share each other server resources, even
-if the application provides isolation from other organizations. But all of them still operate under the
-existing GitLab SaaS domain name `gitlab.com`. Also, cells still share some common data, like `users`, and
-routing information of groups and projects. For example, no two users can have the same username
-even if they belong to different organizations that exist on different cells.
+The new Cells architecture is meant to scale GitLab.com.
+The way to achieve this is by moving Organizations into Cells, but different Organizations can still share server resources, even if the application provides isolation from other Organizations.
+But all of them still operate under the existing GitLab SaaS domain name `gitlab.com`.
+Also, Cells still share some common data, like `users`, and routing information of Groups and Projects.
+For example, no two users can have the same username even if they belong to different Organizations that exist on different Cells.
-With the aforementioned differences, GitLab Dedicated is still offered at higher costs due to the fact
-that it's provisioned via dedicated server resources for each customer, while Cells use shared resources. Which
-makes GitLab Dedicated more suited for bigger customers, and GitLab Cells more suitable for small to mid size
-companies that are starting on GitLab.com.
+With the aforementioned differences, [GitLab Dedicated](https://about.gitlab.com/dedicated/) is still offered at higher costs due to the fact that it's provisioned via dedicated server resources for each customer, while Cells use shared resources.
+This makes GitLab Dedicated more suited for bigger customers, and GitLab Cells more suitable for small to mid-size companies that are starting on GitLab.com.
-On the other hand, [GitLab Dedicated](https://about.gitlab.com/dedicated/) is meant to provide completely
-isolated GitLab instance for any organization. Where this instance is running on its own custom domain name, and
-totally isolated from any other GitLab instance, including GitLab SaaS. For example, users on GitLab dedicated
-don't have to have a different and unique username that was already taken on GitLab.com.
+On the other hand, GitLab Dedicated is meant to provide a completely isolated GitLab instance for any Organization.
+This instance is running on its own custom domain name, and is totally isolated from any other GitLab instance, including GitLab SaaS.
+For example, users on GitLab Dedicated don't have to have a different and unique username that was already taken on GitLab.com.
-### Can different cells communicate with each other?
+### Can different Cells communicate with each other?
-Up until iteration 3, cells communicate with each other only via a shared database that contains common
-data. In iteration 4 we are going to evaluate the option of cells calling each other via API to provide more
-isolation and reliability.
+Up until iteration 3, Cells communicate with each other only via a shared database that contains common data.
+In iteration 4 we are going to evaluate the option of Cells calling each other via API to provide more isolation and reliability.
## Decision log
@@ -384,6 +352,6 @@ isolation and reliability.
- [Internal Pods presentation](https://docs.google.com/presentation/d/1x1uIiN8FR9fhL7pzFh9juHOVcSxEY7d2_q4uiKKGD44/edit#slide=id.ge7acbdc97a_0_155)
- [Internal link to all diagrams](https://drive.google.com/file/d/13NHzbTrmhUM-z_Bf0RjatUEGw5jWHSLt/view?usp=sharing)
- [Cells Epic](https://gitlab.com/groups/gitlab-org/-/epics/7582)
-- [Database Group investigation](https://about.gitlab.com/handbook/engineering/development/enablement/data_stores/database/doc/root-namespace-sharding.html)
+- [Database group investigation](https://about.gitlab.com/handbook/engineering/development/enablement/data_stores/database/doc/root-namespace-sharding.html)
- [Shopify Pods architecture](https://shopify.engineering/a-pods-architecture-to-allow-shopify-to-scale)
- [Opstrace architecture](https://gitlab.com/gitlab-org/opstrace/opstrace/-/blob/main/docs/architecture/overview.md)
diff --git a/doc/development/secure_coding_guidelines.md b/doc/development/secure_coding_guidelines.md
index 2c85cb19fab..186239cc547 100644
--- a/doc/development/secure_coding_guidelines.md
+++ b/doc/development/secure_coding_guidelines.md
@@ -1450,7 +1450,7 @@ Logging helps track events for debugging. Logging also allows the application to
### What should not be captured in the logs
-- Personal user information.
+- Personal data, except for integer-based identifiers and UUIDs, or IP address, which can be logged when necessary.
- Credentials like access tokens or passwords. If credentials must be captured for debugging purposes, log the internal ID of the credential (if available) instead. Never log credentials under any circumstances.
- When [debug logging](../ci/variables/index.md#enable-debug-logging) is enabled, all masked CI/CD variables are visible in job logs. Consider using [protected variables](../ci/variables/index.md#protect-a-cicd-variable) when possible so that sensitive CI/CD variables are only available to pipelines running on protected branches or protected tags.
- Any data supplied by the user without proper validation.
diff --git a/doc/tutorials/create_register_first_runner/index.md b/doc/tutorials/create_register_first_runner/index.md
new file mode 100644
index 00000000000..09487c2378f
--- /dev/null
+++ b/doc/tutorials/create_register_first_runner/index.md
@@ -0,0 +1,167 @@
+---
+stage: Verify
+group: Runner
+info: To determine the technical writer assigned to the Stage/Group associated with this page, see https://about.gitlab.com/handbook/product/ux/technical-writing/#assignments
+---
+
+# Tutorial: Create, register, and run your own project runner **(FREE)**
+
+This tutorial shows you how to configure and run your first runner in GitLab.
+
+A runner is an agent in the GitLab Runner application that runs jobs in a GitLab CI/CD pipeline.
+Jobs are defined in the `.gitlab-ci.yml` file and assigned to available runners.
+
+GitLab has three types of runners:
+
+- Shared: Available to all groups and projects in a GitLab instance.
+- Group: Available to all projects and subgroups in a group.
+- Project: Associated with specific projects. Typically, project runners are used by one project at a time.
+
+For this tutorial, you'll create a project runner to run jobs defined in a basic pipeline
+configuration:
+
+1. [Create a blank project](#create-a-blank-project).
+1. [Create a project pipeline](#create-a-project-pipeline).
+1. [Create and register a project runner](#create-and-register-a-project-runner).
+1. [Trigger a pipeline to run your runner](#trigger-a-pipeline-to-run-your-runner).
+
+## Prerequisite
+
+Before you can create, register, and run a runner, [GitLab Runner](https://docs.gitlab.com/runner/install/) must be installed on a local computer.
+
+## Create a blank project
+
+First, create a blank project where you can create your CI/CD pipeline and runner.
+
+To create a blank project:
+
+1. On the left sidebar, at the top, select **Create new** (**{plus}**) and **New project/repository**.
+1. Select **Create blank project**.
+1. Enter the project details:
+ - In the **Project name** field, enter the name of your project. The name must start with a lowercase or uppercase letter (`a-zA-Z`), digit (`0-9`), emoji, or underscore (`_`). It can also contain dots (`.`), pluses (`+`), dashes (`-`), or spaces.
+ - In the **Project slug** field, enter the path to your project. The GitLab instance uses the
+ slug as the URL path to the project. To change the slug, first enter the project name,
+ then change the slug.
+1. Select **Create project**.
+
+## Create a project pipeline
+
+Next, create a `.gitlab-ci.yml` file for your project. This is a YAML file where you specify instructions for GitLab CI/CD.
+
+In this file, you define:
+
+- The structure and order of jobs that the runner should execute.
+- The decisions the runner should make when specific conditions are encountered.
+
+1. On the left sidebar, at the top, select **Search GitLab** (**{search}**) to find your project or group.
+1. Select **Project overview**.
+1. Select the plus icon (**{plus}**), then select **New file**.
+1. In the **Filename** field, enter `.gitlab-ci.yml`.
+1. In the large text box, paste this sample configuration:
+
+ ```yaml
+ stages:
+ - build
+ - test
+
+ job_build:
+ stage: build
+ script:
+ - echo "Building the project"
+
+ job_test:
+ stage: test
+ script:
+ - echo "Running tests"
+ ```
+
+ In this configuration there are two jobs that the runner runs: a build job and a test job.
+1. Select **Commit changes**.
+
+## Create and register a project runner
+
+Next, create a project runner and register it. You must register the runner to link it
+to GitLab so that it can pick up jobs from the project pipeline.
+
+To create a project runner:
+
+1. On the left sidebar, at the top, select **Search GitLab** (**{search}**) to find your project.
+1. Select **Settings > CI/CD**.
+1. Expand the **Runners** section.
+1. Select **New project runner**.
+1. Select your operating system.
+1. In the **Tags** section, select the **Run untagged** checkbox. [Tags](../../ci/runners/configure_runners.md#use-tags-to-control-which-jobs-a-runner-can-run) specify which jobs
+ the runner can run and are optional.
+1. Select **Create runner**.
+1. Follow the on-screen instructions to register the runner from the command line. When prompted:
+ - For `executor`, because your runner will run directly on the host computer, enter `shell`. The [executor](https://docs.gitlab.com/runner/executors/)
+ is the environment where the runner executes the job.
+ - For `GitLab instance URL`, use the URL for your GitLab instance. For example, if your project
+ is hosted on `gitlab.example.com/yourname/yourproject`, then your GitLab instance URL is `https://gitlab.example.com`.
+ If your project is hosted on GitLab.com, the URL is `https://gitlab.com`.
+1. Start your runner:
+
+ ```shell
+ gitlab-runner run
+ ```
+
+### Check the runner configuration file
+
+After you register the runner, the configuration and authentication token are saved to your `config.toml`. The runner uses the
+token to authenticate with GitLab when picking up jobs from the job queue.
+
+You can use the `config.toml` to
+define more [advanced runner configurations](https://docs.gitlab.com/runner/configuration/advanced-configuration.html).
+
+Here's what your `config.toml` should look like after you register and start the runner:
+
+```toml
+ [[runners]]
+ name = "my-project-runner1"
+ url = "http://127.0.0.1:3000"
+ id = 38
+ token = "glrt-TOKEN"
+ token_obtained_at = 2023-07-05T08:56:33Z
+ token_expires_at = 0001-01-01T00:00:00Z
+ executor = "shell"
+```
+
+## Trigger a pipeline to run your runner
+
+Next, trigger a pipeline in your project so you can view your runner execute a job.
+
+1. On the left sidebar, at the top, select **Search GitLab** (**{search}**) to find your project.
+1. Select **Build > Pipelines**.
+1. Select **Run pipeline**.
+1. Select a job to view the job log. The output should look similar to this example, which shows
+ your runner successfully executing the job:
+
+ ```shell
+ Running with gitlab-runner 16.2.0 (782e15da)
+ on my-project-runner TOKEN, system ID: SYSTEM ID
+ Preparing the "shell" executor
+ 00:00
+ Using Shell (bash) executor...
+ Preparing environment
+ 00:00
+ /Users/username/.bash_profile: line 9: setopt: command not found
+ Running on MACHINE-NAME...
+ Getting source from Git repository
+ 00:01
+ /Users/username/.bash_profile: line 9: setopt: command not found
+ Fetching changes with git depth set to 20...
+ Reinitialized existing Git repository in /Users/username/project-repository
+ Checking out 7226fc70 as detached HEAD (ref is main)...
+ Skipping object checkout, Git LFS is not installed for this repository.
+ Consider installing it with 'git lfs install'.
+ Skipping Git submodules setup
+ Executing "step_script" stage of the job script
+ 00:00
+ /Users/username/.bash_profile: line 9: setopt: command not found
+ $ echo "Building the project"
+ Building the project
+ Job succeeded
+
+ ```
+
+You have now successfully created, registered, and run your first runner!
diff --git a/doc/tutorials/website_project_with_analytics/index.md b/doc/tutorials/website_project_with_analytics/index.md
new file mode 100644
index 00000000000..3ae33fa4f41
--- /dev/null
+++ b/doc/tutorials/website_project_with_analytics/index.md
@@ -0,0 +1,162 @@
+---
+stage: Plan
+group: Optimize
+info: For assistance with this tutorial, see https://about.gitlab.com/handbook/product/ux/technical-writing/#assignments-to-other-projects-and-subjects.
+---
+
+# Tutorial: Set up an analytics-powered website project **(ULTIMATE)**
+
+When you work on a complex project (for example, a website), you likely collaborate with other people to build and maintain it.
+The way you collaborate and communicate in your team can make or break the project, so you want processes in place that help team members follow and achieve the common goal.
+Analytics metrics help you understand how the team is doing, and if you need to adjust processes so you can work better together.
+GitLab provides different types of [analytics](../../user/analytics/index.md) insights at the instance, group, and project level.
+If this list seems long and you're not sure where to start, then this tutorial is for you.
+
+Follow along to learn how to set up an example website project, collaborate with other GitLab users,
+and use project-level analytics reports to evaluate the development of your project.
+
+Prerequisite:
+
+- You must have the Owner role for the group in which you create the project.
+
+Here's an overview of what we're going to do:
+
+1. Create a project from a template.
+1. Invite users to the project.
+1. Create project labels.
+1. Create a value stream with a custom stage.
+1. Create an Insights report.
+1. View merge request and issue analytics.
+
+## Create a project from a template
+
+First of all, you need to create a project in your group.
+
+GitLab provides project templates,
+which make it easier to set up a project with all the necessary files for various use cases.
+Here, you'll create a project for a Hugo website.
+
+To create a project:
+
+1. On the left sidebar, at the top, select **Create new** (**{plus}**) and **New project/repository**.
+1. Select **Create from template**.
+1. Select the **Pages/Hugo** template.
+1. In the **Project name** text box, enter a name (for example `My website`).
+1. From the **Project URL** dropdown list, select the group you want to create the project in.
+1. In the **Project slug** text box, enter a slug for your project (for example, `my-website`).
+1. Optional. In the **Project description** text box, enter a description of your project.
+ For example, "Analytics-powered project for a website built with Hugo". You can add or edit this description at any time.
+1. Under **Visibility Level**, select the desired level for the project.
+ If you create the project in a group, the visibility setting for a project must be at least as restrictive as the visibility of its parent group.
+1. Select **Create project**.
+
+Now you have a project with all the files you need for a Hugo website.
+
+## Invite users to the project
+
+When working on a large project such as a website, you'll likely need to collaborate with other people,
+such as developers and designers.
+You have to invite them to your project, so that they get access to all the files, issues, and reports.
+
+To invite a user to the `My website` project:
+
+1. In the project, select **Manage > Members**.
+1. Select **Invite members**.
+1. Enter the user's **username**.
+1. From the **Role** dropdown list, select the **Developer** role or higher.
+ Users must have at least the Developer role to view analytics and contribute to issues and merge requests.
+1. Optional. In the **Access expiration date** picker, select a date.
+ This step is recommended if the invited member is expected to contribute to the project only for a limited time.
+1. Select **Invite**.
+
+The invited user should now be a member of the project.
+You can [view, filter, and search for members](../../user/project/members/index.md#filter-and-sort-members) of your project.
+
+## Create project labels
+
+[Labels](../../user/project/labels.md) help you organize and track issues, merge requests, and epics.
+You can create as many labels as you need for your projects and groups.
+For example, for a website project like this one, the labels `feature request` and `bug` might be useful.
+
+To create a project label, in the `My website` project:
+
+1. Select **Manage > Labels**.
+1. Select **New label**.
+1. In the **Title** field, enter `feature request`.
+1. Optional. In the **Description** field, enter additional information about how and when to use this label.
+1. Optional. Select a color by selecting from the available colors, or enter a hex color value for a specific color in the **Background color** field.
+1. Select **Create label**.
+
+The label should now appear in the [label list](../../user/project/labels.md#view-project-labels),
+and you can use it to create a value stream with a custom stage.
+
+## Create a value stream with a custom stage
+
+Now that you have a project with collaborators, you can start tracking and visualizing the activity.
+[Value Stream Analytics](../../user/group/value_stream_analytics/index.md) helps you measure the time it takes
+to go from an idea to production, and identify inefficiencies in the development process.
+
+To get started, create a value stream in the `My website` project:
+
+1. Select **Analyze > Value Stream Analytics**.
+1. Select **Create new Value Stream**.
+1. Enter a name for the value stream, for example `My website value stream`.
+1. Select **Create from default template**.
+1. To add a custom stage, select **Add another stage**.
+ - Enter a name for the stage, for example `Labeled MRs merged`.
+ - From the **Start event** dropdown list, select **Merge request label was added**, then the `feature request` label.
+ - From the **Stop event** dropdown list, select **Merge request merged**.
+1. Select **Create value stream**.
+
+After you create the value stream, data starts collecting and loading.
+This process might take a while. When it's ready, the dashboard is displayed in **Analyze > Value Stream Analytics**.
+
+In the meantime, you can start creating an Insights report for your project.
+
+## Create an Insights report
+
+While Value Stream Analytics give an overview of the entire development process,
+[Insights](../../user/project/insights/index.md) provide a more granular view of a project's
+issues created and closed, and average merge time of merge requests.
+This data visualization can help you triage issues at a glance.
+
+You can create as many Insights reports with different charts as you need.
+For example, a stacked bar chart for bugs by severity or a line chart for issues opened over the month.
+
+To create an Insights report, in the `My website` project:
+
+1. Above the file list, select the plus icon, then select **New file**.
+1. In the **File name** text box, enter `.gitlab/insights.yml`.
+1. In the large text box, enter the following code:
+
+ ```yaml
+ bugsCharts:
+ title: "Charts for bugs"
+ charts:
+ - title: "Monthly bugs created"
+ description: "Open bugs created per month"
+ type: bar
+ query:
+ data_source: issuables
+ params:
+ issuable_type: issue
+ issuable_state: opened
+ filter_labels:
+ - bug
+ group_by: month
+ period_limit: 12
+ ```
+
+1. Select **Commit changes**.
+
+Now you have an Insights bar chart that displays the number of issues with the label `~bug` created per month, for the past 12 months.
+You and project members with at least the Developer role can view the Insights report in **Analyze > Insights**.
+
+## View merge request and issue analytics
+
+In addition to the Insights reports, you can get detailed analytics on the merge requests and issues of your project.
+[Merge request analytics](../../user/analytics/merge_request_analytics.md) and [Issue analytics](../../user/analytics/issue_analytics.md) display charts and tables with metrics such as assignees, merge request throughput, and issue status.
+
+To view merge request and issue analytics, in the `My website` project, select **Analyze > Merge request analytics** or **Analyze > Issue analytics**.
+
+That was it! Now you have an analytics-powered website project on which you can collaborate efficiently with your team.
diff --git a/doc/user/project/import/gitea.md b/doc/user/project/import/gitea.md
index 22c89084c56..dbf89f1ee9c 100644
--- a/doc/user/project/import/gitea.md
+++ b/doc/user/project/import/gitea.md
@@ -57,7 +57,7 @@ GitLab access your repositories:
1. Select **Generate Token**.
1. Copy the token hash.
1. Go back to GitLab and provide the token to the Gitea importer.
-1. Select **List Your Gitea Repositories** and wait while GitLab reads
+1. Select **List your Gitea repositories** and wait while GitLab reads
your repositories' information. After it's done, GitLab displays the importer
page to select the repositories to import.
diff --git a/doc/user/project/repository/push_rules.md b/doc/user/project/repository/push_rules.md
index 81896d64815..31257ee7811 100644
--- a/doc/user/project/repository/push_rules.md
+++ b/doc/user/project/repository/push_rules.md
@@ -6,6 +6,8 @@ info: "To determine the technical writer assigned to the Stage/Group associated
# Push rules **(PREMIUM)**
+> Maximum regular expression length for push rules [changed](https://gitlab.com/gitlab-org/gitlab/-/issues/411901) from 255 to 511 characters in GitLab 16.3.
+
Push rules are [pre-receive Git hooks](https://git-scm.com/book/en/v2/Customizing-Git-Git-Hooks) you
can enable in a user-friendly interface. Push rules give you more control over what
can and can't be pushed to your repository. While GitLab offers
@@ -19,7 +21,7 @@ can and can't be pushed to your repository. While GitLab offers
GitLab uses [RE2 syntax](https://github.com/google/re2/wiki/Syntax) for regular expressions
in push rules. You can test them at the [regex101 regex tester](https://regex101.com/).
-Each regular expression is limited to 255 characters.
+Each regular expression is limited to 511 characters.
For custom push rules use [server hooks](../../../administration/server_hooks.md).
diff --git a/doc/user/usage_quotas.md b/doc/user/usage_quotas.md
index a2f436c74dc..4e604ba98be 100644
--- a/doc/user/usage_quotas.md
+++ b/doc/user/usage_quotas.md
@@ -112,6 +112,8 @@ Depending on your role, you can also use the following methods to manage or redu
- [Reduce repository size](project/repository/reducing_the_repo_size_using_git.md).
- [Reduce container registry storage](packages/container_registry/reduce_container_registry_storage.md).
- [Reduce wiki repository size](../administration/wikis/index.md#reduce-wiki-repository-size).
+- [Manage artifact expiration period](../ci/yaml/index.md#artifactsexpire_in).
+- [Reduce build artifact storage](../ci/jobs/job_artifacts.md#delete-job-log-and-artifacts).
## Manage your transfer usage
diff --git a/locale/gitlab.pot b/locale/gitlab.pot
index 003906aa663..bc18812ad39 100644
--- a/locale/gitlab.pot
+++ b/locale/gitlab.pot
@@ -21374,10 +21374,10 @@ msgstr ""
msgid "Gitaly|Address"
msgstr ""
-msgid "Gitea Host URL"
+msgid "Gitea host URL"
msgstr ""
-msgid "Gitea Import"
+msgid "Gitea import"
msgstr ""
msgid "GithubImporter|%{noteable_type} comment %{note_id}"
@@ -23429,6 +23429,12 @@ msgstr ""
msgid "IdentityVerification|%{linkStart}Enter a new phone number%{linkEnd}"
msgstr ""
+msgid "IdentityVerification|A code has already been sent to this email address. Check your spam folder or enter another email address."
+msgstr ""
+
+msgid "IdentityVerification|A new code has been sent to your updated email address."
+msgstr ""
+
msgid "IdentityVerification|A new code has been sent."
msgstr ""
@@ -23450,6 +23456,9 @@ msgstr ""
msgid "IdentityVerification|Didn't receive a code? %{linkStart}Send a new code%{linkEnd}"
msgstr ""
+msgid "IdentityVerification|Email update is only offered once."
+msgstr ""
+
msgid "IdentityVerification|Enter a code."
msgstr ""
@@ -23504,6 +23513,9 @@ msgstr ""
msgid "IdentityVerification|Please enter a valid code"
msgstr ""
+msgid "IdentityVerification|Please enter a valid email address."
+msgstr ""
+
msgid "IdentityVerification|Resend code"
msgstr ""
@@ -23534,6 +23546,9 @@ msgstr ""
msgid "IdentityVerification|There was a problem with the credit card details you entered. Use a different credit card and try again."
msgstr ""
+msgid "IdentityVerification|Update email"
+msgstr ""
+
msgid "IdentityVerification|Verification code"
msgstr ""
@@ -23766,9 +23781,6 @@ msgstr[1] ""
msgid "Import CSV"
msgstr ""
-msgid "Import Projects from Gitea"
-msgstr ""
-
msgid "Import an exported GitLab project"
msgstr ""
@@ -23826,6 +23838,9 @@ msgstr ""
msgid "Import projects from GitLab.com"
msgstr ""
+msgid "Import projects from Gitea"
+msgstr ""
+
msgid "Import repositories from Bitbucket Server"
msgstr ""
@@ -28017,9 +28032,6 @@ msgstr ""
msgid "List"
msgstr ""
-msgid "List Your Gitea Repositories"
-msgstr ""
-
msgid "List available repositories"
msgstr ""
@@ -28050,6 +28062,9 @@ msgstr ""
msgid "List your Bitbucket Server repositories"
msgstr ""
+msgid "List your Gitea repositories"
+msgstr ""
+
msgid "Load more"
msgstr ""
@@ -32543,6 +32558,9 @@ msgstr ""
msgid "Only members of this group can access the wiki."
msgstr ""
+msgid "Only one security policy bot is allowed per project"
+msgstr ""
+
msgid "Only one source is required but both were provided"
msgstr ""
@@ -33730,6 +33748,9 @@ msgstr ""
msgid "Personal Access Token prefix"
msgstr ""
+msgid "Personal access token"
+msgstr ""
+
msgid "Personal project creation is not allowed. Please contact your administrator with questions"
msgstr ""
@@ -48663,7 +48684,7 @@ msgstr ""
msgid "To get started, click the link below to confirm your account."
msgstr ""
-msgid "To get started, please enter your Gitea Host URL and a %{link_to_personal_token}."
+msgid "To get started, please enter your Gitea host URL and a %{link_to_personal_token}."
msgstr ""
msgid "To get started, use the link below to confirm your account."
diff --git a/spec/features/users/email_verification_on_login_spec.rb b/spec/features/users/email_verification_on_login_spec.rb
index c9b1670be82..7675de28f86 100644
--- a/spec/features/users/email_verification_on_login_spec.rb
+++ b/spec/features/users/email_verification_on_login_spec.rb
@@ -5,7 +5,9 @@ require 'spec_helper'
RSpec.describe 'Email Verification On Login', :clean_gitlab_redis_rate_limiting, :js, feature_category: :system_access do
include EmailHelpers
- let_it_be(:user) { create(:user) }
+ let_it_be_with_reload(:user) { create(:user) }
+ let_it_be(:another_user) { create(:user) }
+ let_it_be(:new_email) { build_stubbed(:user).email }
let(:require_email_verification_enabled) { user }
@@ -97,6 +99,53 @@ RSpec.describe 'Email Verification On Login', :clean_gitlab_redis_rate_limiting,
end
end
+ describe 'updating the email address' do
+ it 'offers to update the email address' do
+ perform_enqueued_jobs do
+ # When logging in
+ gitlab_sign_in(user)
+
+ # Expect an instructions email to be sent with a code
+ code = expect_instructions_email_and_extract_code
+
+ # It shows an update email button
+ expect(page).to have_button s_('IdentityVerification|Update email')
+
+ # Click Update email button
+ click_button s_('IdentityVerification|Update email')
+
+ # Try to update with another user's email address
+ fill_in _('Email'), with: another_user.email
+ click_button s_('IdentityVerification|Update email')
+ expect(page).to have_content('Email has already been taken')
+
+ # Update to a unique email address
+ fill_in _('Email'), with: new_email
+ click_button s_('IdentityVerification|Update email')
+ expect(page).to have_content(s_('IdentityVerification|A new code has been sent to ' \
+ 'your updated email address.'))
+ expect_log_message('Instructions Sent', 2)
+
+ new_code = expect_email_changed_notification_to_old_address_and_instructions_email_to_new_address
+
+ # Verify the old code is different from the new code
+ expect(code).not_to eq(new_code)
+ verify_code(new_code)
+
+ # Expect the user to be unlocked
+ expect_user_to_be_unlocked
+ expect_user_to_be_confirmed
+
+ # When logging in again
+ gitlab_sign_out
+ gitlab_sign_in(user)
+
+ # It does not show an update email button anymore
+ expect(page).not_to have_button s_('IdentityVerification|Update email')
+ end
+ end
+ end
+
describe 'verification errors' do
it 'rate limits verifications' do
perform_enqueued_jobs do
@@ -339,6 +388,28 @@ RSpec.describe 'Email Verification On Login', :clean_gitlab_redis_rate_limiting,
end
end
+ def expect_user_to_be_confirmed
+ aggregate_failures do
+ expect(user.email).to eq(new_email)
+ expect(user.unconfirmed_email).to be_nil
+ end
+ end
+
+ def expect_email_changed_notification_to_old_address_and_instructions_email_to_new_address
+ changed_email = ActionMailer::Base.deliveries[0]
+ instructions_email = ActionMailer::Base.deliveries[1]
+
+ expect(changed_email.to).to match_array([user.email])
+ expect(changed_email.subject).to eq('Email Changed')
+
+ expect(instructions_email.to).to match_array([new_email])
+ expect(instructions_email.subject).to eq(s_('IdentityVerification|Verify your identity'))
+
+ reset_delivered_emails!
+
+ instructions_email.body.parts.first.to_s[/\d{#{Users::EmailVerification::GenerateTokenService::TOKEN_LENGTH}}/o]
+ end
+
def expect_instructions_email_and_extract_code
mail = find_email_for(user)
expect(mail.to).to match_array([user.email])
diff --git a/spec/frontend/search/sidebar/components/archived_filter_spec.js b/spec/frontend/search/sidebar/components/archived_filter_spec.js
index eaea731882e..2838cb60fa1 100644
--- a/spec/frontend/search/sidebar/components/archived_filter_spec.js
+++ b/spec/frontend/search/sidebar/components/archived_filter_spec.js
@@ -53,4 +53,20 @@ describe('ArchivedFilter', () => {
expect(findH5().text()).toBe(archivedFilterData.headerLabel);
});
});
+
+ describe.each`
+ include_archived | checkboxState
+ ${''} | ${'false'}
+ ${'false'} | ${'false'}
+ ${'true'} | ${'true'}
+ ${'sdfsdf'} | ${'false'}
+ `('selectedFilter', ({ include_archived, checkboxState }) => {
+ beforeEach(() => {
+ createComponent({ urlQuery: { include_archived } });
+ });
+
+ it('renders the component', () => {
+ expect(findCheckboxFilter().attributes('checked')).toBe(checkboxState);
+ });
+ });
});
diff --git a/spec/frontend/sessions/new/components/email_verification_spec.js b/spec/frontend/sessions/new/components/email_verification_spec.js
index 8ff139e8475..30ba2782f2f 100644
--- a/spec/frontend/sessions/new/components/email_verification_spec.js
+++ b/spec/frontend/sessions/new/components/email_verification_spec.js
@@ -6,11 +6,13 @@ import { s__ } from '~/locale';
import { createAlert, VARIANT_SUCCESS } from '~/alert';
import { HTTP_STATUS_NOT_FOUND, HTTP_STATUS_OK } from '~/lib/utils/http_status';
import EmailVerification from '~/sessions/new/components/email_verification.vue';
+import UpdateEmail from '~/sessions/new/components/update_email.vue';
import { visitUrl } from '~/lib/utils/url_utility';
import {
I18N_EMAIL_EMPTY_CODE,
I18N_EMAIL_INVALID_CODE,
I18N_GENERIC_ERROR,
+ I18N_UPDATE_EMAIL,
I18N_RESEND_LINK,
I18N_EMAIL_RESEND_SUCCESS,
} from '~/sessions/new/constants';
@@ -29,18 +31,22 @@ describe('EmailVerification', () => {
obfuscatedEmail: 'al**@g*****.com',
verifyPath: '/users/sign_in',
resendPath: '/users/resend_verification_code',
+ isOfferEmailReset: true,
+ updateEmailPath: '/users/update_email',
};
- const createComponent = () => {
+ const createComponent = (props = {}) => {
wrapper = mountExtended(EmailVerification, {
- propsData: defaultPropsData,
+ propsData: { ...defaultPropsData, ...props },
});
};
const findForm = () => wrapper.findComponent(GlForm);
const findCodeInput = () => wrapper.findComponent(GlFormInput);
+ const findUpdateEmail = () => wrapper.findComponent(UpdateEmail);
const findSubmitButton = () => wrapper.find('[type="submit"]');
const findResendLink = () => wrapper.findByText(I18N_RESEND_LINK);
+ const findUpdateEmailLink = () => wrapper.findByText(I18N_UPDATE_EMAIL);
const enterCode = (code) => findCodeInput().setValue(code);
const submitForm = () => findForm().trigger('submit');
@@ -202,4 +208,44 @@ describe('EmailVerification', () => {
expect(findCodeInput().element.value).toBe('');
});
});
+
+ describe('updating the email', () => {
+ it('contains the link to show the update email form', () => {
+ expect(findUpdateEmailLink().exists()).toBe(true);
+ });
+
+ describe('when the isOfferEmailReset property is set to false', () => {
+ beforeEach(() => {
+ createComponent({ isOfferEmailReset: false });
+ });
+
+ it('does not contain the link to show the update email form', () => {
+ expect(findUpdateEmailLink().exists()).toBe(false);
+ });
+ });
+
+ it('shows the UpdateEmail component when clicking the link', async () => {
+ expect(findUpdateEmail().exists()).toBe(false);
+
+ await findUpdateEmailLink().trigger('click');
+
+ expect(findUpdateEmail().exists()).toBe(true);
+ });
+
+ describe('when the UpdateEmail component triggers verifyToken', () => {
+ const newEmail = 'new@ema.il';
+
+ beforeEach(async () => {
+ enterCode('123');
+ await findUpdateEmailLink().trigger('click');
+ findUpdateEmail().vm.$emit('verifyToken', newEmail);
+ });
+
+ it('hides the UpdateEmail component, shows the updated email address and resets the form', () => {
+ expect(findUpdateEmail().exists()).toBe(false);
+ expect(wrapper.text()).toContain(newEmail);
+ expect(findCodeInput().element.value).toBe('');
+ });
+ });
+ });
});
diff --git a/spec/frontend/sessions/new/components/update_email_spec.js b/spec/frontend/sessions/new/components/update_email_spec.js
new file mode 100644
index 00000000000..37da8b56e9b
--- /dev/null
+++ b/spec/frontend/sessions/new/components/update_email_spec.js
@@ -0,0 +1,184 @@
+import { GlForm, GlFormInput } from '@gitlab/ui';
+import axios from 'axios';
+import MockAdapter from 'axios-mock-adapter';
+import { mountExtended } from 'helpers/vue_test_utils_helper';
+import { createAlert, VARIANT_SUCCESS } from '~/alert';
+import { HTTP_STATUS_NOT_FOUND, HTTP_STATUS_OK } from '~/lib/utils/http_status';
+import UpdateEmail from '~/sessions/new/components/update_email.vue';
+import {
+ I18N_CANCEL,
+ I18N_EMAIL_INVALID,
+ I18N_UPDATE_EMAIL_SUCCESS,
+ I18N_GENERIC_ERROR,
+ SUCCESS_RESPONSE,
+ FAILURE_RESPONSE,
+} from '~/sessions/new/constants';
+
+const validEmailAddress = 'valid@ema.il';
+const invalidEmailAddress = 'invalid@ema.il';
+
+jest.mock('~/alert');
+jest.mock('~/lib/utils/url_utility', () => ({
+ ...jest.requireActual('~/lib/utils/url_utility'),
+ visitUrl: jest.fn(),
+}));
+jest.mock('~/lib/utils/forms', () => ({
+ ...jest.requireActual('~/lib/utils/forms'),
+ isEmail: jest.fn().mockImplementation((email) => email === validEmailAddress),
+}));
+
+describe('EmailVerification', () => {
+ let wrapper;
+ let axiosMock;
+
+ const defaultPropsData = {
+ updateEmailPath: '/users/update_email',
+ };
+
+ const createComponent = (props = {}) => {
+ wrapper = mountExtended(UpdateEmail, {
+ propsData: { ...defaultPropsData, ...props },
+ });
+ };
+
+ const findForm = () => wrapper.findComponent(GlForm);
+ const findEmailInput = () => wrapper.findComponent(GlFormInput);
+ const findSubmitButton = () => wrapper.find('[type="submit"]');
+ const findCancelLink = () => wrapper.findByText(I18N_CANCEL);
+ const enterEmail = (email) => findEmailInput().setValue(email);
+ const submitForm = () => findForm().trigger('submit');
+
+ beforeEach(() => {
+ axiosMock = new MockAdapter(axios);
+ createComponent();
+ });
+
+ afterEach(() => {
+ createAlert.mockClear();
+ axiosMock.restore();
+ });
+
+ describe('when successfully verifying the email address', () => {
+ beforeEach(async () => {
+ enterEmail(validEmailAddress);
+
+ axiosMock
+ .onPatch(defaultPropsData.updateEmailPath)
+ .reply(HTTP_STATUS_OK, { status: SUCCESS_RESPONSE });
+
+ submitForm();
+ await axios.waitForAll();
+ });
+
+ it('shows a successfully updated alert', () => {
+ expect(createAlert).toHaveBeenCalledWith({
+ message: I18N_UPDATE_EMAIL_SUCCESS,
+ variant: VARIANT_SUCCESS,
+ });
+ });
+
+ it('emits a verifyToken event with the updated email address', () => {
+ expect(wrapper.emitted('verifyToken')[0]).toEqual([validEmailAddress]);
+ });
+ });
+
+ describe('error messages', () => {
+ beforeEach(() => {
+ enterEmail(invalidEmailAddress);
+ });
+
+ describe('when trying to submit an invalid email address', () => {
+ it('shows no error message before submitting the form', () => {
+ expect(wrapper.text()).not.toContain(I18N_EMAIL_INVALID);
+ expect(findSubmitButton().props('disabled')).toBe(false);
+ });
+
+ describe('when submitting the form', () => {
+ beforeEach(async () => {
+ submitForm();
+ await axios.waitForAll();
+ });
+
+ it('shows an error message and disables the submit button', () => {
+ expect(wrapper.text()).toContain(I18N_EMAIL_INVALID);
+ expect(findSubmitButton().props('disabled')).toBe(true);
+ });
+
+ describe('when entering a valid email address', () => {
+ beforeEach(() => {
+ enterEmail(validEmailAddress);
+ });
+
+ it('hides the error message and enables the submit button again', () => {
+ expect(wrapper.text()).not.toContain(I18N_EMAIL_INVALID);
+ expect(findSubmitButton().props('disabled')).toBe(false);
+ });
+ });
+ });
+ });
+
+ describe('when the server responds with an error message', () => {
+ const serverErrorMessage = 'server error message';
+
+ beforeEach(async () => {
+ enterEmail(validEmailAddress);
+
+ axiosMock
+ .onPatch(defaultPropsData.updateEmailPath)
+ .replyOnce(HTTP_STATUS_OK, { status: FAILURE_RESPONSE, message: serverErrorMessage });
+
+ submitForm();
+ await axios.waitForAll();
+ });
+
+ it('shows the error message and disables the submit button', () => {
+ expect(wrapper.text()).toContain(serverErrorMessage);
+ expect(findSubmitButton().props('disabled')).toBe(true);
+ });
+
+ describe('when entering a valid email address', () => {
+ beforeEach(async () => {
+ await enterEmail('');
+ enterEmail(validEmailAddress);
+ });
+
+ it('hides the error message and enables the submit button again', () => {
+ expect(wrapper.text()).not.toContain(serverErrorMessage);
+ expect(findSubmitButton().props('disabled')).toBe(false);
+ });
+ });
+ });
+
+ describe('when the server responds unexpectedly', () => {
+ it.each`
+ scenario | statusCode
+ ${'the response is undefined'} | ${HTTP_STATUS_OK}
+ ${'the request failed'} | ${HTTP_STATUS_NOT_FOUND}
+ `(`shows an alert when $scenario`, async ({ statusCode }) => {
+ enterEmail(validEmailAddress);
+
+ axiosMock.onPatch(defaultPropsData.updateEmailPath).replyOnce(statusCode);
+
+ submitForm();
+
+ await axios.waitForAll();
+
+ expect(createAlert).toHaveBeenCalledWith({
+ message: I18N_GENERIC_ERROR,
+ captureError: true,
+ error: expect.any(Error),
+ });
+ });
+ });
+ });
+
+ describe('when clicking the cancel link', () => {
+ beforeEach(() => {
+ findCancelLink().trigger('click');
+ });
+
+ it('emits a verifyToken event without an email address', () => {
+ expect(wrapper.emitted('verifyToken')[0]).toEqual([]);
+ });
+ });
+});
diff --git a/spec/helpers/sessions_helper_spec.rb b/spec/helpers/sessions_helper_spec.rb
index f35b6b28de8..366032100de 100644
--- a/spec/helpers/sessions_helper_spec.rb
+++ b/spec/helpers/sessions_helper_spec.rb
@@ -2,7 +2,7 @@
require 'spec_helper'
-RSpec.describe SessionsHelper do
+RSpec.describe SessionsHelper, feature_category: :system_access do
describe '#recently_confirmed_com?' do
subject { helper.recently_confirmed_com? }
@@ -51,6 +51,55 @@ RSpec.describe SessionsHelper do
end
end
+ describe '#unconfirmed_verification_email?', :freeze_time do
+ using RSpec::Parameterized::TableSyntax
+
+ let(:user) { build_stubbed(:user) }
+ let(:token_valid_for) { ::Users::EmailVerification::ValidateTokenService::TOKEN_VALID_FOR_MINUTES }
+
+ subject { helper.unconfirmed_verification_email?(user) }
+
+ where(:reset_first_offer?, :unconfirmed_email_present?, :token_valid?, :result) do
+ true | true | true | true
+ false | true | true | false
+ true | false | true | false
+ true | true | false | false
+ end
+
+ with_them do
+ before do
+ user.email_reset_offered_at = 1.minute.ago unless reset_first_offer?
+ user.unconfirmed_email = 'unconfirmed@email' if unconfirmed_email_present?
+ user.confirmation_sent_at = (token_valid? ? token_valid_for - 1 : token_valid_for + 1).minutes.ago
+ end
+
+ it { is_expected.to eq(result) }
+ end
+ end
+
+ describe '#verification_email' do
+ let(:unconfirmed_email) { 'unconfirmed@email' }
+ let(:user) { build_stubbed(:user, unconfirmed_email: unconfirmed_email) }
+
+ subject { helper.verification_email(user) }
+
+ context 'when there is an unconfirmed verification email' do
+ before do
+ allow(helper).to receive(:unconfirmed_verification_email?).and_return(true)
+ end
+
+ it { is_expected.to eq(unconfirmed_email) }
+ end
+
+ context 'when there is no unconfirmed verification email' do
+ before do
+ allow(helper).to receive(:unconfirmed_verification_email?).and_return(false)
+ end
+
+ it { is_expected.to eq(user.email) }
+ end
+ end
+
describe '#verification_data' do
let(:user) { build_stubbed(:user) }
@@ -58,7 +107,9 @@ RSpec.describe SessionsHelper do
expect(helper.verification_data(user)).to eq({
obfuscated_email: obfuscated_email(user.email),
verify_path: helper.session_path(:user),
- resend_path: users_resend_verification_code_path
+ resend_path: users_resend_verification_code_path,
+ offer_email_reset: user.email_reset_offered_at.nil?.to_s,
+ update_email_path: users_update_email_path
})
end
end
diff --git a/spec/helpers/sidebars_helper_spec.rb b/spec/helpers/sidebars_helper_spec.rb
index 1a81fec3a0b..3d675a65d99 100644
--- a/spec/helpers/sidebars_helper_spec.rb
+++ b/spec/helpers/sidebars_helper_spec.rb
@@ -105,7 +105,7 @@ RSpec.describe SidebarsHelper, feature_category: :navigation do
username: user.username,
avatar_url: user.avatar_url,
has_link_to_profile: helper.current_user_menu?(:profile),
- link_to_profile: user_url(user),
+ link_to_profile: user_path(user),
status: {
can_update: helper.can?(user, :update_user_status, user),
busy: user.status&.busy?,
@@ -138,7 +138,7 @@ RSpec.describe SidebarsHelper, feature_category: :navigation do
gitlab_com_and_canary: Gitlab.com_and_canary?,
canary_toggle_com_url: Gitlab::Saas.canary_toggle_com_url,
pinned_items: %w[foo bar],
- update_pins_url: pins_url,
+ update_pins_url: pins_path,
shortcut_links: [
{
title: _('Milestones'),
diff --git a/spec/models/application_setting_spec.rb b/spec/models/application_setting_spec.rb
index 51e0d4f3b11..bc3974fb1ee 100644
--- a/spec/models/application_setting_spec.rb
+++ b/spec/models/application_setting_spec.rb
@@ -86,6 +86,10 @@ RSpec.describe ApplicationSetting, feature_category: :shared, type: :model do
it { is_expected.not_to allow_value(['/example'] * 101).for(:protected_paths) }
it { is_expected.not_to allow_value(nil).for(:protected_paths) }
it { is_expected.to allow_value([]).for(:protected_paths) }
+ it { is_expected.to allow_value(['/example'] * 100).for(:protected_paths_for_get_request) }
+ it { is_expected.not_to allow_value(['/example'] * 101).for(:protected_paths_for_get_request) }
+ it { is_expected.not_to allow_value(nil).for(:protected_paths_for_get_request) }
+ it { is_expected.to allow_value([]).for(:protected_paths_for_get_request) }
it { is_expected.to allow_value(3).for(:push_event_hooks_limit) }
it { is_expected.not_to allow_value('three').for(:push_event_hooks_limit) }
diff --git a/spec/models/ci/runner_manager_spec.rb b/spec/models/ci/runner_manager_spec.rb
index 80cffb98dff..575064f0bea 100644
--- a/spec/models/ci/runner_manager_spec.rb
+++ b/spec/models/ci/runner_manager_spec.rb
@@ -331,4 +331,21 @@ RSpec.describe Ci::RunnerManager, feature_category: :runner_fleet, type: :model
.and change { runner_manager.reload.read_attribute(:executor_type) }
end
end
+
+ describe '#builds' do
+ let_it_be(:runner_manager) { create(:ci_runner_machine) }
+
+ subject(:builds) { runner_manager.builds }
+
+ it { is_expected.to be_empty }
+
+ context 'with an existing build' do
+ let!(:build) { create(:ci_build) }
+ let!(:runner_machine_build) do
+ create(:ci_runner_machine_build, runner_manager: runner_manager, build: build)
+ end
+
+ it { is_expected.to contain_exactly build }
+ end
+ end
end
diff --git a/spec/models/commit_collection_spec.rb b/spec/models/commit_collection_spec.rb
index 1d2d89573bb..be80aced3fd 100644
--- a/spec/models/commit_collection_spec.rb
+++ b/spec/models/commit_collection_spec.rb
@@ -27,11 +27,23 @@ RSpec.describe CommitCollection, feature_category: :source_code_management do
expect(collection.committers).to be_empty
end
- it 'excludes authors of merge commits' do
- commit = project.commit("60ecb67744cb56576c30214ff52294f8ce2def98")
- create(:user, email: commit.committer_email.upcase)
+ context 'when is with_merge_commits false' do
+ it 'excludes authors of merge commits' do
+ commit = project.commit("60ecb67744cb56576c30214ff52294f8ce2def98")
+ create(:user, email: commit.committer_email.upcase)
- expect(collection.committers).to be_empty
+ expect(collection.committers).to be_empty
+ end
+ end
+
+ context 'when is with_merge_commits true' do
+ let(:commit) { project.commit("60ecb67744cb56576c30214ff52294f8ce2def98") }
+
+ it 'does not exclude authors of merge commits' do
+ user = create(:user, email: commit.committer_email.upcase)
+
+ expect(collection.committers(with_merge_commits: true)).to contain_exactly(user)
+ end
end
context 'when committer email is nil' do
diff --git a/spec/models/merge_request_spec.rb b/spec/models/merge_request_spec.rb
index 1dfd14c0993..b463199a85b 100644
--- a/spec/models/merge_request_spec.rb
+++ b/spec/models/merge_request_spec.rb
@@ -1869,16 +1869,25 @@ RSpec.describe MergeRequest, factory_default: :keep, feature_category: :code_rev
end
describe '#committers' do
- it 'returns all the committers of every commit in the merge request' do
- users = subject.commits.without_merge_commits.map(&:committer_email).uniq.map do |email|
- create(:user, email: email)
- end
+ let(:commits) { double }
+ let(:committers) { double }
+
+ context 'when not given with_merge_commits' do
+ it 'calls committers on the commits object with the expected param' do
+ expect(subject).to receive(:commits).and_return(commits)
+ expect(commits).to receive(:committers).with(with_merge_commits: false).and_return(committers)
- expect(subject.committers).to match_array(users)
+ expect(subject.committers).to eq(committers)
+ end
end
- it 'returns an empty array if no committer is associated with a user' do
- expect(subject.committers).to be_empty
+ context 'when given with_merge_commits true' do
+ it 'calls committers on the commits object with the expected param' do
+ expect(subject).to receive(:commits).and_return(commits)
+ expect(commits).to receive(:committers).with(with_merge_commits: true).and_return(committers)
+
+ expect(subject.committers(with_merge_commits: true)).to eq(committers)
+ end
end
end
diff --git a/spec/models/user_spec.rb b/spec/models/user_spec.rb
index b43b149157c..5d964044041 100644
--- a/spec/models/user_spec.rb
+++ b/spec/models/user_spec.rb
@@ -124,6 +124,9 @@ RSpec.describe User, feature_category: :user_profile do
it { is_expected.to delegate_method(:organization).to(:user_detail).allow_nil }
it { is_expected.to delegate_method(:organization=).to(:user_detail).with_arguments(:args).allow_nil }
+
+ it { is_expected.to delegate_method(:email_reset_offered_at).to(:user_detail).allow_nil }
+ it { is_expected.to delegate_method(:email_reset_offered_at=).to(:user_detail).with_arguments(:args).allow_nil }
end
describe 'associations' do
diff --git a/spec/requests/verifies_with_email_spec.rb b/spec/requests/verifies_with_email_spec.rb
index 1c7e1bc9217..cc85ebc7ade 100644
--- a/spec/requests/verifies_with_email_spec.rb
+++ b/spec/requests/verifies_with_email_spec.rb
@@ -21,6 +21,16 @@ RSpec.describe 'VerifiesWithEmail', :clean_gitlab_redis_sessions, :clean_gitlab_
expect(mail.to).to match_array([user.email])
expect(mail.subject).to eq(s_('IdentityVerification|Verify your identity'))
end
+
+ context 'when an unconfirmed verification email exists' do
+ let(:new_email) { 'new@email' }
+ let(:user) { create(:user, unconfirmed_email: new_email, confirmation_sent_at: 1.minute.ago) }
+
+ it 'sends a verification instructions email to the unconfirmed email address' do
+ mail = ActionMailer::Base.deliveries.find { |d| d.to.include?(new_email) }
+ expect(mail.subject).to eq(s_('IdentityVerification|Verify your identity'))
+ end
+ end
end
shared_examples_for 'prompt for email verification' do
@@ -187,12 +197,42 @@ RSpec.describe 'VerifiesWithEmail', :clean_gitlab_redis_sessions, :clean_gitlab_
.and change { AuditEvent.count }.by(1)
.and change { AuthenticationEvent.count }.by(1)
.and change { user.last_activity_on }.to(Date.today)
+ .and change { user.email_reset_offered_at }.to(Time.current)
end
it 'returns the success status and a redirect path' do
submit_token
expect(json_response).to eq('status' => 'success', 'redirect_path' => users_successful_verification_path)
end
+
+ context 'when an unconfirmed verification email exists' do
+ before do
+ user.update!(email: new_email)
+ end
+
+ let(:new_email) { 'new@email' }
+
+ it 'confirms the email' do
+ expect { submit_token }
+ .to change { user.reload.email }.to(new_email)
+ .and change { user.confirmed_at }
+ .and change { user.unconfirmed_email }.from(new_email).to(nil)
+ end
+ end
+
+ context 'when email reset has already been offered' do
+ before do
+ user.update!(email_reset_offered_at: 1.hour.ago, email: 'new@email')
+ end
+
+ it 'does not change the email_reset_offered_at field' do
+ expect { submit_token }.not_to change { user.reload.email_reset_offered_at }
+ end
+
+ it 'does not confirm the email' do
+ expect { submit_token }.not_to change { user.reload.email }
+ end
+ end
end
context 'when not completing identity verification and logging in with another account' do
@@ -299,6 +339,79 @@ RSpec.describe 'VerifiesWithEmail', :clean_gitlab_redis_sessions, :clean_gitlab_
end
end
+ describe 'update_email' do
+ let(:new_email) { 'new@email' }
+
+ subject(:do_request) { patch(users_update_email_path(user: { email: new_email })) }
+
+ context 'when no verification_user_id session variable exists' do
+ it 'returns 204 No Content' do
+ do_request
+
+ expect(response).to have_gitlab_http_status(:no_content)
+ expect(response.body).to be_empty
+ end
+ end
+
+ context 'when a verification_user_id session variable exists' do
+ before do
+ stub_session(verification_user_id: user.id)
+ end
+
+ it 'locks the user' do
+ do_request
+
+ expect(user.reload.unlock_token).not_to be_nil
+ expect(user.locked_at).not_to be_nil
+ end
+
+ it 'sends a changed notification to the primary email and verification instructions to the unconfirmed email' do
+ perform_enqueued_jobs { do_request }
+
+ sent_mails = ActionMailer::Base.deliveries.map { |mail| { mail.to[0] => mail.subject } }
+
+ expect(sent_mails).to match_array([
+ { user.reload.unconfirmed_email => s_('IdentityVerification|Verify your identity') },
+ { user.email => 'Email Changed' }
+ ])
+ end
+
+ it 'calls the UpdateEmailService and returns a success response' do
+ expect_next_instance_of(Users::EmailVerification::UpdateEmailService, user: user) do |instance|
+ expect(instance).to receive(:execute).with(email: new_email).and_call_original
+ end
+
+ do_request
+
+ expect(json_response).to eq('status' => 'success')
+ end
+ end
+
+ context 'when failing to update the email address' do
+ let(:service_response) do
+ {
+ status: 'failure',
+ reason: 'the reason',
+ message: 'the message'
+ }
+ end
+
+ before do
+ stub_session(verification_user_id: user.id)
+ end
+
+ it 'calls the UpdateEmailService and returns an error response' do
+ expect_next_instance_of(Users::EmailVerification::UpdateEmailService, user: user) do |instance|
+ expect(instance).to receive(:execute).with(email: new_email).and_return(service_response)
+ end
+
+ do_request
+
+ expect(json_response).to eq(service_response.with_indifferent_access)
+ end
+ end
+ end
+
describe 'successful_verification' do
before do
allow(user).to receive(:role_required?).and_return(true) # It skips the required signup info before_action
diff --git a/spec/services/users/email_verification/update_email_service_spec.rb b/spec/services/users/email_verification/update_email_service_spec.rb
new file mode 100644
index 00000000000..8b4e5b8d7b5
--- /dev/null
+++ b/spec/services/users/email_verification/update_email_service_spec.rb
@@ -0,0 +1,119 @@
+# frozen_string_literal: true
+
+require 'spec_helper'
+
+RSpec.describe Users::EmailVerification::UpdateEmailService, feature_category: :instance_resiliency do
+ let_it_be_with_reload(:user) { create(:user) }
+ let(:email) { build_stubbed(:user).email }
+
+ describe '#execute' do
+ subject(:execute_service) { described_class.new(user: user).execute(email: email) }
+
+ context 'when successful' do
+ it { is_expected.to eq(status: :success) }
+
+ it 'does not send a confirmation instructions email' do
+ expect { execute_service }.not_to have_enqueued_mail(DeviseMailer, :confirmation_instructions)
+ end
+
+ it 'sets the unconfirmed_email and confirmation_sent_at fields', :freeze_time do
+ expect { execute_service }
+ .to change { user.unconfirmed_email }.from(nil).to(email)
+ .and change { user.confirmation_sent_at }.from(nil).to(Time.current)
+ end
+ end
+
+ context 'when rate limited' do
+ before do
+ allow(Gitlab::ApplicationRateLimiter).to receive(:throttled?)
+ .with(:email_verification_code_send, scope: user).and_return(true)
+ end
+
+ it 'returns a failure status' do
+ expect(execute_service).to eq(
+ {
+ status: :failure,
+ reason: :rate_limited,
+ message: format(s_("IdentityVerification|You've reached the maximum amount of tries. " \
+ 'Wait %{interval} and try again.'), interval: 'about 1 hour')
+ }
+ )
+ end
+ end
+
+ context 'when email reset has already been offered' do
+ before do
+ user.email_reset_offered_at = 1.minute.ago
+ end
+
+ it 'returns a failure status' do
+ expect(execute_service).to eq(
+ {
+ status: :failure,
+ reason: :already_offered,
+ message: s_('IdentityVerification|Email update is only offered once.')
+ }
+ )
+ end
+ end
+
+ context 'when email is unchanged' do
+ let(:email) { user.email }
+
+ it 'returns a failure status' do
+ expect(execute_service).to eq(
+ {
+ status: :failure,
+ reason: :no_change,
+ message: s_('IdentityVerification|A code has already been sent to this email address. ' \
+ 'Check your spam folder or enter another email address.')
+ }
+ )
+ end
+ end
+
+ context 'when email is missing' do
+ let(:email) { '' }
+
+ it 'returns a failure status' do
+ expect(execute_service).to eq(
+ {
+ status: :failure,
+ reason: :validation_error,
+ message: "Email can't be blank"
+ }
+ )
+ end
+ end
+
+ context 'when email is not valid' do
+ let(:email) { 'xxx' }
+
+ it 'returns a failure status' do
+ expect(execute_service).to eq(
+ {
+ status: :failure,
+ reason: :validation_error,
+ message: 'Email is invalid'
+ }
+ )
+ end
+ end
+
+ context 'when email is already taken' do
+ before do
+ create(:user, email: email)
+ end
+
+ it 'returns a failure status' do
+ expect(execute_service).to eq(
+ {
+ status: :failure,
+ reason: :validation_error,
+ message: 'Email has already been taken'
+ }
+ )
+ end
+ end
+ end
+end