gitlab.com/gitlab-org/gitlab-foss.git

author    GitLab Bot <gitlab-bot@gitlab.com>  2020-06-30 12:08:37 +0300
committer GitLab Bot <gitlab-bot@gitlab.com>  2020-06-30 12:08:37 +0300
commit    e860bae967a4fa90213ff1980fac0bea8f894366
tree      fc0b14df1f44d8e8393a2dd9b5b62aa2ed6630e9
parent    0bc8084ef319ca1df4bfb885e66facd04b0540db

Add latest changes from gitlab-org/gitlab@master
 .gitignore | 1
 .gitlab/ci/rails.gitlab-ci.yml | 2
 .gitlab/ci/rules.gitlab-ci.yml | 10
 app/assets/javascripts/clusters_list/components/clusters.vue | 7
 app/assets/javascripts/clusters_list/store/actions.js | 19
 app/assets/javascripts/notes/components/note_actions.vue | 5
 app/controllers/projects/issues_controller.rb | 1
 changelogs/unreleased/224528-un-assign-issue-to-from-comment-author-action-visibility.yml | 5
 changelogs/unreleased/fj-224486-add-snippets-size-column-to-root-storage-statistics.yml | 5
 db/migrate/20200625082258_add_snippets_size_to_root_storage_statistics.rb | 19
 db/structure.sql | 4
 doc/administration/postgresql/replication_and_failover.md | 205
 doc/administration/reference_architectures/index.md | 5
 doc/administration/troubleshooting/postgresql.md | 2
 doc/development/scalability.md | 3
 doc/raketasks/cleanup.md | 6
 locale/gitlab.pot | 3
 spec/frontend/clusters_list/store/actions_spec.js | 46
 spec/frontend/notes/components/note_actions_spec.js | 64
 spec/rubocop/cop/static_translation_definition_spec.rb | 6
 20 files changed, 368 insertions(+), 50 deletions(-)
diff --git a/.gitignore b/.gitignore
index 29180b76e26..151c75d474a 100644
--- a/.gitignore
+++ b/.gitignore
@@ -17,6 +17,7 @@ eslint-report.html
.rbx/
/.ruby-gemset
/.ruby-version
+/.tool-versions
/.rvmrc
.sass-cache/
/.secret
diff --git a/.gitlab/ci/rails.gitlab-ci.yml b/.gitlab/ci/rails.gitlab-ci.yml
index 657501337c4..084143f1695 100644
--- a/.gitlab/ci/rails.gitlab-ci.yml
+++ b/.gitlab/ci/rails.gitlab-ci.yml
@@ -224,7 +224,7 @@ gitlab:setup:
rspec:coverage:
extends:
- .rails-job-base
- - .rails:rules:ee-mr-and-master-only
+ - .rails:rules:rspec-coverage
stage: post-test
# We cannot use needs since it would mean needing 84 jobs (since most are parallelized)
# so we use `dependencies` here.
diff --git a/.gitlab/ci/rules.gitlab-ci.yml b/.gitlab/ci/rules.gitlab-ci.yml
index 86e3011e8f4..29822e97adb 100644
--- a/.gitlab/ci/rules.gitlab-ci.yml
+++ b/.gitlab/ci/rules.gitlab-ci.yml
@@ -390,9 +390,7 @@
rules:
- <<: *if-not-ee
when: never
- - <<: *if-dot-com-gitlab-org-master
- changes: *code-backstage-qa-patterns
- when: on_success
+ - <<: *if-master-schedule-2-hourly
############
# QA rules #
@@ -545,6 +543,12 @@
- <<: *if-merge-request
changes: *code-backstage-patterns
+.rails:rules:rspec-coverage:
+ rules:
+ - <<: *if-not-ee
+ when: never
+ - <<: *if-master-schedule-2-hourly
+
##################
# Releases rules #
##################
diff --git a/app/assets/javascripts/clusters_list/components/clusters.vue b/app/assets/javascripts/clusters_list/components/clusters.vue
index b7c72fcd50f..a51760599ea 100644
--- a/app/assets/javascripts/clusters_list/components/clusters.vue
+++ b/app/assets/javascripts/clusters_list/components/clusters.vue
@@ -1,5 +1,4 @@
<script>
-import * as Sentry from '@sentry/browser';
import { mapState, mapActions } from 'vuex';
import {
GlDeprecatedBadge as GlBadge,
@@ -88,7 +87,7 @@ export default {
this.fetchClusters();
},
methods: {
- ...mapActions(['fetchClusters', 'setPage']),
+ ...mapActions(['fetchClusters', 'reportSentryError', 'setPage']),
k8sQuantityToGb(quantity) {
if (!quantity) {
return 0;
@@ -150,7 +149,7 @@ export default {
};
}
} catch (error) {
- Sentry.captureException(error);
+ this.reportSentryError({ error, tag: 'totalMemoryAndUsageError' });
}
return { totalMemory: null, freeSpacePercentage: null };
@@ -183,7 +182,7 @@ export default {
};
}
} catch (error) {
- Sentry.captureException(error);
+ this.reportSentryError({ error, tag: 'totalCpuAndUsageError' });
}
return { totalCpu: null, freeSpacePercentage: null };
diff --git a/app/assets/javascripts/clusters_list/store/actions.js b/app/assets/javascripts/clusters_list/store/actions.js
index 7d8a728a134..dddcfb3d975 100644
--- a/app/assets/javascripts/clusters_list/store/actions.js
+++ b/app/assets/javascripts/clusters_list/store/actions.js
@@ -16,7 +16,14 @@ const allNodesPresent = (clusters, retryCount) => {
return retryCount > MAX_REQUESTS || clusters.every(cluster => cluster.nodes != null);
};
-export const fetchClusters = ({ state, commit }) => {
+export const reportSentryError = (_store, { error, tag }) => {
+ Sentry.withScope(scope => {
+ scope.setTag('javascript_clusters_list', tag);
+ Sentry.captureException(error);
+ });
+};
+
+export const fetchClusters = ({ state, commit, dispatch }) => {
let retryCount = 0;
commit(types.SET_LOADING_NODES, true);
@@ -49,10 +56,7 @@ export const fetchClusters = ({ state, commit }) => {
commit(types.SET_LOADING_CLUSTERS, false);
commit(types.SET_LOADING_NODES, false);
- Sentry.withScope(scope => {
- scope.setTag('javascript_clusters_list', 'fetchClustersSuccessCallback');
- Sentry.captureException(error);
- });
+ dispatch('reportSentryError', { error, tag: 'fetchClustersSuccessCallback' });
}
},
errorCallback: response => {
@@ -62,10 +66,7 @@ export const fetchClusters = ({ state, commit }) => {
commit(types.SET_LOADING_NODES, false);
flash(__('Clusters|An error occurred while loading clusters'));
- Sentry.withScope(scope => {
- scope.setTag('javascript_clusters_list', 'fetchClustersErrorCallback');
- Sentry.captureException(response);
- });
+ dispatch('reportSentryError', { error: response, tag: 'fetchClustersErrorCallback' });
},
});
diff --git a/app/assets/javascripts/notes/components/note_actions.vue b/app/assets/javascripts/notes/components/note_actions.vue
index 3b0fe8105e9..7615b0518b7 100644
--- a/app/assets/javascripts/notes/components/note_actions.vue
+++ b/app/assets/javascripts/notes/components/note_actions.vue
@@ -128,6 +128,9 @@ export default {
isIssue() {
return this.targetType === 'issue';
},
+ canAssign() {
+ return this.getNoteableData.current_user?.can_update && this.isIssue;
+ },
},
methods: {
onEdit() {
@@ -257,7 +260,7 @@ export default {
{{ __('Copy link') }}
</button>
</li>
- <li v-if="isIssue">
+ <li v-if="canAssign">
<button
class="btn-default btn-transparent"
data-testid="assign-user"
diff --git a/app/controllers/projects/issues_controller.rb b/app/controllers/projects/issues_controller.rb
index 803a779a1b3..92f6797ffe4 100644
--- a/app/controllers/projects/issues_controller.rb
+++ b/app/controllers/projects/issues_controller.rb
@@ -52,7 +52,6 @@ class Projects::IssuesController < Projects::ApplicationController
before_action only: :show do
push_frontend_feature_flag(:real_time_issue_sidebar, @project)
- push_frontend_feature_flag(:confidential_notes, @project)
push_frontend_feature_flag(:confidential_apollo_sidebar, @project)
end
diff --git a/changelogs/unreleased/224528-un-assign-issue-to-from-comment-author-action-visibility.yml b/changelogs/unreleased/224528-un-assign-issue-to-from-comment-author-action-visibility.yml
new file mode 100644
index 00000000000..be139a683c2
--- /dev/null
+++ b/changelogs/unreleased/224528-un-assign-issue-to-from-comment-author-action-visibility.yml
@@ -0,0 +1,5 @@
+---
+title: Resolve [Un]Assign Issue to/from Comment Author Action Visibility
+merge_request: 35459
+author:
+type: fixed
diff --git a/changelogs/unreleased/fj-224486-add-snippets-size-column-to-root-storage-statistics.yml b/changelogs/unreleased/fj-224486-add-snippets-size-column-to-root-storage-statistics.yml
new file mode 100644
index 00000000000..2e893cb6c2f
--- /dev/null
+++ b/changelogs/unreleased/fj-224486-add-snippets-size-column-to-root-storage-statistics.yml
@@ -0,0 +1,5 @@
+---
+title: Add snippets_size to namespace_root_storage_statistics
+merge_request: 35311
+author:
+type: changed
diff --git a/db/migrate/20200625082258_add_snippets_size_to_root_storage_statistics.rb b/db/migrate/20200625082258_add_snippets_size_to_root_storage_statistics.rb
new file mode 100644
index 00000000000..7dd0bd94805
--- /dev/null
+++ b/db/migrate/20200625082258_add_snippets_size_to_root_storage_statistics.rb
@@ -0,0 +1,19 @@
+# frozen_string_literal: true
+
+class AddSnippetsSizeToRootStorageStatistics < ActiveRecord::Migration[6.0]
+ include Gitlab::Database::MigrationHelpers
+
+ DOWNTIME = false
+
+ def up
+ with_lock_retries do
+ add_column :namespace_root_storage_statistics, :snippets_size, :bigint, default: 0, null: false
+ end
+ end
+
+ def down
+ with_lock_retries do
+ remove_column :namespace_root_storage_statistics, :snippets_size
+ end
+ end
+end
diff --git a/db/structure.sql b/db/structure.sql
index eb90cd4d3b8..be22d58ae23 100644
--- a/db/structure.sql
+++ b/db/structure.sql
@@ -12975,7 +12975,8 @@ CREATE TABLE public.namespace_root_storage_statistics (
wiki_size bigint DEFAULT 0 NOT NULL,
build_artifacts_size bigint DEFAULT 0 NOT NULL,
storage_size bigint DEFAULT 0 NOT NULL,
- packages_size bigint DEFAULT 0 NOT NULL
+ packages_size bigint DEFAULT 0 NOT NULL,
+ snippets_size bigint DEFAULT 0 NOT NULL
);
CREATE TABLE public.namespace_statistics (
@@ -23456,6 +23457,7 @@ COPY "schema_migrations" (version) FROM STDIN;
20200624075411
20200624222443
20200625045442
+20200625082258
20200625190458
20200626130220
\.
diff --git a/doc/administration/postgresql/replication_and_failover.md b/doc/administration/postgresql/replication_and_failover.md
index aa95b983d20..5b2c50c21d9 100644
--- a/doc/administration/postgresql/replication_and_failover.md
+++ b/doc/administration/postgresql/replication_and_failover.md
@@ -1127,3 +1127,208 @@ If you're running into an issue with a component not outlined here, be sure to c
- [Consul](../high_availability/consul.md#troubleshooting)
- [PostgreSQL](https://docs.gitlab.com/omnibus/settings/database.html#troubleshooting)
- [GitLab application](../high_availability/gitlab.md#troubleshooting)
+
+## Patroni
+
+NOTE: **Note:** Starting from GitLab 13.1, Patroni is available for **experimental** use to replace repmgr. Due to its
+experimental nature, Patroni support is **subject to change without notice.**
+
+Patroni is an opinionated solution for PostgreSQL high-availability. It takes control of PostgreSQL, overrides its
+configuration, and manages its lifecycle (start, stop, restart). This is a more active approach when compared to repmgr.
+Both repmgr and Patroni are supported and available, but Patroni will be the default (and perhaps the only) option
+for PostgreSQL 12 clustering and cascading replication for Geo deployments.
+
+The [architecture](#example-recommended-setup-manual-steps) mentioned above does not change for Patroni.
+You do not need any special consideration for Patroni while provisioning your database nodes. Patroni heavily relies on
+Consul to store the state of the cluster and to elect a leader. Any failure in the Consul cluster or its leader election
+propagates to the Patroni cluster as well.
+
+Similar to repmgr, Patroni monitors the cluster and handles failover. When the primary node fails, it works with Consul
+to notify PgBouncer. However, unlike repmgr, Patroni automatically transitions the old primary to a replica and rejoins
+it to the cluster after a failure, so you do not need any manual operation to recover the cluster as you do with repmgr.
+
+With Patroni the connection flow is slightly different. Patroni on each node connects to the Consul agent to join the
+cluster. Only after this point does it decide whether the node is the primary or a replica. Based on this decision, it
+configures and starts PostgreSQL, which it communicates with directly over a Unix socket. This implies that if the Consul
+cluster is not functional or does not have a leader, Patroni (and, by extension, PostgreSQL) will not start. Patroni also
+exposes a REST API which can be accessed via its [default port](https://docs.gitlab.com/omnibus/package-information/defaults.html#patroni)
+on each node.
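+
+For example, a quick way to check a node through this API (a minimal sketch, assuming the REST API listens on its default port, `8008`):
+
+```shell
+# Query the local Patroni REST API; the JSON response describes the node's role and state
+curl -s http://localhost:8008/
+```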
+
+### Configuring Patroni cluster
+
+You must enable Patroni explicitly to be able to use it (with `patroni['enable'] = true`). When Patroni is enabled,
+repmgr is disabled automatically.
+
+Any PostgreSQL configuration item that controls replication, for example `wal_level`, `max_wal_senders`, and so on, is
+strictly controlled by Patroni, which overrides the original settings that you make with the `postgresql[...]` configuration
+key. Hence, they are all separated and placed under `patroni['postgresql'][...]`. This behavior is limited to replication;
+Patroni honors any other PostgreSQL configuration made with the `postgresql[...]` configuration key. For example,
+`max_wal_senders` is set to `5` by default. If you wish to change this, you must set it with the
+`patroni['postgresql']['max_wal_senders']` configuration key.
+
+The configuration of a Patroni node is very similar to that of a repmgr node, but shorter. When Patroni is enabled, you can
+ignore any PostgreSQL replication setting (it will be overwritten anyway) and remove any `repmgr[...]` or other
+repmgr-specific configuration. In particular, make sure that you remove `postgresql['shared_preload_libraries'] = 'repmgr_funcs'`.
+
+Here is an example similar to [the one that was done with repmgr](#configuring-the-database-nodes):
+
+```ruby
+# Disable all components except PostgreSQL, Repmgr, and Consul
+roles['postgres_role']
+
+# Enable Patroni
+patroni['enable'] = true
+
+# PostgreSQL configuration
+postgresql['listen_address'] = '0.0.0.0'
+
+# Disable automatic database migrations
+gitlab_rails['auto_migrate'] = false
+
+# Configure the Consul agent
+consul['services'] = %w(postgresql)
+
+# START user configuration
+# Please set the real values as explained in the Required Information section
+#
+# Replace PGBOUNCER_PASSWORD_HASH with a generated md5 value
+postgresql['pgbouncer_user_password'] = 'PGBOUNCER_PASSWORD_HASH'
+# Replace POSTGRESQL_PASSWORD_HASH with a generated md5 value
+postgresql['sql_user_password'] = 'POSTGRESQL_PASSWORD_HASH'
+
+# Replace X with the number of database nodes + 1 (OPTIONAL: the default value is 5)
+patroni['postgresql']['max_wal_senders'] = X
+patroni['postgresql']['max_replication_slots'] = X
+
+# Replace XXX.XXX.XXX.XXX/YY with Network Address
+postgresql['trust_auth_cidr_addresses'] = %w(XXX.XXX.XXX.XXX/YY)
+
+# Replace placeholders:
+#
+# Y.Y.Y.Y consul1.gitlab.example.com Z.Z.Z.Z
+# with the addresses gathered for CONSUL_SERVER_NODES
+consul['configuration'] = {
+ retry_join: %w(Y.Y.Y.Y consul1.gitlab.example.com Z.Z.Z.Z)
+}
+#
+# END user configuration
+```
+
+You do not need any additional or different configuration for replica nodes. As a matter of fact, you don't have to have
+a predetermined primary node. Therefore, all database nodes use the same configuration.
+
+Once the configuration of a node is done, you must [reconfigure Omnibus GitLab](../restart_gitlab.md#omnibus-gitlab-reconfigure)
+on each node for the changes to take effect.
+
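+For example, to apply the changes on a database node:
+
+```shell
+sudo gitlab-ctl reconfigure
+```
+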
+Generally, when the Consul cluster is ready, the first node that [reconfigures](../restart_gitlab.md#omnibus-gitlab-reconfigure)
+becomes the leader. You do not need to sequence the nodes' reconfiguration; you can run them in parallel or in any order.
+If you choose an arbitrary order, you do not have any predetermined master.
+
+As opposed to repmgr, once the nodes are reconfigured, you do not need any further action or additional command to join
+the replicas.
+
+#### Database authorization for Patroni
+
+Patroni uses a Unix socket to manage the PostgreSQL instance. Therefore, connections from the `local` socket must be trusted.
+
+Also, replicas use the replication user (`gitlab_replicator` by default) to communicate with the leader. For this user,
+you can choose between `trust` and `md5` authentication. If you set `postgresql['sql_replication_password']`,
+Patroni will use `md5` authentication; otherwise, it falls back to `trust`. You must specify the cluster CIDR in
+`postgresql['md5_auth_cidr_addresses']` or `postgresql['trust_auth_cidr_addresses']` respectively.
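+
+For example, a minimal `gitlab.rb` sketch of the `md5` option (the password hash and CIDR below are placeholders):
+
+```ruby
+# Setting a replication password makes Patroni use md5 authentication for the replication user
+postgresql['sql_replication_password'] = 'REPLICATION_PASSWORD_HASH'
+# Allow the database nodes in this CIDR to authenticate with md5
+postgresql['md5_auth_cidr_addresses'] = %w(XXX.XXX.XXX.XXX/YY)
+```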
+
+### Interacting with Patroni cluster
+
+You can use `gitlab-ctl patroni members` to check the status of the cluster members. To check the status of each node,
+`gitlab-ctl patroni` provides two additional sub-commands, `check-leader` and `check-replica`, which indicate whether a node
+is the primary or a replica.
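+
+For example (a minimal sketch; run these on a database node):
+
+```shell
+# List the cluster members and their current roles
+sudo gitlab-ctl patroni members
+
+# Check whether this node is currently the leader or a replica
+sudo gitlab-ctl patroni check-leader
+sudo gitlab-ctl patroni check-replica
+```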
+
+When Patroni is enabled, you don't have direct control over the `postgresql` service. Patroni signals PostgreSQL's startup,
+shutdown, and restart. For example, to shut down PostgreSQL on a node, you must shut down Patroni on the same node
+with:
+
+```shell
+sudo gitlab-ctl stop patroni
+```
+
+### Failover procedure for Patroni
+
+With Patroni, you have two slightly different options: failover and switchover. Essentially, failover allows you to
+perform a manual failover when there are no healthy nodes, while switchover only works when the cluster is healthy and
+allows you to schedule a switchover (it can happen immediately). For further details, see
+[Patroni documentation on this subject](https://patroni.readthedocs.io/en/latest/rest_api.html#switchover-and-failover-endpoints).
+
+To schedule a switchover:
+
+```shell
+sudo gitlab-ctl patroni switchover
+```
+
+For manual failover:
+
+```shell
+sudo gitlab-ctl patroni failover
+```
+
+### Recovering the Patroni cluster
+
+To recover the old primary and rejoin it to the cluster as a replica, you can simply start Patroni with:
+
+```shell
+sudo gitlab-ctl start patroni
+```
+
+No further configuration or intervention is needed.
+
+### Maintenance procedure for Patroni
+
+With Patroni enabled, you can run planned maintenance. If you want to do some maintenance work on one node and you
+don't want Patroni to manage it, you can put it into maintenance mode:
+
+```shell
+sudo gitlab-ctl patroni pause
+```
+
+When Patroni runs in paused mode, it does not change the state of PostgreSQL. Once you are done, you can resume Patroni:
+
+```shell
+sudo gitlab-ctl patroni resume
+```
+
+For further details, see [Patroni documentation on this subject](https://patroni.readthedocs.io/en/latest/pause.html).
+
+### Switching from repmgr to Patroni
+
+CAUTION: **Warning:**
+Although switching from repmgr to Patroni is fairly straightforward, the other way around is not. Rolling back from
+Patroni to repmgr can be complicated and may involve deletion of the data directory. If you need to do that, please contact
+GitLab support.
+
+You can switch an existing database cluster to use Patroni instead of repmgr with the following steps:
+
+1. Stop repmgr on all replica nodes and, lastly, on the primary node:
+
+ ```shell
+ sudo gitlab-ctl stop repmgrd
+ ```
+
+1. Stop PostgreSQL on all replica nodes:
+
+ ```shell
+ sudo gitlab-ctl stop postgresql
+ ```
+
+ NOTE: **Note:** Ensure that there is no `walsender` process running on the primary node.
+ `ps aux | grep walsender` must not show any running process.
+
+1. On the primary node, [configure Patroni](#configuring-patroni-cluster). Remove `repmgr` and any other
+ repmgr-specific configuration. Also remove any configuration that is related to PostgreSQL replication.
+1. [Reconfigure Omnibus GitLab](../restart_gitlab.md#omnibus-gitlab-reconfigure) on the primary node. It will become
+ the leader. You can check this with:
+
+ ```shell
+ sudo gitlab-ctl tail patroni
+ ```
+
+1. Repeat the last two steps for all replica nodes. `gitlab.rb` should look the same on all nodes.
+1. Optional: You can remove the `gitlab_repmgr` database and role on the primary node.
diff --git a/doc/administration/reference_architectures/index.md b/doc/administration/reference_architectures/index.md
index d42263c6ce1..1275379a7e0 100644
--- a/doc/administration/reference_architectures/index.md
+++ b/doc/administration/reference_architectures/index.md
@@ -120,13 +120,13 @@ As long as at least one of each component is online and capable of handling the
### Automated database failover **(PREMIUM ONLY)**
> - Level of complexity: **High**
-> - Required domain knowledge: PgBouncer, Repmgr, shared storage, distributed systems
+> - Required domain knowledge: PgBouncer, Repmgr or Patroni, shared storage, distributed systems
> - Supported tiers: [GitLab Premium and Ultimate](https://about.gitlab.com/pricing/)
By adding automatic failover for database systems, you can enable higher uptime
with additional database nodes. This extends the default database with
cluster management and failover policies.
-[PgBouncer in conjunction with Repmgr](../postgresql/replication_and_failover.md)
+[PgBouncer in conjunction with Repmgr or Patroni](../postgresql/replication_and_failover.md)
is recommended.
### Instance level replication with GitLab Geo **(PREMIUM ONLY)**
@@ -164,6 +164,7 @@ column.
| [PostgreSQL](../../development/architecture.md#postgresql) | Database | [PostgreSQL configuration](https://docs.gitlab.com/omnibus/settings/database.html) | Yes |
| [PgBouncer](../../development/architecture.md#pgbouncer) | Database connection pooler | [PgBouncer configuration](../high_availability/pgbouncer.md#running-pgbouncer-as-part-of-a-non-ha-gitlab-installation) **(PREMIUM ONLY)** | Yes |
| Repmgr | PostgreSQL cluster management and failover | [PostgreSQL and Repmgr configuration](../postgresql/replication_and_failover.md) | Yes |
+| Patroni | An alternative solution for PostgreSQL cluster management and failover | [PostgreSQL and Patroni configuration](../postgresql/replication_and_failover.md#patroni) | Yes |
| [Redis](../../development/architecture.md#redis) ([3](#footnotes)) | Key/value store for fast data lookup and caching | [Redis configuration](../high_availability/redis.md) | Yes |
| Redis Sentinel | Redis | [Redis Sentinel configuration](../high_availability/redis.md) | Yes |
| [Gitaly](../../development/architecture.md#gitaly) ([2](#footnotes)) ([7](#footnotes)) | Provides access to Git repositories | [Gitaly configuration](../gitaly/index.md#run-gitaly-on-its-own-server) | Yes |
diff --git a/doc/administration/troubleshooting/postgresql.md b/doc/administration/troubleshooting/postgresql.md
index e5a4dffb3cc..6dfc1197161 100644
--- a/doc/administration/troubleshooting/postgresql.md
+++ b/doc/administration/troubleshooting/postgresql.md
@@ -46,7 +46,7 @@ This section is for links to information elsewhere in the GitLab documentation.
- Managing Omnibus PostgreSQL versions [from the development docs](https://docs.gitlab.com/omnibus/development/managing-postgresql-versions.html)
- [PostgreSQL scaling](../postgresql/replication_and_failover.md)
- - including [troubleshooting](../postgresql/replication_and_failover.md#troubleshooting) `gitlab-ctl repmgr-check-master` and PgBouncer errors
+ - including [troubleshooting](../postgresql/replication_and_failover.md#troubleshooting) `gitlab-ctl repmgr-check-master` (or `gitlab-ctl patroni check-leader` if you are using Patroni) and PgBouncer errors
- [Developer database documentation](../../development/README.md#database-guides) - some of which is absolutely not for production use. Including:
- understanding EXPLAIN plans
diff --git a/doc/development/scalability.md b/doc/development/scalability.md
index c0c26df88b5..0fb54d89913 100644
--- a/doc/development/scalability.md
+++ b/doc/development/scalability.md
@@ -115,8 +115,7 @@ that backup, the database can apply the WAL logs in order until the
database has reached the target time.
On GitLab.com, Consul and Patroni work together to coordinate failovers with
-the read replicas. [Omnibus ships with repmgr instead of
-Patroni](../administration/postgresql/replication_and_failover.md).
+the read replicas. [Omnibus ships with both repmgr and Patroni](../administration/postgresql/replication_and_failover.md).
#### Load-balancing
diff --git a/doc/raketasks/cleanup.md b/doc/raketasks/cleanup.md
index 5bdae998ec9..76c51bab6f8 100644
--- a/doc/raketasks/cleanup.md
+++ b/doc/raketasks/cleanup.md
@@ -142,7 +142,7 @@ When you notice there are more job artifacts files on disk than there
should be, you can run:
```shell
-gitlab-rake gitlab:cleanup:orphan_job_artifact_files
+sudo gitlab-rake gitlab:cleanup:orphan_job_artifact_files
```
This command:
@@ -156,13 +156,13 @@ delete. Run the command with `DRY_RUN=false` if you actually want to
delete the files:
```shell
-gitlab-rake gitlab:cleanup:orphan_job_artifact_files DRY_RUN=false
+sudo gitlab-rake gitlab:cleanup:orphan_job_artifact_files DRY_RUN=false
```
You can also limit the number of files to delete with `LIMIT`:
```shell
-gitlab-rake gitlab:cleanup:orphan_job_artifact_files LIMIT=100
+sudo gitlab-rake gitlab:cleanup:orphan_job_artifact_files LIMIT=100
```
This will only delete up to 100 files from disk. You can use this to
diff --git a/locale/gitlab.pot b/locale/gitlab.pot
index 820aff7e733..39951ea79f3 100644
--- a/locale/gitlab.pot
+++ b/locale/gitlab.pot
@@ -24719,6 +24719,9 @@ msgstr ""
msgid "UsageQuota|Pipelines"
msgstr ""
+msgid "UsageQuota|Purchase more storage"
+msgstr ""
+
msgid "UsageQuota|Repositories"
msgstr ""
diff --git a/spec/frontend/clusters_list/store/actions_spec.js b/spec/frontend/clusters_list/store/actions_spec.js
index a51f7920ace..c8556350747 100644
--- a/spec/frontend/clusters_list/store/actions_spec.js
+++ b/spec/frontend/clusters_list/store/actions_spec.js
@@ -13,6 +13,28 @@ import * as Sentry from '@sentry/browser';
jest.mock('~/flash.js');
describe('Clusters store actions', () => {
+ let captureException;
+
+ describe('reportSentryError', () => {
+ beforeEach(() => {
+ captureException = jest.spyOn(Sentry, 'captureException');
+ });
+
+ afterEach(() => {
+ captureException.mockRestore();
+ });
+
+ it('should report sentry error', done => {
+ const sentryError = new Error('New Sentry Error');
+ const tag = 'sentryErrorTag';
+
+ testAction(actions.reportSentryError, { error: sentryError, tag }, {}, [], [], () => {
+ expect(captureException).toHaveBeenCalledWith(sentryError);
+ done();
+ });
+ });
+ });
+
describe('fetchClusters', () => {
let mock;
@@ -69,7 +91,15 @@ describe('Clusters store actions', () => {
{ type: types.SET_LOADING_CLUSTERS, payload: false },
{ type: types.SET_LOADING_NODES, payload: false },
],
- [],
+ [
+ {
+ type: 'reportSentryError',
+ payload: {
+ error: new Error('Request failed with status code 400'),
+ tag: 'fetchClustersErrorCallback',
+ },
+ },
+ ],
() => {
expect(flashError).toHaveBeenCalledWith(expect.stringMatching('error'));
done();
@@ -78,7 +108,6 @@ describe('Clusters store actions', () => {
});
describe('multiple api requests', () => {
- let captureException;
let pollRequest;
let pollStop;
@@ -86,7 +115,6 @@ describe('Clusters store actions', () => {
const pollHeaders = { 'poll-interval': pollInterval, ...headers };
beforeEach(() => {
- captureException = jest.spyOn(Sentry, 'captureException');
pollRequest = jest.spyOn(Poll.prototype, 'makeRequest');
pollStop = jest.spyOn(Poll.prototype, 'stop');
@@ -94,7 +122,6 @@ describe('Clusters store actions', () => {
});
afterEach(() => {
- captureException.mockRestore();
pollRequest.mockRestore();
pollStop.mockRestore();
});
@@ -164,11 +191,18 @@ describe('Clusters store actions', () => {
{ type: types.SET_LOADING_CLUSTERS, payload: false },
{ type: types.SET_LOADING_NODES, payload: false },
],
- [],
+ [
+ {
+ type: 'reportSentryError',
+ payload: {
+ error: new Error('clusters.every is not a function'),
+ tag: 'fetchClustersSuccessCallback',
+ },
+ },
+ ],
() => {
expect(pollRequest).toHaveBeenCalledTimes(1);
expect(pollStop).toHaveBeenCalledTimes(1);
- expect(captureException).toHaveBeenCalledTimes(1);
done();
},
);
diff --git a/spec/frontend/notes/components/note_actions_spec.js b/spec/frontend/notes/components/note_actions_spec.js
index 220ac22d8eb..5cc56cdefae 100644
--- a/spec/frontend/notes/components/note_actions_spec.js
+++ b/spec/frontend/notes/components/note_actions_spec.js
@@ -127,25 +127,63 @@ describe('noteActions', () => {
.catch(done.fail);
});
- it('should be possible to assign or unassign the comment author', () => {
- wrapper = shallowMountNoteActions(props, {
- targetType: () => 'issue',
- });
-
+ it('should not be possible to assign or unassign the comment author in a merge request', () => {
const assignUserButton = wrapper.find('[data-testid="assign-user"]');
- expect(assignUserButton.exists()).toBe(true);
+ expect(assignUserButton.exists()).toBe(false);
+ });
+ });
+ });
- assignUserButton.trigger('click');
- axiosMock.onPut(`${TEST_HOST}/api/v4/projects/group/project/issues/1`).reply(() => {
- expect(actions.updateAssignees).toHaveBeenCalled();
- });
+ describe('when a user has access to edit an issue', () => {
+ const testButtonClickTriggersAction = () => {
+ axiosMock.onPut(`${TEST_HOST}/api/v4/projects/group/project/issues/1`).reply(() => {
+ expect(actions.updateAssignees).toHaveBeenCalled();
});
- it('should not be possible to assign or unassign the comment author in a merge request', () => {
- const assignUserButton = wrapper.find('[data-testid="assign-user"]');
- expect(assignUserButton.exists()).toBe(false);
+ const assignUserButton = wrapper.find('[data-testid="assign-user"]');
+ expect(assignUserButton.exists()).toBe(true);
+ assignUserButton.trigger('click');
+ };
+
+ beforeEach(() => {
+ wrapper = shallowMountNoteActions(props, {
+ targetType: () => 'issue',
});
+ store.state.noteableData = {
+ current_user: {
+ can_update: true,
+ },
+ };
+ store.state.userData = userDataMock;
});
+
+ afterEach(() => {
+ wrapper.destroy();
+ axiosMock.restore();
+ });
+
+ it('should be possible to assign the comment author', testButtonClickTriggersAction);
+ it('should be possible to unassign the comment author', testButtonClickTriggersAction);
+ });
+
+ describe('when a user does not have access to edit an issue', () => {
+ const testButtonDoesNotRender = () => {
+ const assignUserButton = wrapper.find('[data-testid="assign-user"]');
+ expect(assignUserButton.exists()).toBe(false);
+ };
+
+ beforeEach(() => {
+ wrapper = shallowMountNoteActions(props, {
+ targetType: () => 'issue',
+ });
+ });
+
+ afterEach(() => {
+ wrapper.destroy();
+ });
+
+ it('should not be possible to assign the comment author', testButtonDoesNotRender);
+ it('should not be possible to unassign the comment author', testButtonDoesNotRender);
});
describe('user is not logged in', () => {
diff --git a/spec/rubocop/cop/static_translation_definition_spec.rb b/spec/rubocop/cop/static_translation_definition_spec.rb
index 2bad10d5e39..b6c9f6a25df 100644
--- a/spec/rubocop/cop/static_translation_definition_spec.rb
+++ b/spec/rubocop/cop/static_translation_definition_spec.rb
@@ -1,13 +1,13 @@
# frozen_string_literal: true
-require 'spec_helper'
+require 'fast_spec_helper'
require 'rubocop'
-require 'rubocop/rspec/support'
+require 'rspec-parameterized'
require_relative '../../../rubocop/cop/static_translation_definition'
-RSpec.describe RuboCop::Cop::StaticTranslationDefinition do
+RSpec.describe RuboCop::Cop::StaticTranslationDefinition, type: :rubocop do
include CopHelper
using RSpec::Parameterized::TableSyntax