gitlab.com/gitlab-org/gitlab-foss.git
-rw-r--r--  .gitlab/issue_templates/Experiment Implementation.md | 4
-rw-r--r--  app/assets/javascripts/lib/utils/listbox_helpers.js | 45
-rw-r--r--  app/assets/stylesheets/pages/notes.scss | 2
-rw-r--r--  app/assets/stylesheets/startup/startup-dark.scss | 3
-rw-r--r--  app/assets/stylesheets/startup/startup-general.scss | 3
-rw-r--r--  app/assets/stylesheets/startup/startup-signin.scss | 3
-rw-r--r--  app/views/admin/application_settings/service_usage_data.html.haml | 2
-rw-r--r--  data/deprecations/16-1-dependency-proxy-graphql-field.yml | 11
-rw-r--r--  doc/administration/audit_event_streaming.md | 17
-rw-r--r--  doc/administration/troubleshooting/gitlab_rails_cheat_sheet.md | 2
-rw-r--r--  doc/api/usage_data.md | 2
-rw-r--r--  doc/ci/pipelines/pipeline_security.md | 48
-rw-r--r--  doc/development/cicd/templates.md | 6
-rw-r--r--  doc/development/database/foreign_keys.md | 2
-rw-r--r--  doc/development/database/query_performance.md | 2
-rw-r--r--  doc/development/internal_analytics/index.md | 12
-rw-r--r--  doc/development/internal_analytics/service_ping/implement.md | 882
-rw-r--r--  doc/development/internal_analytics/service_ping/index.md | 509
-rw-r--r--  doc/development/internal_analytics/service_ping/metrics_dictionary.md | 334
-rw-r--r--  doc/development/internal_analytics/service_ping/metrics_instrumentation.md | 478
-rw-r--r--  doc/development/internal_analytics/service_ping/metrics_lifecycle.md | 106
-rw-r--r--  doc/development/internal_analytics/service_ping/performance_indicator_metrics.md | 16
-rw-r--r--  doc/development/internal_analytics/service_ping/review_guidelines.md | 80
-rw-r--r--  doc/development/internal_analytics/service_ping/troubleshooting.md | 164
-rw-r--r--  doc/development/internal_analytics/service_ping/usage_data.md | 69
-rw-r--r--  doc/development/internal_analytics/snowplow/event_dictionary_guide.md | 91
-rw-r--r--  doc/development/internal_analytics/snowplow/implementation.md | 523
-rw-r--r--  doc/development/internal_analytics/snowplow/index.md | 201
-rw-r--r--  doc/development/internal_analytics/snowplow/infrastructure.md | 101
-rw-r--r--  doc/development/internal_analytics/snowplow/review_guidelines.md | 44
-rw-r--r--  doc/development/internal_analytics/snowplow/schemas.md | 190
-rw-r--r--  doc/development/internal_analytics/snowplow/troubleshooting.md | 80
-rw-r--r--  doc/development/product_qualified_lead_guide/index.md | 2
-rw-r--r--  doc/development/service_ping/implement.md | 885
-rw-r--r--  doc/development/service_ping/index.md | 512
-rw-r--r--  doc/development/service_ping/metrics_dictionary.md | 337
-rw-r--r--  doc/development/service_ping/metrics_instrumentation.md | 481
-rw-r--r--  doc/development/service_ping/metrics_lifecycle.md | 109
-rw-r--r--  doc/development/service_ping/performance_indicator_metrics.md | 19
-rw-r--r--  doc/development/service_ping/review_guidelines.md | 83
-rw-r--r--  doc/development/service_ping/troubleshooting.md | 167
-rw-r--r--  doc/development/service_ping/usage_data.md | 72
-rw-r--r--  doc/development/snowplow/event_dictionary_guide.md | 94
-rw-r--r--  doc/development/snowplow/implementation.md | 526
-rw-r--r--  doc/development/snowplow/index.md | 204
-rw-r--r--  doc/development/snowplow/infrastructure.md | 104
-rw-r--r--  doc/development/snowplow/review_guidelines.md | 47
-rw-r--r--  doc/development/snowplow/schemas.md | 193
-rw-r--r--  doc/development/snowplow/troubleshooting.md | 83
-rw-r--r--  doc/raketasks/index.md | 2
-rw-r--r--  doc/update/deprecations.md | 16
-rw-r--r--  doc/user/admin_area/settings/index.md | 2
-rw-r--r--  doc/user/admin_area/settings/usage_statistics.md | 8
-rw-r--r--  doc/user/packages/container_registry/delete_container_registry_images.md | 8
-rw-r--r--  doc/user/packages/container_registry/index.md | 34
-rw-r--r--  doc/user/packages/container_registry/reduce_container_registry_storage.md | 6
-rw-r--r--  doc/user/packages/container_registry/troubleshoot_container_registry.md | 4
-rw-r--r--  doc/user/packages/dependency_proxy/index.md | 12
-rw-r--r--  doc/user/packages/harbor_container_registry/index.md | 21
-rw-r--r--  doc/user/project/settings/index.md | 59
-rw-r--r--  doc/user/project/wiki/group.md | 10
-rw-r--r--  doc/user/project/wiki/index.md | 64
-rw-r--r--  doc/user/usage_quotas.md | 2
-rw-r--r--  lib/gitlab/audit/type/definition.rb | 6
-rw-r--r--  locale/gitlab.pot | 15
-rw-r--r--  package.json | 2
-rw-r--r--  spec/features/admin/admin_settings_spec.rb | 10
-rw-r--r--  spec/frontend/analytics/shared/components/projects_dropdown_filter_spec.js | 7
-rw-r--r--  spec/frontend/lib/utils/listbox_helpers_spec.js | 89
-rw-r--r--  spec/lib/gitlab/audit/type/definition_spec.rb | 24
-rw-r--r--  yarn.lock | 8
71 files changed, 4406 insertions(+), 3953 deletions(-)
diff --git a/.gitlab/issue_templates/Experiment Implementation.md b/.gitlab/issue_templates/Experiment Implementation.md
index 56202240ef5..481389f8023 100644
--- a/.gitlab/issue_templates/Experiment Implementation.md
+++ b/.gitlab/issue_templates/Experiment Implementation.md
@@ -12,13 +12,13 @@
# Inclusions and exclusions
<!-- These would be the rules for which given context (and are limited to context or resolvable at experiment time details) is included or excluded from the test. An example of this would be to only run an experiment on groups less than N number of days old. -->
-# Segmentation
+# Segmentation
<!-- Rules for always saying context with these criteria always get this variant. For instance, if you want to always give groups less than N number of days old the experiment experience, they are specified here. This is different from the exclusion rules above. -->
# Tracking Details
- [json schema](https://gitlab.com/gitlab-org/iglu/-/blob/master/public/schemas/com.gitlab/gitlab_experiment/jsonschema/0-3-0) used in `gitlab-experiment` tracking.
-- see [event schema](https://docs.gitlab.com/ee/development/snowplow/index.html#event-schema) for a guide.
+- see [event schema](../../doc/development/internal_analytics/snowplow/index.md#event-schema) for a guide.
| sequence | activity | category | action | label | property | value |
| -------- | -------- | ------ | ----- | ------- | -------- | ----- |
diff --git a/app/assets/javascripts/lib/utils/listbox_helpers.js b/app/assets/javascripts/lib/utils/listbox_helpers.js
new file mode 100644
index 00000000000..b43a29ad28b
--- /dev/null
+++ b/app/assets/javascripts/lib/utils/listbox_helpers.js
@@ -0,0 +1,45 @@
+import { n__ } from '~/locale';
+
+/**
+ * Accepts an array of options and an array of selected option IDs
+ * and optionally a placeholder and maximum number of options to show.
+ *
+ * Returns a string with the text of the selected options:
+ * - If no options are selected, returns the placeholder or an empty string.
+ * - If at most maxOptionsShown options are selected, returns the text of those options comma-separated.
+ * - If more than maxOptionsShown options are selected, returns the text of the first maxOptionsShown options
+ *   comma-separated, followed by the text "+X more", where X is the number of additional selected options.
+ *
+ * @param {Object} opts
+ * @param {Array<{ id: number | string, value: string }>} opts.options
+ * @param {Array<{ id: number | string }>} opts.selected
+ * @param {String} opts.placeholder - Placeholder when no option is selected
+ * @param {Number} opts.maxOptionsShown - Max number of options to show
+ * @returns {String}
+ */
+export const getSelectedOptionsText = ({
+ options,
+ selected,
+ placeholder = '',
+ maxOptionsShown = 1,
+}) => {
+ const selectedOptions = options.filter(({ id, value }) => selected.includes(id || value));
+
+ if (selectedOptions.length === 0) {
+ return placeholder;
+ }
+
+ const optionTexts = selectedOptions.map((option) => option.text);
+
+ if (selectedOptions.length <= maxOptionsShown) {
+ return optionTexts.join(', ');
+ }
+
+ // Prevent showing "+-1 more" when the array is empty.
+ const additionalItemsCount = selectedOptions.length - maxOptionsShown;
+ return `${optionTexts.slice(0, maxOptionsShown).join(', ')} ${n__(
+ '+%d more',
+ '+%d more',
+ additionalItemsCount,
+ )}`;
+};
diff --git a/app/assets/stylesheets/pages/notes.scss b/app/assets/stylesheets/pages/notes.scss
index 069ccad4435..c5b644bd72f 100644
--- a/app/assets/stylesheets/pages/notes.scss
+++ b/app/assets/stylesheets/pages/notes.scss
@@ -44,7 +44,7 @@ $system-note-icon-m-left: $avatar-m-left + $icon-size-diff / $avatar-m-ratio;
background: var(--gray-50, $gray-50);
}
- .timeline-entry:last-child::before {
+ .timeline-entry:not(.draft-note):last-child::before {
background: var(--white);
.gl-dark & {
diff --git a/app/assets/stylesheets/startup/startup-dark.scss b/app/assets/stylesheets/startup/startup-dark.scss
index 02cc86a77ea..7be15c2d8f9 100644
--- a/app/assets/stylesheets/startup/startup-dark.scss
+++ b/app/assets/stylesheets/startup/startup-dark.scss
@@ -2,6 +2,9 @@
// Please see the feedback issue for more details and help:
// https://gitlab.com/gitlab-org/gitlab/-/issues/331812
@charset "UTF-8";
+:root {
+ --white: #333238;
+}
*,
*::before,
*::after {
diff --git a/app/assets/stylesheets/startup/startup-general.scss b/app/assets/stylesheets/startup/startup-general.scss
index 5e3944e62f1..65500800ce3 100644
--- a/app/assets/stylesheets/startup/startup-general.scss
+++ b/app/assets/stylesheets/startup/startup-general.scss
@@ -2,6 +2,9 @@
// Please see the feedback issue for more details and help:
// https://gitlab.com/gitlab-org/gitlab/-/issues/331812
@charset "UTF-8";
+:root {
+ --white: #fff;
+}
*,
*::before,
*::after {
diff --git a/app/assets/stylesheets/startup/startup-signin.scss b/app/assets/stylesheets/startup/startup-signin.scss
index d081fdfa50b..40e1e4b1996 100644
--- a/app/assets/stylesheets/startup/startup-signin.scss
+++ b/app/assets/stylesheets/startup/startup-signin.scss
@@ -2,6 +2,9 @@
// Please see the feedback issue for more details and help:
// https://gitlab.com/gitlab-org/gitlab/-/issues/331812
@charset "UTF-8";
+:root {
+ --white: #fff;
+}
*,
*::before,
*::after {
diff --git a/app/views/admin/application_settings/service_usage_data.html.haml b/app/views/admin/application_settings/service_usage_data.html.haml
index e42c1091bf2..24f132b982a 100644
--- a/app/views/admin/application_settings/service_usage_data.html.haml
+++ b/app/views/admin/application_settings/service_usage_data.html.haml
@@ -25,7 +25,7 @@
- c.with_body do
- enable_service_ping_link_url = help_page_path('user/admin_area/settings/usage_statistics', anchor: 'enable-or-disable-usage-statistics')
- enable_service_ping_link = '<a href="%{url}">'.html_safe % { url: enable_service_ping_link_url }
- - generate_manually_link_url = help_page_path('development/service_ping/troubleshooting', anchor: 'generate-service-ping')
+ - generate_manually_link_url = help_page_path('development/internal_analytics/service_ping/troubleshooting', anchor: 'generate-service-ping')
- generate_manually_link = '<a href="%{url}" target="_blank" rel="noopener noreferrer">'.html_safe % { url: generate_manually_link_url }
= html_escape(s_('%{enable_service_ping_link_start}Enable%{link_end} or %{generate_manually_link_start}generate%{link_end} Service Ping to preview and download service usage data payload.')) % { enable_service_ping_link_start: enable_service_ping_link, generate_manually_link_start: generate_manually_link, link_end: '</a>'.html_safe }
diff --git a/data/deprecations/16-1-dependency-proxy-graphql-field.yml b/data/deprecations/16-1-dependency-proxy-graphql-field.yml
new file mode 100644
index 00000000000..80b43e33f41
--- /dev/null
+++ b/data/deprecations/16-1-dependency-proxy-graphql-field.yml
@@ -0,0 +1,11 @@
+- title: "GraphQL deprecation of `dependencyProxyTotalSizeInBytes` field" # (required) Actionable title. e.g., The `confidential` field for a `Note` is deprecated. Use `internal` instead.
+ announcement_milestone: "16.1" # (required) The milestone when this feature was first announced as deprecated.
+ removal_milestone: "17.0" # (required) The milestone when this feature is planned to be removed
+ breaking_change: true # (required) If this deprecation is a breaking change, set this value to true
+ reporter: trizzi # (required) GitLab username of the person reporting the deprecation
+ stage: Package # (required) String value of the stage that the feature was created in. e.g., Growth
+ issue_url: https://gitlab.com/gitlab-org/gitlab/-/issues/414236 # (required) Link to the deprecation issue in GitLab
+ body: | # (required) Do not modify this line, instead modify the lines below.
+ You can use GraphQL to query the amount of storage used by the GitLab Dependency Proxy. However, the `dependencyProxyTotalSizeInBytes` field is limited to ~2Gb (in bytes), which is not always large enough for the Dependency Proxy. As a result, `dependencyProxyTotalSizeInBytes` is deprecated and will be removed in GitLab 17.0.
+
+ Use `dependencyProxyTotalSizeBytes` instead, introduced in GitLab 16.1.
diff --git a/doc/administration/audit_event_streaming.md b/doc/administration/audit_event_streaming.md
index f4d0d7d780f..af97e902645 100644
--- a/doc/administration/audit_event_streaming.md
+++ b/doc/administration/audit_event_streaming.md
@@ -514,13 +514,26 @@ To list streaming destinations for an instance and see the verification tokens:
## Event type filters
-> Event type filters API [introduced](https://gitlab.com/gitlab-org/gitlab/-/issues/344845) in GitLab 15.7.
+> - Event type filters API [introduced](https://gitlab.com/gitlab-org/gitlab/-/issues/344845) in GitLab 15.7.
+> - Event type filtering in the UI with a defined list of audit event types [introduced](https://gitlab.com/gitlab-org/gitlab/-/issues/413581) in GitLab 16.1.
-When this feature is enabled for a group, you can use an API to permit users to filter streamed audit events per destination.
+When this feature is enabled for a group, you can use the GitLab UI or an API to permit users to filter streamed audit events per destination.
If the feature is enabled with no filters, the destination receives all audit events.
A streaming destination that has an event type filter set has a **filtered** (**{filter}**) label.
+### Use the GitLab UI
+
+To update a streaming destination's event filters:
+
+1. On the top bar, select **Main menu > Groups** and find your group.
+1. On the left sidebar, select **Security and Compliance > Audit events**.
+1. On the main area, select the **Streams** tab.
+1. To the right of the item, select **Edit** (**{pencil}**).
+1. Select **Filter by stream event**.
+1. Select the dropdown list and select or unselect the required event types.
+1. Select **Save** to update the event filters.
+
### Use the API to add an event type filter
Prerequisites:
diff --git a/doc/administration/troubleshooting/gitlab_rails_cheat_sheet.md b/doc/administration/troubleshooting/gitlab_rails_cheat_sheet.md
index ddee79046f6..fc319fad3e8 100644
--- a/doc/administration/troubleshooting/gitlab_rails_cheat_sheet.md
+++ b/doc/administration/troubleshooting/gitlab_rails_cheat_sheet.md
@@ -92,4 +92,4 @@ Moved to [Geo replication troubleshooting](../geo/replication/troubleshooting.md
## Generate Service Ping
-This content has been moved to [Service Ping Troubleshooting](../../development/service_ping/troubleshooting.md).
+This content has been moved to [Service Ping Troubleshooting](../../development/internal_analytics/service_ping/troubleshooting.md).
diff --git a/doc/api/usage_data.md b/doc/api/usage_data.md
index bf8924c1578..1a2c3e95002 100644
--- a/doc/api/usage_data.md
+++ b/doc/api/usage_data.md
@@ -7,7 +7,7 @@ type: reference, api
# Service Data API **(FREE SELF)**
-The Service Data API is associated with [Service Ping](../development/service_ping/index.md).
+The Service Data API is associated with [Service Ping](../development/internal_analytics/service_ping/index.md).
## Export metric definitions as a single YAML file
diff --git a/doc/ci/pipelines/pipeline_security.md b/doc/ci/pipelines/pipeline_security.md
new file mode 100644
index 00000000000..f035779e665
--- /dev/null
+++ b/doc/ci/pipelines/pipeline_security.md
@@ -0,0 +1,48 @@
+---
+stage: Verify
+group: Pipeline Security
+info: To determine the technical writer assigned to the Stage/Group associated with this page, see https://about.gitlab.com/handbook/product/ux/technical-writing/#assignments
+type: reference
+---
+
+# Pipeline security
+
+## Secrets Management
+
+Secrets management refers to the systems that developers use to securely store sensitive data
+in an environment with strict access controls. A **secret** is a sensitive credential
+that must be kept confidential, such as:
+
+- Passwords
+- SSH keys
+- Access tokens
+- Other types of credentials
+
+## Secrets storage
+
+### Secrets management providers
+
+Secrets that are the most sensitive and under the strictest policies should be stored
+in a separate secrets management provider such as [Vault](https://www.vaultproject.io).
+The secrets are stored outside of the GitLab instance, which is the safest option.
+
+You can use the GitLab [Vault integration](../secrets/index.md#use-vault-secrets-in-a-ci-job)
+to retrieve those secrets in CI/CD pipelines when they are needed.
+
+### CI/CD variables
+
+[CI/CD Variables](../variables/index.md) are a convenient way to store and use data
+in a CI/CD pipeline, but variables are less secure than secrets management providers.
+Variable values:
+
+- Are stored in the GitLab project, group, or instance settings. Users with access
+ to the settings have access to the variables.
+- Can be [overridden](../variables/index.md#override-a-defined-cicd-variable),
+ making it hard to determine which value was used.
+- Are more easily exposed by accidental pipeline misconfiguration.
+
+Sensitive data should be stored in a secrets management solution. If there is low
+sensitivity data that you want to store in a CI/CD variable, be sure to always:
+
+- [Mask the variables](../variables/index.md#mask-a-cicd-variable).
+- [Protect the variables](../variables/index.md#protect-a-cicd-variable) when possible.
diff --git a/doc/development/cicd/templates.md b/doc/development/cicd/templates.md
index 1bf4a780e26..77e529867af 100644
--- a/doc/development/cicd/templates.md
+++ b/doc/development/cicd/templates.md
@@ -424,7 +424,7 @@ To add a metric definition for a new template:
1. Install and start the [GitLab GDK](https://gitlab.com/gitlab-org/gitlab-development-kit#installation).
1. In the `gitlab` directory in your GDK, check out the branch that contains the new template.
-1. [Add the template inclusion event](../service_ping/implement.md#add-new-events)
+1. [Add the template inclusion event](../internal_analytics/service_ping/implement.md#add-new-events)
with this Rake task:
```shell
@@ -445,7 +445,7 @@ To add a metric definition for a new template:
- [`config/metrics/counts_28d/20210216184559_ci_templates_total_unique_counts_monthly.yml`](https://gitlab.com/gitlab-org/gitlab/-/blob/master/config/metrics/counts_28d/20210216184559_ci_templates_total_unique_counts_monthly.yml)
1. Use the same `name` as above as the last argument in the following command to
- [add new metric definitions](../service_ping/metrics_dictionary.md#metrics-added-dynamic-to-service-ping-payload):
+ [add new metric definitions](../internal_analytics/service_ping/metrics_dictionary.md#metrics-added-dynamic-to-service-ping-payload):
```shell
bundle exec rails generate gitlab:usage_metric_definition:redis_hll ci_templates <template_metric_event_name>
@@ -466,7 +466,7 @@ To add a metric definition for a new template:
- `data_source:`: Set to `redis_hll`.
- `description`: Add a short description of what this metric counts, for example: `Count of pipelines using the latest Auto Deploy template`
- `product_*`: Set to [section, stage, group, and feature category](https://about.gitlab.com/handbook/product/categories/#devops-stages)
- as per the [metrics dictionary guide](../service_ping/metrics_dictionary.md#metrics-definition-and-validation).
+ as per the [metrics dictionary guide](../internal_analytics/service_ping/metrics_dictionary.md#metrics-definition-and-validation).
If you are unsure what to use for these keywords, you can ask for help in the merge request.
- Add the following to the end of each file:
diff --git a/doc/development/database/foreign_keys.md b/doc/development/database/foreign_keys.md
index 25b3d815d7a..5dda3dd55a3 100644
--- a/doc/development/database/foreign_keys.md
+++ b/doc/development/database/foreign_keys.md
@@ -195,5 +195,5 @@ end
```
Using a foreign key as primary key saves space but can make
-[batch counting](../service_ping/implement.md#batch-counters) in [Service Ping](../service_ping/index.md) less efficient.
+[batch counting](../internal_analytics/service_ping/implement.md#batch-counters) in [Service Ping](../service_ping/index.md) less efficient.
Consider using a regular `id` column if the table is relevant for Service Ping.
diff --git a/doc/development/database/query_performance.md b/doc/development/database/query_performance.md
index 10ab726940a..77067e2979d 100644
--- a/doc/development/database/query_performance.md
+++ b/doc/development/database/query_performance.md
@@ -22,7 +22,7 @@ When you are optimizing your SQL queries, there are two dimensions to pay attent
| Concurrent operations in a migration | `5min` | Concurrent operations do not block the database, but they block the GitLab update. This includes operations such as `add_concurrent_index` and `add_concurrent_foreign_key`. |
| Concurrent operations in a post migration | `20min` | Concurrent operations do not block the database, but they block the GitLab post update process. This includes operations such as `add_concurrent_index` and `add_concurrent_foreign_key`. If index creation exceeds 20 minutes, consider [async index creation](adding_database_indexes.md#create-indexes-asynchronously). |
| Background migrations | `1s` | |
-| Service Ping | `1s` | See the [Service Ping docs](../service_ping/implement.md) for more details. |
+| Service Ping | `1s` | See the [Service Ping docs](../internal_analytics/service_ping/implement.md) for more details. |
- When analyzing your query's performance, pay attention to if the time you are seeing is on a [cold or warm cache](#cold-and-warm-cache). These guidelines apply for both cache types.
- When working with batched queries, change the range and batch size to see how it effects the query timing and caching.
diff --git a/doc/development/internal_analytics/index.md b/doc/development/internal_analytics/index.md
new file mode 100644
index 00000000000..8abea4c2b2f
--- /dev/null
+++ b/doc/development/internal_analytics/index.md
@@ -0,0 +1,12 @@
+---
+stage: Analytics
+group: Analytics Instrumentation
+info: To determine the technical writer assigned to the Stage/Group associated with this page, see https://about.gitlab.com/handbook/product/ux/technical-writing/#assignments
+---
+
+# Internal analytics
+
+Learn how to implement internal analytics using:
+
+- [Service Ping](service_ping/index.md)
+- [Snowplow](snowplow/index.md)
diff --git a/doc/development/internal_analytics/service_ping/implement.md b/doc/development/internal_analytics/service_ping/implement.md
new file mode 100644
index 00000000000..0dfc3806712
--- /dev/null
+++ b/doc/development/internal_analytics/service_ping/implement.md
@@ -0,0 +1,882 @@
+---
+stage: Analytics
+group: Analytics Instrumentation
+info: To determine the technical writer assigned to the Stage/Group associated with this page, see https://about.gitlab.com/handbook/product/ux/technical-writing/#assignments
+---
+
+# Implement Service Ping
+
+Service Ping consists of two kinds of data:
+
+- **Counters**: Track how often a certain event happened over time, such as how many CI/CD pipelines have run.
+ They are monotonic and usually trend up.
+- **Observations**: Facts collected from one or more GitLab instances and can carry arbitrary data.
+ There are no general guidelines for how to collect those, due to the individual nature of that data.
+
+To implement a new metric in Service Ping, follow these steps:
+
+1. [Implement the required counter](#types-of-counters)
+1. [Name and place the metric](metrics_dictionary.md#metric-key_path)
+1. [Test counters manually using your Rails console](#test-counters-manually-using-your-rails-console)
+1. [Generate the SQL query](#generate-the-sql-query)
+1. [Optimize queries with Database Lab](#optimize-queries-with-database-lab)
+1. [Add the metric definition to the Metrics Dictionary](#add-the-metric-definition)
+1. [Add the metric to the Versions Application](#add-the-metric-to-the-versions-application)
+1. [Create a merge request](#create-a-merge-request)
+1. [Verify your metric](#verify-your-metric)
+1. [Set up and test Service Ping locally](#set-up-and-test-service-ping-locally)
+
+## Instrumentation classes
+
+NOTE:
+Implementing metrics directly in `usage_data.rb` is deprecated.
+When you add or change a Service Ping Metric, you must migrate metrics to [instrumentation classes](metrics_instrumentation.md).
+For information about the progress on migrating Service Ping metrics, see this [epic](https://gitlab.com/groups/gitlab-org/-/epics/5547).
+
+For example, we have the following instrumentation class:
+`lib/gitlab/usage/metrics/instrumentations/count_boards_metric.rb`.
+
+You should add it to `usage_data.rb` as follows:
+
+```ruby
+boards: add_metric('CountBoardsMetric', time_frame: 'all'),
+```
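+
+For reference, the instrumentation class itself is typically a small `DatabaseMetric` subclass.
+The following is a simplified sketch of what `CountBoardsMetric` looks like; see that file
+and the [metrics instrumentation guide](metrics_instrumentation.md) for the canonical definition:
+
+```ruby
+# Simplified sketch of lib/gitlab/usage/metrics/instrumentations/count_boards_metric.rb
+module Gitlab
+  module Usage
+    module Metrics
+      module Instrumentations
+        class CountBoardsMetric < DatabaseMetric
+          operation :count
+
+          relation { Board }
+        end
+      end
+    end
+  end
+end
+```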
+
+## Types of counters
+
+There are several types of counters for metrics:
+
+- **[Batch counters](#batch-counters)**: Used for counts, sums, and averages.
+- **[Redis counters](#redis-counters):** Used for in-memory counts.
+- **[Alternative counters](#alternative-counters):** Used for settings and configurations.
+
+NOTE:
+Only use the provided counter methods. Each counter method contains a built-in fail-safe mechanism that isolates each counter to avoid breaking the entire Service Ping process.
+
+### Batch counters
+
+For large tables, PostgreSQL can take a long time to count rows due to MVCC [(Multi-version Concurrency Control)](https://en.wikipedia.org/wiki/Multiversion_concurrency_control). Batch counting is a counting method where a single large query is broken into multiple smaller queries. For example, instead of a single query querying 1,000,000 records, with batch counting, you can execute 100 queries of 10,000 records each. Batch counting is useful for avoiding database timeouts as each batch query is significantly shorter than one single long running query.
+
+For GitLab.com, there are extremely large tables with 15 second query timeouts, so we use batch counting to avoid encountering timeouts. Here are the sizes of some GitLab.com tables:
+
+| Table | Row counts in millions |
+|------------------------------|------------------------|
+| `merge_request_diff_commits` | 2280 |
+| `ci_build_trace_sections` | 1764 |
+| `merge_request_diff_files` | 1082 |
+| `events` | 514 |
+
+Batch counting requires indexes on columns to calculate max, min, and range queries. In some cases,
+you must add a specialized index on the columns involved in a counter.
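+
+If such an index is missing, a migration to add one might look like the following hedged sketch
+(the table, column, and index name here are purely illustrative):
+
+```ruby
+# Hypothetical migration: adds an index so batch counting over
+# `awesome_events.author_id` can compute MIN, MAX, and range queries efficiently.
+class AddIndexToAwesomeEventsAuthorId < Gitlab::Database::Migration[2.1]
+  disable_ddl_transaction!
+
+  INDEX_NAME = 'index_awesome_events_on_author_id'
+
+  def up
+    add_concurrent_index :awesome_events, :author_id, name: INDEX_NAME
+  end
+
+  def down
+    remove_concurrent_index_by_name :awesome_events, INDEX_NAME
+  end
+end
+```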
+
+#### Ordinary batch counters
+
+Create a new [database metrics](metrics_instrumentation.md#database-metrics) instrumentation class with a `count` operation for a given `ActiveRecord_Relation`.
+
+Method:
+
+```ruby
+add_metric('CountIssuesMetric', time_frame: 'all')
+```
+
+Examples:
+
+Examples using `usage_data.rb` have been [deprecated](usage_data.md). We recommend using [instrumentation classes](metrics_instrumentation.md).
+
+#### Distinct batch counters
+
+Create a new [database metrics](metrics_instrumentation.md#database-metrics) instrumentation class with a `distinct_count` operation for a given `ActiveRecord_Relation`.
+
+Method:
+
+```ruby
+add_metric('CountUsersAssociatingMilestonesToReleasesMetric', time_frame: 'all')
+```
+
+WARNING:
+Counting over non-unique columns can lead to performance issues. For more information, see the [iterating tables in batches](../../database/iterating_tables_in_batches.md) guide.
+
+Examples:
+
+Examples using `usage_data.rb` have been [deprecated](usage_data.md). We recommend using [instrumentation classes](metrics_instrumentation.md).
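+
+As with ordinary batch counters, the backing instrumentation class is a `DatabaseMetric` subclass,
+this time using a `distinct_count` operation with an explicit column. A hedged sketch (the relation
+and column shown are illustrative):
+
+```ruby
+# Illustrative sketch of a distinct-count database metric class.
+module Gitlab
+  module Usage
+    module Metrics
+      module Instrumentations
+        class CountUsersAssociatingMilestonesToReleasesMetric < DatabaseMetric
+          operation :distinct_count, column: :author_id
+
+          relation { Release.with_milestones }
+        end
+      end
+    end
+  end
+end
+```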
+
+#### Sum batch operation
+
+Sums the values of a given `ActiveRecord_Relation` on a given column and handles errors,
+including `ActiveRecord::StatementInvalid`.
+
+Method:
+
+```ruby
+add_metric('JiraImportsTotalImportedIssuesCountMetric')
+```
+
+#### Average batch operation
+
+Averages the values of a given `ActiveRecord_Relation` on a given column and handles errors.
+
+Method:
+
+```ruby
+add_metric('CountIssuesWeightAverageMetric')
+```
+
+Examples:
+
+Examples using `usage_data.rb` have been [deprecated](usage_data.md). We recommend using [instrumentation classes](metrics_instrumentation.md).
+
+#### Grouping and batch operations
+
+The `count`, `distinct_count`, and `sum` batch counters can accept an `ActiveRecord::Relation`
+object that groups by a specified column. With a grouped relation, the methods do batch counting,
+handle errors, and return a hash table of key-value pairs.
+
+Examples:
+
+```ruby
+count(Namespace.group(:type))
+# returns => {nil=>179, "Group"=>54}
+
+distinct_count(Project.group(:visibility_level), :creator_id)
+# returns => {0=>1, 10=>1, 20=>11}
+
+sum(Issue.group(:state_id), :weight)
+# returns => {1=>3542, 2=>6820}
+```
+
+#### Add operation
+
+Sums the values given as parameters and handles `StandardError`.
+Returns `-1` if any of the arguments are `-1`.
+
+Method:
+
+```ruby
+add(*args)
+```
+
+Examples:
+
+```ruby
+project_imports = distinct_count(::Project.where.not(import_type: nil), :creator_id)
+bulk_imports = distinct_count(::BulkImport, :user_id)
+
+ add(project_imports, bulk_imports)
+```
+
+#### Estimated batch counters
+
+> [Introduced](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/48233) in GitLab 13.7.
+
+Estimated batch counter functionality handles `ActiveRecord::StatementInvalid` errors
+when used through the provided `estimate_batch_distinct_count` method.
+Errors return a value of `-1`.
+
+WARNING:
+This functionality estimates a distinct count of a specific `ActiveRecord_Relation` in a given column
+using the [HyperLogLog](https://static.googleusercontent.com/media/research.google.com/en//pubs/archive/40671.pdf) algorithm.
+Because the HyperLogLog algorithm is probabilistic, the **results always include some error**.
+The highest encountered error rate is 4.9%.
+
+When correctly used, the `estimate_batch_distinct_count` method enables efficient counting over
+columns that contain non-unique values, which cannot be assured by other counters.
+
+##### `estimate_batch_distinct_count` method
+
+Method:
+
+```ruby
+estimate_batch_distinct_count(relation, column = nil, batch_size: nil, start: nil, finish: nil)
+```
+
+The [method](https://gitlab.com/gitlab-org/gitlab/-/blob/master/lib/gitlab/utils/usage_data.rb#L63)
+includes the following arguments:
+
+- `relation`: The ActiveRecord_Relation to perform the count.
+- `column`: The column to perform the distinct count. The default is the primary key.
+- `batch_size`: From `Gitlab::Database::PostgresHll::BatchDistinctCounter::DEFAULT_BATCH_SIZE`. Default value: 10,000.
+- `start`: The custom start of the batch count, to avoid complex minimum calculations.
+- `finish`: The custom end of the batch count to avoid complex maximum calculations.
+
+The method includes the following prerequisites:
+
+- The supplied `relation` must include the primary key defined as the numeric column.
+ For example: `id bigint NOT NULL`.
+- The `estimate_batch_distinct_count` can handle a joined relation. To use its ability to
+ count non-unique columns, the joined relation **must not** have a one-to-many relationship,
+ such as `has_many :boards`.
+- Both `start` and `finish` arguments should always represent primary key relationship values,
+ even if the estimated count refers to another column, for example:
+
+ ```ruby
+ estimate_batch_distinct_count(::Note, :author_id, start: ::Note.minimum(:id), finish: ::Note.maximum(:id))
+ ```
+
+Examples:
+
+1. Simple execution of the estimated batch counter, with only a relation provided.
+   The returned value represents the estimated number of unique values in the `id` column
+   (which is the primary key) of the `Project` relation:
+
+ ```ruby
+ estimate_batch_distinct_count(::Project)
+ ```
+
+1. Execution of the estimated batch counter where the provided relation has an additional
+   filter applied (`.where(time_period)`), the number of unique values is estimated for a
+   custom column (`:author_id`), and the `start` and `finish` parameters together define the
+   range of the relation to analyze:
+
+ ```ruby
+ estimate_batch_distinct_count(::Note.with_suggestions.where(time_period), :author_id, start: ::Note.minimum(:id), finish: ::Note.maximum(:id))
+ ```
+
+When instrumenting a metric that uses the estimated batch counter, add the
+`_estimated` suffix to its name, for example:
+
+```ruby
+ "counts": {
+ "ci_builds_estimated": estimate_batch_distinct_count(Ci::Build),
+ ...
+```
+
+### Redis counters
+
+Handles `::Redis::CommandError` and `Gitlab::UsageDataCounters::BaseCounter::UnknownEvent`.
+Returns `-1` when a block is sent, or a hash with all values set to `-1` when a counter from `Gitlab::UsageDataCounters` is sent.
+The different behavior is due to two different implementations of the Redis counter.
+
+Method:
+
+```ruby
+redis_usage_data(counter, &block)
+```
+
+Arguments:
+
+- `counter`: a counter from `Gitlab::UsageDataCounters` that implements the `fallback_totals` method, or
+- `block`: a block that is evaluated.
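+
+Both invocation styles look like this (the counter class and event name are taken from the
+examples later on this page):
+
+```ruby
+# Counter form: the counter class must implement `fallback_totals`.
+redis_usage_data(Gitlab::UsageDataCounters::WikiPageCounter)
+
+# Block form: the block is evaluated and its result is returned.
+redis_usage_data { Gitlab::UsageDataCounters::HLLRedisCounter.unique_events(event_names: 'users_expanding_vulnerabilities', start_date: 28.days.ago, end_date: Date.current) }
+```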
+
+#### Ordinary Redis counters
+
+Example of implementation: [`Gitlab::UsageDataCounters::WikiPageCounter`](https://gitlab.com/gitlab-org/gitlab/-/blob/master/lib/gitlab/usage_data_counters/wiki_page_counter.rb), using Redis methods [`INCR`](https://redis.io/commands/incr/) and [`GET`](https://redis.io/commands/get/).
+
+Events are handled by counter classes in the `Gitlab::UsageDataCounters` namespace, inheriting from `BaseCounter`, that are either:
+
+1. Listed in [`Gitlab::UsageDataCounters::COUNTERS`](https://gitlab.com/gitlab-org/gitlab/-/blob/master/lib/gitlab/usage_data_counters.rb#L5) to be then included in `Gitlab::UsageData`.
+
+1. Specified in the metric definition using the `RedisMetric` instrumentation class by their `prefix` option to be picked up using the [metric instrumentation](metrics_instrumentation.md) framework. Refer to the [Redis metrics](metrics_instrumentation.md#redis-metrics) documentation for an example implementation.
+
+Inheriting classes are expected to override the `KNOWN_EVENTS` and `PREFIX` constants to build event names and associated metrics. For example, for the prefix `issues` and the events array `%w[create update delete]`, three metrics are added to the Service Ping payload: `counts.issues_create`, `counts.issues_update`, and `counts.issues_delete`.
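+
+A hedged sketch of such a counter class, using the `issues` prefix and the events from the example
+above (the class name is hypothetical):
+
+```ruby
+# Hypothetical counter class: produces `counts.issues_create`,
+# `counts.issues_update`, and `counts.issues_delete` in the payload.
+module Gitlab
+  module UsageDataCounters
+    class IssueActivityCounter < BaseCounter
+      KNOWN_EVENTS = %w[create update delete].freeze
+      PREFIX = 'issues'
+    end
+  end
+end
+```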
+
+##### `UsageData` API
+
+You can use the `UsageData` API to track events.
+To track events, the `usage_data_api` feature flag must be enabled.
+This flag is enabled by default (`default_enabled: true`) in GitLab 13.7 and later.
+
+##### UsageData API tracking
+
+1. Track events using the [`UsageData` API](#usagedata-api).
+
+ Increment event count using an ordinary Redis counter, for a given event name.
+
+ API requests are protected by checking for a valid CSRF token.
+
+ ```plaintext
+ POST /usage_data/increment_counter
+ ```
+
+ | Attribute | Type | Required | Description |
+ | :-------- | :--- | :------- | :---------- |
+ | `event` | string | yes | The event name to track. |
+
+ Response:
+
+ - `200` if the event was tracked.
+ - `400 Bad request` if the event parameter is missing.
+ - `401 Unauthorized` if the user is not authenticated.
+ - `403 Forbidden` if an invalid CSRF token is provided.
+
+1. Track events using the JavaScript/Vue API helper which calls the [`UsageData` API](#usagedata-api).
+
+ To track events, `usage_data_api` and `usage_data_#{event_name}` must be enabled.
+
+ ```javascript
+ import api from '~/api';
+
+ api.trackRedisCounterEvent('my_already_defined_event_name');
+ ```
+
+#### Redis HLL counters
+
+WARNING:
+HyperLogLog (HLL) is a probabilistic algorithm and its **results always include some small error**. According to the [Redis documentation](https://redis.io/commands/pfcount/), data from
+the used HLL implementation is "approximated with a standard error of 0.81%".
+
+NOTE:
+A user's consent for `usage_stats` (`User.single_user&.requires_usage_stats_consent?`) is not checked during the data tracking stage for performance reasons. Keys corresponding to those counters are present in Redis even if `usage_stats_consent` is still required. However, no metric is collected from Redis and reported back to GitLab as long as `usage_stats_consent` is required.
+
+`Gitlab::UsageDataCounters::HLLRedisCounter` provides the data structures used to count unique values.
+
+Implemented using Redis methods [PFADD](https://redis.io/commands/pfadd/) and [PFCOUNT](https://redis.io/commands/pfcount/).
+
+##### Add new events
+
+1. Define events in [`known_events`](https://gitlab.com/gitlab-org/gitlab/-/blob/master/lib/gitlab/usage_data_counters/known_events/).
+
+ Example event:
+
+ ```yaml
+ - name: users_creating_epics
+ aggregation: weekly
+ ```
+
+ Keys:
+
+ - `name`: unique event name.
+
+ Name format for Redis HLL events: `{hll_counters}_<name>`.
+
+ Example names: `users_creating_epics`, `users_triggering_security_scans`.
+
+ - `aggregation`: may be set to a `:daily` or `:weekly` key. Defines how counting data is stored in Redis.
+ Aggregation on a `daily` basis does not pull more fine-grained data.
+
+1. Use one of the following methods to track the event:
+
+ - In the controller using the `ProductAnalyticsTracking` module and the following format:
+
+ ```ruby
+ track_event(*controller_actions, name:, action:, label:, conditions: nil, destinations: [:redis_hll], &block)
+ ```
+
+ Arguments:
+
+ - `controller_actions`: the controller actions to track.
+ - `name`: the event name.
+ - `action`: required if destination is `:snowplow`. Action name for the triggered event. See [event schema](../snowplow/index.md#event-schema) for more details.
+ - `label`: required if destination is `:snowplow`. Label for the triggered event. See [event schema](../snowplow/index.md#event-schema) for more details.
+ - `conditions`: optional custom conditions. Uses the same format as Rails callbacks.
+ - `destinations`: optional list of destinations. Currently supports `:redis_hll` and `:snowplow`. Default: `:redis_hll`.
+ - `&block`: optional block that computes and returns the `custom_id` that we want to track. This overrides the `visitor_id`.
+
+ Example:
+
+ ```ruby
+ # controller
+ class ProjectsController < Projects::ApplicationController
+ include ProductAnalyticsTracking
+
+ skip_before_action :authenticate_user!, only: :show
+ track_event :index, :show,
+ name: 'users_visiting_projects',
+ action: 'user_perform_visit',
+ label: 'redis_hll_counters.users_visiting_project_monthly',
+ destinations: %i[redis_hll snowplow]
+
+ def index
+ render html: 'index'
+ end
+
+ def new
+ render html: 'new'
+ end
+
+ def show
+ render html: 'show'
+ end
+ end
+ ```
+
+ - In the API using the `increment_unique_values(event_name, values)` helper method.
+
+ Arguments:
+
+ - `event_name`: the event name.
+ - `values`: the values counted. Can be one value or an array of values.
+
+ Example:
+
+ ```ruby
+ get ':id/registry/repositories' do
+ repositories = ContainerRepositoriesFinder.new(
+ user: current_user, subject: user_group
+ ).execute
+
+ increment_unique_values('users_listing_repositories', current_user.id)
+
+ present paginate(repositories), with: Entities::ContainerRegistry::Repository, tags: params[:tags], tags_count: params[:tags_count]
+ end
+ ```
+
+ - Using `track_usage_event(event_name, values)` in services and GraphQL.
+
+ Increment unique values count using Redis HLL, for a given event name.
+
+ Examples:
+
+ - [Track usage event for an incident in a service](https://gitlab.com/gitlab-org/gitlab/-/blob/v13.8.3-ee/app/services/issues/update_service.rb#L66)
+ - [Track usage event for an incident in GraphQL](https://gitlab.com/gitlab-org/gitlab/-/blob/v13.8.3-ee/app/graphql/mutations/alert_management/update_alert_status.rb#L16)
+
+ ```ruby
+ track_usage_event(:incident_management_incident_created, current_user.id)
+ ```
+
+ - Using the [`UsageData` API](#usagedata-api).
+
+ Increment unique users count using Redis HLL, for a given event name.
+
+ API requests are protected by checking for a valid CSRF token.
+
+ ```plaintext
+ POST /usage_data/increment_unique_users
+ ```
+
+ | Attribute | Type | Required | Description |
+ | :-------- | :--- | :------- | :---------- |
+ | `event` | string | yes | The event name to track |
+
+ Response:
+
+ - `200` if the event was tracked, or if tracking failed for any reason.
+ - `400 Bad request` if an event parameter is missing.
+ - `401 Unauthorized` if the user is not authenticated.
+ - `403 Forbidden` if an invalid CSRF token is provided.
+
+ - Using the JavaScript/Vue API helper, which calls the [`UsageData` API](#usagedata-api).
+
+ Example for an existing event already defined in [known events](https://gitlab.com/gitlab-org/gitlab/-/blob/master/lib/gitlab/usage_data_counters/known_events/):
+
+ ```javascript
+ import api from '~/api';
+
+ api.trackRedisHllUserEvent('my_already_defined_event_name');
+ ```
+
+1. Get event data using `Gitlab::UsageDataCounters::HLLRedisCounter.unique_events(event_names:, start_date:, end_date:, context: '')`.
+
+ Arguments:
+
+ - `event_names`: the list of event names.
+ - `start_date`: start date of the period for which we want to get event data.
+ - `end_date`: end date of the period for which we want to get event data.
+ - `context`: context of the event. Allowed values are `default`, `free`, `bronze`, `silver`, `gold`, `starter`, `premium`, `ultimate`.
+
+1. Test tracking and getting unique events.
+
+   Trigger events in the Rails console by using the `track_event` method:
+
+   ```ruby
+   Gitlab::UsageDataCounters::HLLRedisCounter.track_event('users_viewing_compliance_audit_events', values: 1)
+   Gitlab::UsageDataCounters::HLLRedisCounter.track_event('users_viewing_compliance_audit_events', values: [2, 3])
+   ```
+
+   Next, get the unique events for the current week:
+
+   ```ruby
+   # Get unique events for the metric for the current week.
+   Gitlab::UsageDataCounters::HLLRedisCounter.unique_events(event_names: 'users_viewing_compliance_audit_events',
+       start_date: Date.current.beginning_of_week, end_date: Date.current.next_week)
+   ```
+
+##### Recommendations
+
+We have the following recommendations for [adding new events](#add-new-events):
+
+- Event aggregation: weekly.
+- When adding new metrics, use a [feature flag](../../../operations/feature_flags.md) to control the impact.
+  It's recommended to disable the new feature flag by default (set `default_enabled: false`). A short sketch follows this list.
+- Events can be triggered using the `UsageData` API, which helps when there are more than 10 events per change.
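+
+A hedged sketch of gating new event tracking behind such a feature flag (the flag and event names
+are hypothetical):
+
+```ruby
+# Hypothetical example: only track the new event while the flag is enabled.
+return unless Feature.enabled?(:track_users_creating_widgets, project)
+
+Gitlab::UsageDataCounters::HLLRedisCounter.track_event('users_creating_widgets', values: current_user.id)
+```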
+
+##### Enable or disable Redis HLL tracking
+
+You can enable or disable tracking completely by using the global flag:
+
+```shell
+/chatops run feature set redis_hll_tracking true
+/chatops run feature set redis_hll_tracking false
+```
+
+##### Known events are added automatically in Service Data payload
+
+Service Ping adds all events from [`known_events/*.yml`](https://gitlab.com/gitlab-org/gitlab/-/blob/master/lib/gitlab/usage_data_counters/known_events) to the Service Data generation under the `redis_hll_counters` key. This column is stored in [version-app as JSON](https://gitlab.com/gitlab-services/version-gitlab-com/-/blob/master/db/schema.rb#L209).
+For each event, we add metrics for the weekly and monthly time frames, and totals for each where applicable:
+
+- `#{event_name}_weekly`: Data for 7 days for daily [aggregation](#add-new-events) events and data for the last complete week for weekly [aggregation](#add-new-events) events.
+- `#{event_name}_monthly`: Data for 28 days for daily [aggregation](#add-new-events) events and data for the last 4 complete weeks for weekly [aggregation](#add-new-events) events.
+
+Example of `redis_hll_counters` data:
+
+```ruby
+{:redis_hll_counters=>
+ {"compliance"=>
+ {"users_viewing_compliance_dashboard_weekly"=>0,
+ "users_viewing_compliance_dashboard_monthly"=>0,
+ "users_viewing_compliance_audit_events_weekly"=>0,
+ "users_viewing_audit_events_monthly"=>0,
+ "compliance_total_unique_counts_weekly"=>0,
+ "compliance_total_unique_counts_monthly"=>0},
+ "analytics"=>
+ {"users_viewing_analytics_group_devops_adoption_weekly"=>0,
+ "users_viewing_analytics_group_devops_adoption_monthly"=>0,
+ "analytics_total_unique_counts_weekly"=>0,
+ "analytics_total_unique_counts_monthly"=>0},
+ "ide_edit"=>
+ {"users_editing_by_web_ide_weekly"=>0,
+ "users_editing_by_web_ide_monthly"=>0,
+ "users_editing_by_sfe_weekly"=>0,
+ "users_editing_by_sfe_monthly"=>0,
+ "ide_edit_total_unique_counts_weekly"=>0,
+ "ide_edit_total_unique_counts_monthly"=>0}
+ }
+}
+```
+
+Example:
+
+```ruby
+# Redis Counters
+redis_usage_data(Gitlab::UsageDataCounters::WikiPageCounter)
+
+# Define events in common.yml https://gitlab.com/gitlab-org/gitlab/-/blob/master/lib/gitlab/usage_data_counters/known_events/common.yml
+
+# Tracking events
+Gitlab::UsageDataCounters::HLLRedisCounter.track_event('users_expanding_vulnerabilities', values: visitor_id)
+
+# Get unique events for metric
+redis_usage_data { Gitlab::UsageDataCounters::HLLRedisCounter.unique_events(event_names: 'users_expanding_vulnerabilities', start_date: 28.days.ago, end_date: Date.current) }
+```
+
+### Alternative counters
+
+Handles `StandardError` and falls back to `-1`, so that not all measures fail if one exception is encountered.
+Mainly used for settings and configurations.
+
+Method:
+
+```ruby
+alt_usage_data(value = nil, fallback: -1, &block)
+```
+
+Arguments:
+
+- `value`: a static value, in which case the value is returned, or
+- `block`: a block that is evaluated.
+- `fallback: -1`: the common value used for any metrics that are failing.
+
+Example:
+
+```ruby
+alt_usage_data { Gitlab::VERSION }
+alt_usage_data { Gitlab::CurrentSettings.uuid }
+alt_usage_data(999)
+```
+
+### Add counters to build new metrics
+
+When adding the results of two counters, use the `add` Service Data method that
+handles fallback values and exceptions. It also generates a valid [SQL export](index.md#export-service-ping-data).
+
+Example:
+
+```ruby
+add(User.active, User.bot)
+```
+
+### Prometheus queries
+
+In those cases where operational metrics should be part of Service Ping, a database or Redis query is unlikely
+to provide useful data. Instead, Prometheus might be more appropriate, because most GitLab architectural
+components publish metrics to it that can be queried back, aggregated, and included as Service Data.
+
+NOTE:
+Prometheus as a data source for Service Ping is only available for single-node Omnibus installations
+that are running the [bundled Prometheus](../../../administration/monitoring/prometheus/index.md) instance.
+
+To query Prometheus for metrics, a helper method is available to `yield` a fully configured
+`PrometheusClient`, given it is available as per the note above:
+
+```ruby
+with_prometheus_client do |client|
+ response = client.query('<your query>')
+ ...
+end
+```
+
+Refer to [the `PrometheusClient` definition](https://gitlab.com/gitlab-org/gitlab/-/blob/master/lib/gitlab/prometheus_client.rb)
+for how to use its API to query for data.
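+
+For example, a metric backed by a Prometheus query might look like the following hedged sketch
+(the query string and response handling are illustrative and depend on which metrics your
+Prometheus instance actually scrapes):
+
+```ruby
+# Illustrative only: sums a hypothetical gauge across all scraped targets.
+with_prometheus_client do |client|
+  result = client.query('sum(gitlab_hypothetical_running_jobs)')
+  # Response shape assumed to follow the Prometheus instant-query API,
+  # where each entry is { 'metric' => {...}, 'value' => [timestamp, value] }.
+  result&.first&.dig('value', 1).to_i
+end
+```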
+
+### Fallback values for Service Ping
+
+We return fallback values in these cases:
+
+| Case | Value |
+|-----------------------------|-------|
+| Deprecated Metric ([Removed with version 14.3](https://gitlab.com/gitlab-org/gitlab/-/issues/335894)) | -1000 |
+| Timeouts, general failures | -1 |
+| Standard errors in counters | -2 |
+| Histogram metrics failure | { '-1' => -1 } |
+
+## Test counters manually using your Rails console
+
+```ruby
+# count
+Gitlab::UsageData.count(User.active)
+Gitlab::UsageData.count(::Clusters::Cluster.aws_installed.enabled, :cluster_id)
+
+# count distinct
+Gitlab::UsageData.distinct_count(::Project, :creator_id)
+Gitlab::UsageData.distinct_count(::Note.with_suggestions.where(time_period), :author_id, start: ::User.minimum(:id), finish: ::User.maximum(:id))
+```
+
+## Generate the SQL query
+
+Your Rails console returns the generated SQL queries. For example:
+
+```ruby
+pry(main)> Gitlab::UsageData.count(User.active)
+ (2.6ms) SELECT "features"."key" FROM "features"
+ (15.3ms) SELECT MIN("users"."id") FROM "users" WHERE ("users"."state" IN ('active')) AND ("users"."user_type" IS NULL OR "users"."user_type" IN (6, 4))
+ (2.4ms) SELECT MAX("users"."id") FROM "users" WHERE ("users"."state" IN ('active')) AND ("users"."user_type" IS NULL OR "users"."user_type" IN (6, 4))
+ (1.9ms) SELECT COUNT("users"."id") FROM "users" WHERE ("users"."state" IN ('active')) AND ("users"."user_type" IS NULL OR "users"."user_type" IN (6, 4)) AND "users"."id" BETWEEN 1 AND 100000
+```
+
+## Optimize queries with Database Lab
+
+[Database Lab](../../database/database_lab.md) is a service that uses a production clone to test queries.
+
+- GitLab.com's production database has a 15 second timeout.
+- Any single query must stay below the [1 second execution time](../../database/query_performance.md#timing-guidelines-for-queries) with cold caches.
+- Add a specialized index on columns involved to reduce the execution time.
+
+To understand the query's execution, we add the following information
+to a merge request description:
+
+- For counters that have a `time_period` test, we add information for both:
+ - `time_period = {}` for all time periods.
+ - `time_period = { created_at: 28.days.ago..Time.current }` for the last 28 days.
+- Execution plan and query time before and after optimization.
+- Query generated for the index and time.
+- Migration output for up and down execution.
+
+For more details, see the [database review guide](../../database_review.md#preparation-when-adding-or-modifying-queries).
+
+### Optimization recommendations and examples
+
+- Use specialized indexes. For examples, see these merge requests:
+ - [Example 1](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/26871)
+ - [Example 2](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/26445)
+- Use defined `start` and `finish`. These values can be memoized and reused, as in this
+ [example merge request](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/37155).
+- Avoid joins and unnecessary complexity in your queries. See this
+ [example merge request](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/36316) as an example.
+- Set a custom `batch_size` for `distinct_count`, as in this [example merge request](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/38000).
+
+## Add the metric definition
+
+See the [Metrics Dictionary guide](metrics_dictionary.md) for more information.
+
+## Add the metric to the Versions Application
+
+Check if the new metric must be added to the Versions Application. See the `usage_data` [schema](https://gitlab.com/gitlab-services/version-gitlab-com/-/blob/master/db/schema.rb#L147) and Service Data [parameters accepted](https://gitlab.com/gitlab-services/version-gitlab-com/-/blob/master/app/services/usage_ping.rb). Any metrics added under the `counts` key are saved in the `stats` column.
+
+## Create a merge request
+
+Create a merge request for the new Service Ping metric, and do the following:
+
+- Add the `feature` label to the merge request. A metric is a user-facing change and is part of expanding the Service Ping feature.
+- Add a changelog entry that complies with the [changelog entries guide](../../changelog.md).
+- Ask for an Analytics Instrumentation review.
+  On GitLab.com, we have DangerBot set up to monitor Analytics Instrumentation related files and recommend an [Analytics Instrumentation review](review_guidelines.md).
+
+## Verify your metric
+
+On GitLab.com, the Product Intelligence team regularly [monitors Service Ping](https://gitlab.com/groups/gitlab-org/-/epics/6000).
+They may alert you that your metrics need further optimization to run quicker and with greater success.
+
+The Service Ping JSON payload for GitLab.com is shared in the
+[#g_product_intelligence](https://gitlab.slack.com/archives/CL3A7GFPF) Slack channel every week.
+
+You may also use the [Service Ping QA dashboard](https://app.periscopedata.com/app/gitlab/632033/Usage-Ping-QA) to check how well your metric performs.
+The dashboard allows filtering by GitLab version, by "Self-managed" and "SaaS", and shows you how many failures have occurred for each metric. Whenever you notice a high failure rate, you can re-optimize your metric.
+
+Use [Metrics Dictionary](https://metrics.gitlab.com/) [copy query to clipboard feature](https://www.youtube.com/watch?v=n4o65ivta48&list=PL05JrBw4t0Krg3mbR6chU7pXtMt_es6Pb) to get a query ready to run in Sisense for a specific metric.
+
+## Set up and test Service Ping locally
+
+To set up Service Ping locally, you must:
+
+1. [Set up local repositories](#set-up-local-repositories).
+1. [Test local setup](#test-local-setup).
+1. Optional. [Test Prometheus-based Service Ping](#test-prometheus-based-service-ping).
+
+### Set up local repositories
+
+1. Clone and start [GitLab](https://gitlab.com/gitlab-org/gitlab-development-kit).
+1. Clone and start [Versions Application](https://gitlab.com/gitlab-services/version-gitlab-com).
+ Make sure you run `docker-compose up` to start a PostgreSQL and Redis instance.
+1. Point GitLab to the Versions Application endpoint instead of the default endpoint:
+ 1. Open [service_ping/submit_service.rb](https://gitlab.com/gitlab-org/gitlab/-/blob/master/app/services/service_ping/submit_service.rb#L5) locally and modify `STAGING_BASE_URL`.
+ 1. Set it to the local Versions Application URL: `http://localhost:3000`.
+
+### Test local setup
+
+1. Using the `gitlab` Rails console, manually trigger Service Ping:
+
+ ```ruby
+ GitlabServicePingWorker.new.perform('triggered_from_cron' => false)
+ ```
+
+1. Use the `versions` Rails console to check the Service Ping was successfully received,
+ parsed, and stored in the Versions database:
+
+ ```ruby
+ UsageData.last
+ ```
+
+## Test Prometheus-based Service Ping
+
+If the data submitted includes metrics [queried from Prometheus](#prometheus-queries)
+you want to inspect and verify, you must:
+
+- Ensure that a Prometheus server is running locally.
+- Ensure the respective GitLab components are exporting metrics to the Prometheus server.
+
+If you do not need to test data coming from Prometheus, no further action
+is necessary. Service Ping should degrade gracefully in the absence of a running Prometheus server.
+
+Three kinds of components may export data to Prometheus, and are included in Service Ping:
+
+- [`node_exporter`](https://github.com/prometheus/node_exporter): Exports node metrics
+ from the host machine.
+- [`gitlab-exporter`](https://gitlab.com/gitlab-org/gitlab-exporter): Exports process metrics
+ from various GitLab components.
+- Other various GitLab services, such as Sidekiq and the Rails server, which export their own metrics.
+
+### Test with an Omnibus container
+
+This is the recommended approach to test Prometheus-based Service Ping.
+
+To verify your change, build a new Omnibus image from your code branch using CI/CD, download the image,
+and run a local container instance:
+
+1. From your merge request, select the `qa` stage, then trigger the `e2e:package-and-test` job. This job triggers an Omnibus
+ build in a [downstream pipeline of the `omnibus-gitlab-mirror` project](https://gitlab.com/gitlab-org/build/omnibus-gitlab-mirror/-/pipelines).
+1. In the downstream pipeline, wait for the `gitlab-docker` job to finish.
+1. Open the job logs and locate the full container name including the version. It takes the following form: `registry.gitlab.com/gitlab-org/build/omnibus-gitlab-mirror/gitlab-ee:<VERSION>`.
+1. On your local machine, make sure you are signed in to the GitLab Docker registry. You can find the instructions for this in
+ [Authenticate to the GitLab Container Registry](../../../user/packages/container_registry/authenticate_with_container_registry.md).
+1. Once signed in, download the new image by using `docker pull registry.gitlab.com/gitlab-org/build/omnibus-gitlab-mirror/gitlab-ee:<VERSION>`
+1. For more information about working with and running Omnibus GitLab containers in Docker, refer to [GitLab Docker images](../../../install/docker.md) documentation.
+
+### Test with GitLab development toolkits
+
+This is the less recommended approach, because it comes with a number of difficulties when emulating a real GitLab deployment.
+
+The [GDK](https://gitlab.com/gitlab-org/gitlab-development-kit) is not set up to run a Prometheus server or `node_exporter` alongside other GitLab components. If you would
+like to do so, [Monitoring the GDK with Prometheus](https://gitlab.com/gitlab-org/gitlab-development-kit/-/blob/main/doc/howto/prometheus/index.md#monitoring-the-gdk-with-prometheus) is a good start.
+
+The [GCK](https://gitlab.com/gitlab-org/gitlab-compose-kit) has limited support for testing Prometheus-based Service Ping.
+By default, it comes with a fully configured Prometheus service that is set up to scrape a number of components.
+However, it has the following limitations:
+
+- It does not run a `gitlab-exporter` instance, so several `process_*` metrics from services such as Gitaly may be missing.
+- While it runs a `node_exporter`, `docker-compose` services emulate hosts, so the `node_exporter` normally reports itself as not associated
+  with any of the other running services. That is not how node metrics are reported in a production setup, where `node_exporter`
+  always runs as a process alongside other GitLab components on any given node. For Service Ping, none of the node data would therefore
+  appear to be associated with any of the running services, because they all appear to run on different hosts. To alleviate this problem, the `node_exporter` in GCK was arbitrarily "assigned" to the `web` service, so `node_*` metrics appear in Service Ping for this service only.
+
+## Aggregated metrics
+
+> [Introduced](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/45979) in GitLab 13.6.
+
+WARNING:
+This feature is intended solely for internal GitLab use.
+
+The aggregated metrics feature provides insight into the data attributes in a collection of Service Ping metrics.
+This aggregation allows you to count data attributes in events without counting each occurrence of the same data attribute in multiple events.
+For example, you can aggregate the number of users who perform several actions, such as creating a new issue and opening a new merge request.
+You can then count each user that performed any combination of these actions.
+
+### Defining aggregated metric via metric YAML definition
+
+To add data for aggregated metrics to the Service Ping payload,
+create metric YAML definition file following [Aggregated metric instrumentation guide](metrics_instrumentation.md#aggregated-metrics).
+
+### Redis sourced aggregated metrics
+
+> [Introduced](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/45979) in GitLab 13.6.
+
+To declare the aggregate of events collected with [Redis HLL Counters](#redis-hll-counters),
+you must fulfill the following requirements:
+
+1. All events listed in the `events` attribute must come from
+   [`known_events/*.yml`](#known-events-are-added-automatically-in-service-data-payload) files.
+1. All events listed in the `events` attribute must have the same `aggregation` attribute.
+1. The `time_frame` attribute must not include the `all` value, which is unavailable for Redis sourced aggregated metrics.
+
+While it is possible to aggregate EE-only events together with events that occur in all GitLab editions, it's important to remember that doing so may produce high variance between data collected from EE and CE GitLab instances.
+
+### Database sourced aggregated metrics
+
+> [Introduced](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/52784) in GitLab 13.9.
+
+To declare an aggregate of metrics based on events collected from the database, follow
+these steps:
+
+1. [Persist the metrics for aggregation](#persist-metrics-for-aggregation).
+1. [Add new aggregated metric definition](#add-new-aggregated-metric-definition).
+
+#### Persist metrics for aggregation
+
+Only metrics calculated with [Estimated Batch Counters](#estimated-batch-counters)
+can be persisted for database sourced aggregated metrics. To persist a metric,
+inject a Ruby block into the
+[`estimate_batch_distinct_count`](#estimate_batch_distinct_count-method) method.
+This block should invoke the
+`Gitlab::Usage::Metrics::Aggregates::Sources::PostgresHll.save_aggregated_metrics`
+[method](https://gitlab.com/gitlab-org/gitlab/-/blob/master/lib/gitlab/usage/metrics/aggregates/sources/postgres_hll.rb#L21),
+which stores `estimate_batch_distinct_count` results for future use in aggregated metrics.
+
+The `Gitlab::Usage::Metrics::Aggregates::Sources::PostgresHll.save_aggregated_metrics`
+method accepts the following arguments:
+
+- `metric_name`: The name of the metric to use for aggregations. Should be the same
+  as the key under which the metric is added into Service Ping.
+- `recorded_at_timestamp`: The timestamp representing the moment when a given
+  Service Ping payload was collected. You should use the convenience method `recorded_at`
+  to fill the `recorded_at_timestamp` argument, like this: `recorded_at_timestamp: recorded_at`.
+- `time_period`: The time period used to build the `relation` argument passed into
+  `estimate_batch_distinct_count`. To collect the metric with all available historical
+  data, set the time period to `nil`: `time_period: nil`.
+- `data`: HyperLogLog buckets structure representing unique entries in `relation`.
+  The `estimate_batch_distinct_count` method always passes the correct argument
+  into the block, so the `data` argument must always be set to the block argument,
+  like this: `data: result`.
+
+Example metrics persistence:
+
+```ruby
+class UsageData
+ def count_secure_pipelines(time_period)
+ ...
+ relation = ::Security::Scan.by_scan_types(scan_type).where(time_period)
+
+ pipelines_with_secure_jobs['dependency_scanning_pipeline'] = estimate_batch_distinct_count(relation, :pipeline_id, batch_size: 1000, start: start_id, finish: finish_id) do |result|
+ ::Gitlab::Usage::Metrics::Aggregates::Sources::PostgresHll
+ .save_aggregated_metrics(metric_name: 'dependency_scanning_pipeline', recorded_at_timestamp: recorded_at, time_period: time_period, data: result)
+ end
+ end
+end
+```
+
+#### Add new aggregated metric definition
+
+After all metrics are persisted, you can add an aggregated metric definition following [Aggregated metric instrumentation guide](metrics_instrumentation.md#aggregated-metrics).
+To declare the aggregate of metrics collected with [Estimated Batch Counters](#estimated-batch-counters),
+you must fulfill the following requirements:
+
+- Metric names listed in the `events:` attribute must use the same names you passed in the `metric_name` argument when persisting metrics in the previous step.
+- Every metric listed in the `events:` attribute must be persisted for **every** selected `time_frame:` value.
diff --git a/doc/development/internal_analytics/service_ping/index.md b/doc/development/internal_analytics/service_ping/index.md
new file mode 100644
index 00000000000..69d37f0dae2
--- /dev/null
+++ b/doc/development/internal_analytics/service_ping/index.md
@@ -0,0 +1,509 @@
+---
+stage: Analytics
+group: Analytics Instrumentation
+info: To determine the technical writer assigned to the Stage/Group associated with this page, see https://about.gitlab.com/handbook/product/ux/technical-writing/#assignments
+---
+
+# Service Ping development guidelines
+
+> - Introduced in GitLab Ultimate 11.2, more statistics.
+> - In GitLab 14.1, [renamed from Usage Ping to Service Ping](https://gitlab.com/groups/gitlab-org/-/epics/5990). In 14.0 and earlier, use the Usage Ping documentation for the Rails commands appropriate to your version.
+
+Service Ping is a GitLab process that collects and sends a weekly payload to GitLab.
+The payload provides important high-level data that helps our product, support,
+and sales teams understand how GitLab is used. The data helps to:
+
+- Compare counts month over month (or week over week) to get a rough sense for how an instance uses
+ different product features.
+- Collect other facts that help us classify and understand GitLab installations.
+- Calculate our stage monthly active users (SMAU), which helps to measure the success of our stages
+ and features.
+
+Service Ping information is not anonymous. It's linked to the instance's hostname, but does
+not contain project names, usernames, or any other specific data.
+
+Service Ping is enabled by default. However, you can [disable](../../../user/admin_area/settings/usage_statistics.md#enable-or-disable-usage-statistics) it on any self-managed instance. When Service Ping is enabled, GitLab gathers data from the other instances and can show your instance's usage statistics to your users.
+
+## Service Ping terminology
+
+We use the following terminology to describe the Service Ping components:
+
+- **Service Ping**: the process that collects and generates a JSON payload.
+- **Service Data**: the contents of the Service Ping JSON payload. This includes metrics.
+- **Metrics**: primarily made up of row counts for different tables in an instance's database. Each
+ metric has a corresponding [metric definition](metrics_dictionary.md#metrics-definition-and-validation)
+ in a YAML file.
+- **MAU**: monthly active users.
+- **WAU**: weekly active users.
+
+### Limitations
+
+- Service Ping does not track frontend events, such as page views, link clicks, or user sessions.
+- Service Ping focuses only on aggregated backend events.
+
+Because of these limitations, we recommend that you:
+
+- Instrument your products with Snowplow for more detailed analytics on GitLab.com.
+- Use Service Ping to track aggregated backend events on self-managed instances.
+
+## Service Ping request flow
+
+The following example shows a basic request/response flow between a GitLab instance, the Versions Application, the License Application, Salesforce, the GitLab S3 Bucket, the GitLab Snowflake Data Warehouse, and Sisense:
+
+```mermaid
+sequenceDiagram
+ participant GitLab Instance
+ participant Versions Application
+ participant Licenses Application
+ participant Salesforce
+ participant S3 Bucket
+ participant Snowflake DW
+ participant Sisense Dashboards
+ GitLab Instance->>Versions Application: Send Service Ping
+ loop Process usage data
+ Versions Application->>Versions Application: Parse usage data
+ Versions Application->>Versions Application: Write to database
+ Versions Application->>Versions Application: Update license ping time
+ end
+ loop Process data for Salesforce
+ Versions Application-xLicenses Application: Request Zuora subscription id
+ Licenses Application-xVersions Application: Zuora subscription id
+ Versions Application-xSalesforce: Request Zuora account id by Zuora subscription id
+ Salesforce-xVersions Application: Zuora account id
+ Versions Application-xSalesforce: Usage data for the Zuora account
+ end
+ Versions Application->>S3 Bucket: Export Versions database
+ S3 Bucket->>Snowflake DW: Import data
+ Snowflake DW->>Snowflake DW: Transform data using dbt
+ Snowflake DW->>Sisense Dashboards: Data available for querying
+ Versions Application->>GitLab Instance: DevOps Score (Conversational Development Index)
+```
+
+## How Service Ping works
+
+1. The Service Ping [cron job](https://gitlab.com/gitlab-org/gitlab/-/blob/master/app/workers/gitlab_service_ping_worker.rb#L24) is set in Sidekiq to run weekly.
+1. When the cron job runs, it calls [`Gitlab::Usage::ServicePingReport.for(output: :all_metrics_values)`](https://gitlab.com/gitlab-org/gitlab/-/blob/master/app/services/service_ping/submit_service.rb).
+1. `Gitlab::Usage::ServicePingReport.for(output: :all_metrics_values)` [cascades down](https://gitlab.com/gitlab-org/gitlab/-/blob/master/lib/gitlab/usage_data.rb) to ~400+ other counter method calls.
+1. The responses of all method calls are [merged together](https://gitlab.com/gitlab-org/gitlab/-/blob/master/lib/gitlab/usage_data.rb#L68) into a single JSON payload.
+1. The JSON payload is then [posted to the Versions application](https://gitlab.com/gitlab-org/gitlab/-/blob/master/app/services/service_ping/submit_service.rb#L20).
+   If a firewall exception is needed, allow outbound traffic to the hostname `version.gitlab.com`
+   over the `TCP` protocol on port `443`, that is, to <https://version.gitlab.com/>.
+1. In case of an error, the error is reported to the Versions application along with the following information:
+
+   - `uuid` - GitLab instance unique identifier
+   - `hostname` - GitLab instance hostname
+   - `version` - Current version of the GitLab instance
+   - `elapsed` - Time elapsed between the start of the Service Ping reporting process and the moment the error occurred
+   - `message` - Error message
+
+ <pre>
+ <code>
+ {
+ "uuid"=>"02333324-1cd7-4c3b-a45b-a4993f05fb1d",
+ "hostname"=>"127.0.0.1",
+ "version"=>"14.7.0-pre",
+ "elapsed"=>0.006946,
+ "message"=>'PG::UndefinedColumn: ERROR: column \"non_existent_attribute\" does not exist\nLINE 1: SELECT COUNT(non_existent_attribute) FROM \"issues\" /*applica...'
+ }
+ </code>
+ </pre>
+
+1. Finally, the timing metadata, which is used for diagnostic purposes, is submitted to the Versions application. It consists of a list of metric identifiers and the time it took to calculate the metrics:
+
+ > - [Introduced](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/37911) in GitLab 15.0 [with a flag](../../../user/feature_flags.md), enabled by default.
+ > - [Generally available](https://gitlab.com/gitlab-org/gitlab/-/issues/295289) in GitLab 15.2. [Feature flag `measure_service_ping_metric_collection`](https://gitlab.com/gitlab-org/gitlab/-/issues/358128) removed.
+
+```ruby
+ {
+ "metadata"=>
+ {
+ "uuid"=>"0000000-0000-0000-0000-000000000000",
+ "metrics"=>
+ [{"name"=>"version", "time_elapsed"=>1.1811964213848114e-05},
+ {"name"=>"installation_type", "time_elapsed"=>0.00017242692410945892},
+ {"name"=>"license_billable_users", "time_elapsed"=>0.009520471096038818},
+ ....
+ {"name"=>"counts.clusters_platforms_eks",
+ "time_elapsed"=>0.05638605775311589},
+ {"name"=>"counts.clusters_platforms_gke",
+ "time_elapsed"=>0.40995341585949063},
+ {"name"=>"counts.clusters_platforms_user",
+ "time_elapsed"=>0.06410990096628666},
+ {"name"=>"counts.clusters_management_project",
+ "time_elapsed"=>0.24020783510059118}
+ ]
+ }
+ }
+```
+
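+To reproduce steps 2 to 4 locally without submitting anything, you can build the same payload from a Rails console. This is a minimal sketch that only calls the reporting method named above and inspects the result:
+
+```ruby
+# Rails console (for example, `bin/rails console` in GDK).
+# Builds the Service Ping payload in memory; nothing is posted to the Versions application.
+payload = Gitlab::Usage::ServicePingReport.for(output: :all_metrics_values)
+
+# Inspect the top-level structure; the keys correspond to the example payload shown later on this page.
+payload.keys
+```
+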
+### On a Geo secondary site
+
+We also collect metrics specific to [Geo](../../../administration/geo/index.md) secondary sites to send with Service Ping.
+
+1. The [Geo secondary service ping cron job](https://gitlab.com/gitlab-org/gitlab/-/blob/master/ee/app/workers/geo/secondary_usage_data_cron_worker.rb) is set in Sidekiq to run weekly.
+1. When the cron job runs, it calls [`SecondaryUsageData.update_metrics!`](https://gitlab.com/gitlab-org/gitlab/-/blob/master/ee/app/models/geo/secondary_usage_data.rb#L33). This collects the relevant metrics from Prometheus and stores the data in the Geo secondary tracking database for transmission to the primary site during a [Geo node status update](https://gitlab.com/gitlab-org/gitlab/-/blob/master/ee/app/models/geo_node_status.rb#L105).
+1. Geo node status data is sent with the JSON payload in the process described above. The following is an example of the payload where each object in the array represents a Geo node:
+
+ ```json
+ [
+ {
+ "repository_verification_enabled"=>true,
+ "repositories_replication_enabled"=>true,
+ "repositories_synced_count"=>24,
+ "repositories_failed_count"=>0,
+ "git_fetch_event_count_weekly"=>nil,
+ "git_push_event_count_weekly"=>nil,
+ ... other geo node status fields
+ }
+ ]
+ ```
+
+## Implementing Service Ping
+
+See the [implement Service Ping](implement.md) guide.
+
+## Example Service Ping payload
+
+The following is example content of the Service Ping payload.
+
+```json
+{
+ "uuid": "0000000-0000-0000-0000-000000000000",
+ "hostname": "example.com",
+ "version": "12.10.0-pre",
+ "installation_type": "omnibus-gitlab",
+ "active_user_count": 999,
+ "recorded_at": "2020-04-17T07:43:54.162+00:00",
+ "edition": "EEU",
+ "license_md5": "00000000000000000000000000000000",
+ "license_sha256": "0000000000000000000000000000000000000000000000000000000000000000",
+ "license_id": null,
+ "historical_max_users": 999,
+ "licensee": {
+ "Name": "ABC, Inc.",
+ "Email": "email@example.com",
+ "Company": "ABC, Inc."
+ },
+ "license_user_count": 999,
+ "license_starts_at": "2020-01-01",
+ "license_expires_at": "2021-01-01",
+ "license_plan": "ultimate",
+ "license_add_ons": {
+ },
+ "license_trial": false,
+ "counts": {
+ "assignee_lists": 999,
+ "boards": 999,
+ "ci_builds": 999,
+ ...
+ },
+ "container_registry_enabled": true,
+ "dependency_proxy_enabled": false,
+ "gitlab_shared_runners_enabled": true,
+ "gravatar_enabled": true,
+ "influxdb_metrics_enabled": true,
+ "ldap_enabled": false,
+ "mattermost_enabled": false,
+ "omniauth_enabled": true,
+ "prometheus_enabled": false,
+ "prometheus_metrics_enabled": false,
+ "reply_by_email_enabled": "incoming+%{key}@incoming.gitlab.com",
+ "signup_enabled": true,
+ "projects_with_expiration_policy_disabled": 999,
+ "projects_with_expiration_policy_enabled": 999,
+ ...
+ "elasticsearch_enabled": true,
+ "license_trial_ends_on": null,
+ "geo_enabled": false,
+ "git": {
+ "version": {
+ "major": 2,
+ "minor": 26,
+ "patch": 1
+ }
+ },
+ "gitaly": {
+ "version": "12.10.0-rc1-93-g40980d40",
+ "servers": 56,
+ "clusters": 14,
+ "filesystems": [
+ "EXT_2_3_4"
+ ]
+ },
+ "gitlab_pages": {
+ "enabled": true,
+ "version": "1.17.0"
+ },
+ "container_registry_server": {
+ "vendor": "gitlab",
+ "version": "2.9.1-gitlab"
+ },
+ "database": {
+ "adapter": "postgresql",
+ "version": "9.6.15",
+ "pg_system_id": 6842684531675334351,
+ "flavor": "Cloud SQL for PostgreSQL"
+ },
+ "analytics_unique_visits": {
+ "g_analytics_contribution": 999,
+ ...
+ },
+ "usage_activity_by_stage": {
+ "configure": {
+ "project_clusters_enabled": 999,
+ ...
+ },
+ "create": {
+ "merge_requests": 999,
+ ...
+ },
+ "manage": {
+ "events": 999,
+ ...
+ },
+ "monitor": {
+ "clusters": 999,
+ ...
+ },
+ "package": {
+ "projects_with_packages": 999
+ },
+ "plan": {
+ "issues": 999,
+ ...
+ },
+ "release": {
+ "deployments": 999,
+ ...
+ },
+ "secure": {
+ "user_container_scanning_jobs": 999,
+ ...
+ },
+ "verify": {
+ "ci_builds": 999,
+ ...
+ }
+ },
+ "usage_activity_by_stage_monthly": {
+ "configure": {
+ "project_clusters_enabled": 999,
+ ...
+ },
+ "create": {
+ "merge_requests": 999,
+ ...
+ },
+ "manage": {
+ "events": 999,
+ ...
+ },
+ "monitor": {
+ "clusters": 999,
+ ...
+ },
+ "package": {
+ "projects_with_packages": 999
+ },
+ "plan": {
+ "issues": 999,
+ ...
+ },
+ "release": {
+ "deployments": 999,
+ ...
+ },
+ "secure": {
+ "user_container_scanning_jobs": 999,
+ ...
+ },
+ "verify": {
+ "ci_builds": 999,
+ ...
+ }
+ },
+ "topology": {
+ "duration_s": 0.013836685999194742,
+ "application_requests_per_hour": 4224,
+ "query_apdex_weekly_average": 0.996,
+ "failures": [],
+ "nodes": [
+ {
+ "node_memory_total_bytes": 33269903360,
+ "node_memory_utilization": 0.35,
+ "node_cpus": 16,
+ "node_cpu_utilization": 0.2,
+ "node_uname_info": {
+ "machine": "x86_64",
+ "sysname": "Linux",
+ "release": "4.19.76-linuxkit"
+ },
+ "node_services": [
+ {
+ "name": "web",
+ "process_count": 16,
+ "process_memory_pss": 233349888,
+ "process_memory_rss": 788220927,
+ "process_memory_uss": 195295487,
+ "server": "puma"
+ },
+ {
+ "name": "sidekiq",
+ "process_count": 1,
+ "process_memory_pss": 734080000,
+ "process_memory_rss": 750051328,
+ "process_memory_uss": 731533312
+ },
+ ...
+ ],
+ ...
+ },
+ ...
+ ]
+ }
+}
+```
+
+## Notable changes
+
+In GitLab 14.6, [`flavor`](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/75587) was added to try to detect the underlying managed database variant.
+Possible values are "Amazon Aurora PostgreSQL", "PostgreSQL on Amazon RDS", "Cloud SQL for PostgreSQL",
+"Azure Database for PostgreSQL - Flexible Server", or "null".
+
+In GitLab 13.5, `pg_system_id` was added to send the [PostgreSQL system identifier](https://www.2ndquadrant.com/en/blog/support-for-postgresqls-system-identifier-in-barman/).
+
+## Export Service Ping data
+
+Rake tasks exist to export Service Ping data in different formats.
+
+- The Rake tasks export the raw SQL queries for `count`, `distinct_count`, and `sum`.
+- The Rake tasks export the Redis counter class or the line of the Redis block for `redis_usage_data`.
+- The Rake tasks calculate the `alt_usage_data` metrics.
+
+In the home directory of your local GitLab installation, run the following Rake tasks for the YAML and JSON versions respectively:
+
+```shell
+# for YAML export of SQL queries
+bin/rake gitlab:usage_data:dump_sql_in_yaml
+
+# for JSON export of SQL queries
+bin/rake gitlab:usage_data:dump_sql_in_json
+
+# for JSON export of Non SQL data
+bin/rake gitlab:usage_data:dump_non_sql_in_json
+
+# You may pipe the output into a file
+bin/rake gitlab:usage_data:dump_sql_in_yaml > ~/Desktop/usage-metrics-2020-09-02.yaml
+```
+
+## Generate Service Ping
+
+To generate Service Ping, use [Teleport](https://goteleport.com/docs/) or a detached screen session on a remote server.
+
+### Triggering
+
+#### Trigger Service Ping with Teleport
+
+1. Request temporary [access](https://gitlab.com/gitlab-com/runbooks/-/blob/master/docs/teleport/Connect_to_Rails_Console_via_Teleport.md#how-to-use-teleport-to-connect-to-rails-console) to the required environment.
+1. After your approval is issued, [access the Rails console](https://gitlab.com/gitlab-com/runbooks/-/blob/master/docs/teleport/Connect_to_Rails_Console_via_Teleport.md#access-approval).
+1. Run `GitlabServicePingWorker.new.perform('triggered_from_cron' => false)`.
+
+#### Trigger Service Ping with a detached screen session
+
+1. Connect to bastion with agent forwarding:
+
+ ```shell
+ ssh -A lb-bastion.gprd.gitlab.com
+ ```
+
+1. Create named screen:
+
+ ```shell
+ screen -S <username>_usage_ping_<date>
+ ```
+
+1. Connect to console host:
+
+ ```shell
+ ssh $USER-rails@console-01-sv-gprd.c.gitlab-production.internal
+ ```
+
+1. Run:
+
+   ```ruby
+   GitlabServicePingWorker.new.perform('triggered_from_cron' => false)
+   ```
+
+1. To detach from screen, press `ctrl + A`, `ctrl + D`.
+1. Exit from bastion:
+
+ ```shell
+ exit
+ ```
+
+1. Get the metrics duration from the logs: search the Google Cloud console logs for `time_elapsed`. [Query example](https://cloudlogging.app.goo.gl/nWheZvD8D3nWazNe6).
+
+### Verification (after approximately 30 hours)
+
+#### Verify with Teleport
+
+1. Follow [the steps](https://gitlab.com/gitlab-com/runbooks/-/blob/master/docs/teleport/Connect_to_Rails_Console_via_Teleport.md#how-to-use-teleport-to-connect-to-rails-console) to request new access to the required environment and connect to the Rails console.
+1. Check the last payload in the `raw_usage_data` table: `RawUsageData.last.payload`.
+1. Check when the payload was sent: `RawUsageData.last.sent_at`.
+
+#### Verify using detached screen session
+
+1. Reconnect to bastion:
+
+ ```shell
+ ssh -A lb-bastion.gprd.gitlab.com
+ ```
+
+1. Find your screen session:
+
+ ```shell
+ screen -ls
+ ```
+
+1. Attach to your screen session:
+
+ ```shell
+ screen -x 14226.mwawrzyniak_usage_ping_2021_01_22
+ ```
+
+1. Check the last payload in the `raw_usage_data` table:
+
+   ```ruby
+   RawUsageData.last.payload
+   ```
+
+1. Check when the payload was sent:
+
+   ```ruby
+   RawUsageData.last.sent_at
+   ```
+
+### Skip database write operations
+
+To skip database write operations, DevOps report creation, and storage of the usage data payload, pass the optional `skip_db_write` argument:
+
+```ruby
+GitlabServicePingWorker.new.perform('triggered_from_cron' => false, 'skip_db_write' => true)
+```
+
+## Monitoring
+
+The state of the Service Ping reporting process is monitored with an [internal Sisense dashboard](https://app.periscopedata.com/app/gitlab/968489/Product-Intelligence---Service-Ping-Health).
+
+## Related topics
+
+- [Product Intelligence Guide](https://about.gitlab.com/handbook/product/product-intelligence-guide/)
+- [Snowplow Guide](../snowplow/index.md)
+- [Product Intelligence Direction](https://about.gitlab.com/direction/analytics/product-intelligence/)
+- [Data Analysis Process](https://about.gitlab.com/handbook/business-technology/data-team/#data-analysis-process/)
+- [Data for Product Managers](https://about.gitlab.com/handbook/business-technology/data-team/programs/data-for-product-managers/)
+- [Data Infrastructure](https://about.gitlab.com/handbook/business-technology/data-team/platform/infrastructure/)
diff --git a/doc/development/internal_analytics/service_ping/metrics_dictionary.md b/doc/development/internal_analytics/service_ping/metrics_dictionary.md
new file mode 100644
index 00000000000..d1f1c0b595a
--- /dev/null
+++ b/doc/development/internal_analytics/service_ping/metrics_dictionary.md
@@ -0,0 +1,334 @@
+---
+stage: Analytics
+group: Analytics Instrumentation
+info: To determine the technical writer assigned to the Stage/Group associated with this page, see https://about.gitlab.com/handbook/product/ux/technical-writing/#assignments
+---
+
+# Metrics Dictionary Guide
+
+[Service Ping](index.md) metrics are defined in individual YAML definition files, from which the
+[Metrics Dictionary](https://metrics.gitlab.com/) is built. Currently, the metrics dictionary is built automatically once a day. When a change to a metric is made in a YAML file, you can see the change in the dictionary within 24 hours.
+This guide describes the dictionary and how it's implemented.
+
+## Metrics Definition and validation
+
+We are using [JSON Schema](https://gitlab.com/gitlab-org/gitlab/-/blob/master/config/metrics/schema.json) to validate the metrics definition.
+
+This process is meant to ensure consistent and valid metrics defined for Service Ping. All metrics *must*:
+
+- Comply with the defined [JSON schema](https://gitlab.com/gitlab-org/gitlab/-/blob/master/config/metrics/schema.json).
+- Have a unique `key_path`.
+- Have an owner.
+
+All metrics are stored in YAML files:
+
+- [`config/metrics`](https://gitlab.com/gitlab-org/gitlab/-/tree/master/config/metrics)
+
+WARNING:
+Only metrics with a metric definition YAML and whose status is not `removed` are added to the Service Ping JSON payload.
+
+Each metric is defined in a separate YAML file consisting of a number of fields:
+
+| Field | Required | Additional information |
+|---------------------|----------|----------------------------------------------------------------|
+| `key_path` | yes | JSON key path for the metric, location in Service Ping payload. |
+| `name` (deprecated) | no | Metric name suggestion. Does not have any impact on the Service Ping payload, only serves as documentation. |
+| `description` | yes | |
+| `product_section` | yes | The [section](https://gitlab.com/gitlab-com/www-gitlab-com/-/blob/master/data/sections.yml). |
+| `product_stage` | yes | The [stage](https://gitlab.com/gitlab-com/www-gitlab-com/blob/master/data/stages.yml) for the metric. |
+| `product_group` | yes | The [group](https://gitlab.com/gitlab-com/www-gitlab-com/blob/master/data/stages.yml) that owns the metric. |
+| `value_type` | yes | `string`; one of [`string`, `number`, `boolean`, `object`](https://json-schema.org/understanding-json-schema/reference/type.html). |
+| `status` | yes | `string`; [status](#metric-statuses) of the metric, may be set to `active`, `removed`, `broken`. |
+| `time_frame` | yes | `string`; may be set to a value like `7d`, `28d`, `all`, `none`. |
+| `data_source` | yes | `string`; may be set to a value like `database`, `redis`, `redis_hll`, `prometheus`, `system`, `license`. |
+| `data_category` | yes | `string`; [categories](#data-category) of the metric, may be set to `operational`, `optional`, `subscription`, `standard`. The default value is `optional`.|
+| `instrumentation_class` | yes | `string`; [the class that implements the metric](metrics_instrumentation.md). |
+| `distribution` | yes | `array`; may be set to one of `ce, ee` or `ee`. The [distribution](https://about.gitlab.com/handbook/marketing/brand-and-product-marketing/product-and-solution-marketing/tiers/#definitions) where the tracked feature is available. |
+| `performance_indicator_type` | no | `array`; may be set to one of [`gmau`, `smau`, `paid_gmau`, `umau` or `customer_health_score`](https://about.gitlab.com/handbook/business-technology/data-team/data-catalog/xmau-analysis/). |
+| `tier` | yes | `array`; may contain one or a combination of `free`, `premium` or `ultimate`. The [tier](https://about.gitlab.com/handbook/marketing/brand-and-product-marketing/product-and-solution-marketing/tiers/#definitions) where the tracked feature is available. This should be verbose and contain all tiers where a metric is available. |
+| `milestone` | yes | The milestone when the metric is introduced and when it's available to self-managed instances with the official GitLab release. |
+| `milestone_removed` | no | The milestone when the metric is removed. |
+| `introduced_by_url` | no | The URL to the merge request that introduced the metric to be available for self-managed instances. |
+| `removed_by_url` | no | The URL to the merge request that removed the metric. |
+| `repair_issue_url` | no | The URL of the issue that was created to repair a metric with a `broken` status. |
+| `options` | no | `object`: options information needed to calculate the metric value. |
+| `skip_validation` | no | This should **not** be set. [Used for imported metrics until we review, update and make them valid](https://gitlab.com/groups/gitlab-org/-/epics/5425). |
+
+### Metric `key_path`
+
+The `key_path` of the metric is the location in the JSON Service Ping payload.
+
+The `key_path` can be composed of multiple parts separated by `.`, and it must be unique.
+
+We recommend adding the metric under one of the following top-level keys:
+
+- `settings`: for settings related metrics.
+- `counts_weekly`: for counters that have data for the most recent 7 days.
+- `counts_monthly`: for counters that have data for the most recent 28 days.
+- `counts`: for counters that have data for all time.
+
+NOTE:
+We can't control what the metric's `key_path` is, because some of them are generated dynamically in `usage_data.rb`.
+For example, see [Redis HLL metrics](implement.md#redis-hll-counters).
+
+### Metric name (deprecated)
+
+WARNING:
+This feature was deprecated in GitLab 16.1
+and is planned for [removal](https://gitlab.com/gitlab-org/gitlab/-/issues/411602) in 16.2.
+
+To improve metric discoverability by a wider audience, each metric with
+instrumentation added at an appointed `key_path` receives a `name` attribute
+filled with the name suggestion, corresponding to the metric `data_source` and instrumentation.
+Metric name suggestions can contain two types of elements:
+
+1. **User input prompts**: enclosed by angle brackets (`< >`), these pieces should be replaced or
+ removed when you create a metrics YAML file.
+1. **Fixed suggestion**: plaintext parts generated according to well-defined algorithms.
+ They are based on underlying instrumentation, and must not be changed.
+
+For a metric name to be valid, it must not include any prompt, and fixed suggestions
+must not be changed.
+
+#### Generate a metric name suggestion (deprecated)
+
+WARNING:
+This feature was deprecated in GitLab 16.1
+and is planned for [removal](https://gitlab.com/gitlab-org/gitlab/-/issues/411602) in 16.2.
+
+The metric YAML generator can suggest a metric name for you.
+To generate a metric name suggestion, first instrument the metric at the provided `key_path`.
+Then, generate the metric's YAML definition, return to the instrumentation, and update it.
+
+1. Add the metric instrumentation class to `lib/gitlab/usage/metrics/instrumentations/`.
+1. Add the metric logic in the instrumentation class.
+1. Run the [metrics YAML generator](metrics_dictionary.md#create-a-new-metric-definition).
+1. Use the metric name suggestion to select a suitable metric name.
+1. Update the metric's YAML definition with the correct `key_path`.
+
+### Metric statuses
+
+Metric definitions can have one of the following statuses:
+
+- `active`: Metric is used and reports data.
+- `broken`: Metric reports broken data (for example, -1 fallback), or does not report data at all. A metric marked as `broken` must also have the `repair_issue_url` attribute.
+- `removed`: Metric was removed, but it may appear in Service Ping payloads sent from instances running on older versions of GitLab.
+
+### Metric `value_type`
+
+Metric definitions can have one of the following values for `value_type`:
+
+- `boolean`
+- `number`
+- `string`
+- `object`: A metric with `value_type: object` must have `value_json_schema` with a link to the JSON schema for the object.
+In general, we avoid complex objects and prefer one of the `boolean`, `number`, or `string` value types.
+An example of a metric that uses `value_type: object` is `topology` (`/config/metrics/settings/20210323120839_topology.yml`),
+which has a related schema in `/config/metrics/objects_schemas/topology_schema.json`.
+
+### Metric `time_frame`
+
+A metric's time frame is calculated based on the `time_frame` field and the `data_source` of the metric.
+For `redis_hll` metrics, the type of aggregation is also taken into consideration. In this context, the term "aggregation" refers to [chosen events data storage interval](implement.md#add-new-events), and is **NOT** related to the Aggregated Metrics feature.
+For more information about the aggregation type of each feature, see the [`common.yml` file](https://gitlab.com/gitlab-org/gitlab/-/blob/master/lib/gitlab/usage_data_counters/known_events/common.yml). Weeks run from Monday to Sunday.
+
+| data_source | time_frame | aggregation | Description |
+|------------------------|------------|----------------|-------------------------------------------------|
+| any | `none` | not applicable | A type of data that’s not tracked over time, such as settings and configuration information |
+| `database` | `all` | not applicable | The whole time the metric has been active (all-time interval) |
+| `database` | `7d` | not applicable | 9 days ago to 2 days ago |
+| `database` | `28d` | not applicable | 30 days ago to 2 days ago |
+| `redis` | `all` | not applicable | The whole time the metric has been active (all-time interval) |
+| `redis_hll` | `7d` | `daily` | Most recent 7 complete days |
+| `redis_hll` | `7d` | `weekly` | Most recent complete week |
+| `redis_hll` | `28d` | `daily` | Most recent 28 complete days |
+| `redis_hll` | `28d` | `weekly` | Most recent 4 complete weeks |
+
+### Data category
+
+We use the following categories to classify a metric:
+
+- `operational`: Required data for operational purposes.
+- `optional`: Default value for a metric. Data that is optional to collect. This can be [enabled or disabled](../../../user/admin_area/settings/usage_statistics.md#enable-or-disable-usage-statistics) in the Admin Area.
+- `subscription`: Data related to licensing.
+- `standard`: Standard set of identifiers that are included when collecting data.
+
+An aggregate metric is a metric that is the sum of two or more child metrics. Service Ping uses the data category of
+the aggregate metric to determine whether or not the data is included in the reported Service Ping payload.
+
+### Metric name suggestion examples (deprecated)
+
+WARNING:
+This feature was deprecated in GitLab 16.1
+and is planned for [removal](https://gitlab.com/gitlab-org/gitlab/-/issues/411602) in 16.2.
+
+#### Metric with `data_source: database`
+
+For a metric instrumented with SQL:
+
+```sql
+SELECT COUNT(DISTINCT user_id) FROM clusters WHERE clusters.management_project_id IS NOT NULL
+```
+
+- **Suggested name**: `count_distinct_user_id_from_<adjective describing: '(clusters.management_project_id IS NOT NULL)'>_clusters`
+- **Prompt**: `<adjective describing: '(clusters.management_project_id IS NOT NULL)'>`
+ should be replaced with an adjective that best represents filter conditions, such as `project_management`
+- **Final metric name**: For example, `count_distinct_user_id_from_project_management_clusters`
+
+For metric instrumented with SQL:
+
+```sql
+SELECT COUNT(DISTINCT clusters.user_id)
+FROM clusters_applications_helm
+INNER JOIN clusters ON clusters.id = clusters_applications_helm.cluster_id
+WHERE clusters_applications_helm.status IN (3, 5)
+```
+
+- **Suggested name**: `count_distinct_user_id_from_<adjective describing: '(clusters_applications_helm.status IN (3, 5))'>_clusters_<with>_<adjective describing: '(clusters_applications_helm.status IN (3, 5))'>_clusters_applications_helm`
+- **Prompt**: `<adjective describing: '(clusters_applications_helm.status IN (3, 5))'>`
+ should be replaced with an adjective that best represents filter conditions
+- **Final metric name**: `count_distinct_user_id_from_clusters_with_available_clusters_applications_helm`
+
+In the previous example, the first occurrence of the prompt is irrelevant, and the user can remove it. The second
+occurrence corresponds to the `available` scope defined in `Clusters::Concerns::ApplicationStatus`,
+which can be used as the adjective to replace the prompt.
+
+The `<with>` represents a suggested conjunction for the suggested name of the joined relation.
+The person documenting the metric can use it by either:
+
+- Removing the surrounding `<>`.
+- Using a different conjunction, such as `having` or `including`.
+
+#### Metric with `data_source: redis` or `redis_hll`
+
+For metrics instrumented with a Redis-based counter, the suggested name includes
+only the single prompt to be replaced by the person working with metrics YAML.
+
+- **Prompt**: `<please fill metric name, suggested format is: {subject}_{verb}{ing|ed}_{object} eg: users_creating_epics or merge_requests_viewed_in_single_file_mode>`
+- **Final metric name**: We suggest the metric name should follow the format of
+ `{subject}_{verb}{ing|ed}_{object}`, such as `user_creating_epics`, `users_triggering_security_scans`,
+ or `merge_requests_viewed_in_single_file_mode`
+
+#### Metric with `data_source: prometheus` or `system`
+
+For metrics instrumented with Prometheus or coming from the operating system,
+the suggested name includes only a single prompt to be filled in by the person working with the metrics YAML.
+
+- **Prompt**: `<please fill metric name>`
+- **Final metric name**: Due to the variety of cases that can apply to this kind of metric,
+ no naming convention exists. Each person instrumenting a metric should use their
+ best judgment to come up with a descriptive name.
+
+### Example YAML metric definition
+
+The linked [`uuid`](https://gitlab.com/gitlab-org/gitlab/-/blob/master/config/metrics/license/uuid.yml)
+YAML file includes an example metric definition, where the `uuid` metric is the GitLab
+instance unique identifier.
+
+```yaml
+key_path: uuid
+description: GitLab instance unique identifier
+product_section: analytics
+product_stage: analytics
+product_group: analytics_instrumentation
+value_type: string
+status: active
+milestone: 9.1
+instrumentation_class: UuidMetric
+introduced_by_url: https://gitlab.com/gitlab-org/gitlab/-/merge_requests/1521
+time_frame: none
+data_source: database
+distribution:
+- ce
+- ee
+tier:
+- free
+- premium
+- ultimate
+```
+
+### Create a new metric definition
+
+The GitLab codebase provides a dedicated [generator](https://gitlab.com/gitlab-org/gitlab/-/blob/master/lib/generators/gitlab/usage_metric_definition_generator.rb) to create new metric definitions.
+
+For uniqueness, the generated files include a timestamp prefix in ISO 8601 format.
+
+The generator takes a list of key paths and 3 options as arguments. It creates metric YAML definitions in the corresponding location:
+
+- `--ee`, `--no-ee` Indicates if metric is for EE.
+- `--dir=DIR` Indicates the metric directory. It must be one of: `counts_7d`, `7d`, `counts_28d`, `28d`, `counts_all`, `all`, `settings`, `license`.
+- `--class_name=CLASS_NAME` Indicates the instrumentation class. For example `UsersCreatingIssuesMetric`, `UuidMetric`
+
+**Single metric example**
+
+```shell
+bundle exec rails generate gitlab:usage_metric_definition counts.issues --dir=7d --class_name=CountIssues
+// Creates 1 file
+// create config/metrics/counts_7d/issues.yml
+```
+
+**Multiple metrics example**
+
+```shell
+bundle exec rails generate gitlab:usage_metric_definition counts.issues counts.users --dir=7d --class_name=CountUsersCreatingIssues
+// Creates 2 files
+// create config/metrics/counts_7d/issues.yml
+// create config/metrics/counts_7d/users.yml
+```
+
+NOTE:
+To create a metric definition used in EE, add the `--ee` flag.
+
+```shell
+bundle exec rails generate gitlab:usage_metric_definition counts.issues --ee --dir=7d --class_name=CountUsersCreatingIssues
+// Creates 1 file
+// create ee/config/metrics/counts_7d/issues.yml
+```
+
+### Metrics added dynamically to Service Ping payload
+
+The [Redis HLL metrics](implement.md#known-events-are-added-automatically-in-service-data-payload) are added automatically to the Service Ping payload.
+
+A YAML metric definition is required for each metric. A dedicated generator is provided to create metric definitions for Redis HLL events.
+
+The generator takes `category` and `events` arguments and uses `redis_hll_counters` as the root key. It creates two metric definitions for each event, one for the weekly and one for the monthly time frame:
+
+**Single metric example**
+
+```shell
+bundle exec rails generate gitlab:usage_metric_definition:redis_hll issues count_users_closing_issues
+// Creates 2 files
+// create config/metrics/counts_7d/count_users_closing_issues_weekly.yml
+// create config/metrics/counts_28d/count_users_closing_issues_monthly.yml
+```
+
+**Multiple metrics example**
+
+```shell
+bundle exec rails generate gitlab:usage_metric_definition:redis_hll issues count_users_closing_issues count_users_reopening_issues
+// Creates 4 files
+// create config/metrics/counts_7d/count_users_closing_issues_weekly.yml
+// create config/metrics/counts_28d/count_users_closing_issues_monthly.yml
+// create config/metrics/counts_7d/count_users_reopening_issues_weekly.yml
+// create config/metrics/counts_28d/count_users_reopening_issues_monthly.yml
+```
+
+To create a metric definition used in EE, add the `--ee` flag.
+
+```shell
+bundle exec rails generate gitlab:usage_metric_definition:redis_hll issues users_closing_issues --ee
+// Creates 2 files
+// create config/metrics/counts_7d/i_closed_weekly.yml
+// create config/metrics/counts_28d/i_closed_monthly.yml
+```
+
+## Metrics Dictionary
+
+[Metrics Dictionary is a separate application](https://gitlab.com/gitlab-org/analytics-section/analytics-instrumentation/metric-dictionary).
+
+All metrics available in Service Ping are in the [Metrics Dictionary](https://metrics.gitlab.com/).
+
+### Copy query to clipboard
+
+To check if a metric has data in Sisense, use the copy query to clipboard feature. This copies a query that's ready to use in Sisense. The query gets the data for a given metric from the last five Service Pings for GitLab.com. For information about how to check if a Service Ping metric has data in Sisense, see this [demo](https://www.youtube.com/watch?v=n4o65ivta48).
diff --git a/doc/development/internal_analytics/service_ping/metrics_instrumentation.md b/doc/development/internal_analytics/service_ping/metrics_instrumentation.md
new file mode 100644
index 00000000000..b6ca773a572
--- /dev/null
+++ b/doc/development/internal_analytics/service_ping/metrics_instrumentation.md
@@ -0,0 +1,478 @@
+---
+stage: Analytics
+group: Analytics Instrumentation
+info: To determine the technical writer assigned to the Stage/Group associated with this page, see https://about.gitlab.com/handbook/product/ux/technical-writing/#assignments
+---
+
+# Metrics instrumentation guide
+
+This guide describes how to develop Service Ping metrics using metrics instrumentation.
+
+<i class="fa fa-youtube-play youtube" aria-hidden="true"></i>
+For a video tutorial, see the [Adding Service Ping metric via instrumentation class](https://youtu.be/p2ivXhNxUoY).
+
+## Nomenclature
+
+- **Instrumentation class**:
+ - Inherits one of the metric classes: `DatabaseMetric`, `RedisMetric`, `RedisHLLMetric`, `NumbersMetric` or `GenericMetric`.
+ - Implements the logic that calculates the value for a Service Ping metric.
+
+- **Metric definition**:
+ The Service Data metric YAML definition.
+
+- **Hardening**:
+  Hardening a method is the process that ensures the method fails safe, returning a fallback value like -1. A minimal illustration follows this list.
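+
+For illustration only, the fail-safe pattern looks roughly like this hand-written sketch; it is not the actual hardening code used by the instrumentation classes:
+
+```ruby
+# Illustrative only: a hardened calculation rescues query failures and
+# reports a fallback value instead of raising.
+def hardened_issue_count
+  Issue.count
+rescue ActiveRecord::StatementInvalid, ActiveRecord::QueryCanceled
+  -1 # fallback value reported in the payload
+end
+```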
+
+## How it works
+
+A metric definition has the [`instrumentation_class`](metrics_dictionary.md) field, which can be set to a class.
+
+The defined instrumentation class should inherit one of the existing metric classes: `DatabaseMetric`, `RedisMetric`, `RedisHLLMetric`, `NumbersMetric` or `GenericMetric`.
+
+The current convention is that a single instrumentation class corresponds to a single metric. On rare occasions, there are exceptions to that convention like [Redis metrics](#redis-metrics). To use a single instrumentation class for more than one metric, please reach out to one of the `@gitlab-org/analytics-section/analytics-instrumentation/engineers` members to consult about your case.
+
+Using the instrumentation classes ensures that metrics can fail safe individually, without breaking the entire
+ process of Service Ping generation.
+
+We have built a domain-specific language (DSL) to define the metrics instrumentation.
+
+## Database metrics
+
+You can use database metrics to track data kept in the database, for example, a count of issues that exist on a given instance.
+
+- `operation`: Operations for the given `relation`, one of `count`, `distinct_count`, `sum`, and `average`.
+- `relation`: Assigns lambda that returns the `ActiveRecord::Relation` for the objects we want to perform the `operation`. The assigned lambda can accept up to one parameter. The parameter is hashed and stored under the `options` key in the metric definition.
+- `start`: Specifies the start value of the batch counting, by default is `relation.minimum(:id)`.
+- `finish`: Specifies the end value of the batch counting, by default is `relation.maximum(:id)`.
+- `cache_start_and_finish_as`: Specifies the cache key for `start` and `finish` values and sets up caching them. Use this call when `start` and `finish` are expensive queries that should be reused between different metric calculations.
+- `available?`: Specifies whether the metric should be reported. The default is `true`.
+- `timestamp_column`: Optionally specifies the timestamp column used to filter records for time-constrained metrics. The default is `created_at`. A sketch that combines several of these options follows the examples below.
+
+[Example of a merge request that adds a database metric](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/60022).
+
+```ruby
+module Gitlab
+ module Usage
+ module Metrics
+ module Instrumentations
+ class CountIssuesMetric < DatabaseMetric
+ operation :count
+
+ relation ->(options) { Issue.where(confidential: options[:confidential]) }
+ end
+ end
+ end
+ end
+end
+```
+
+### Ordinary batch counters Example
+
+```ruby
+module Gitlab
+ module Usage
+ module Metrics
+ module Instrumentations
+ class CountIssuesMetric < DatabaseMetric
+ operation :count
+
+ start { Issue.minimum(:id) }
+ finish { Issue.maximum(:id) }
+
+ relation { Issue }
+ end
+ end
+ end
+ end
+end
+```
+
+### Distinct batch counters Example
+
+```ruby
+# frozen_string_literal: true
+
+module Gitlab
+ module Usage
+ module Metrics
+ module Instrumentations
+ class CountUsersAssociatingMilestonesToReleasesMetric < DatabaseMetric
+ operation :distinct_count, column: :author_id
+
+ relation { Release.with_milestones }
+
+ start { Release.minimum(:author_id) }
+ finish { Release.maximum(:author_id) }
+ end
+ end
+ end
+ end
+end
+```
+
+### Sum Example
+
+```ruby
+# frozen_string_literal: true
+
+module Gitlab
+ module Usage
+ module Metrics
+ module Instrumentations
+ class JiraImportsTotalImportedIssuesCountMetric < DatabaseMetric
+ operation :sum, column: :imported_issues_count
+
+ relation { JiraImportState.finished }
+ end
+ end
+ end
+ end
+end
+```
+
+### Average Example
+
+```ruby
+# frozen_string_literal: true
+
+module Gitlab
+ module Usage
+ module Metrics
+ module Instrumentations
+ class CountIssuesWeightAverageMetric < DatabaseMetric
+ operation :average, column: :weight
+
+ relation { Issue }
+ end
+ end
+ end
+ end
+end
+```
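+
+The remaining options from the list at the start of this section (`available?`, `timestamp_column`, and `cache_start_and_finish_as`) use the same class-level DSL. The following is a sketch only; the class name, relation, cache key, and feature flag are hypothetical, and it assumes these options are declared in the same style as `start` and `finish`:
+
+```ruby
+# frozen_string_literal: true
+
+module Gitlab
+  module Usage
+    module Metrics
+      module Instrumentations
+        # Hypothetical example combining several of the DatabaseMetric options above.
+        class CountConfidentialIssuesMetric < DatabaseMetric
+          operation :count
+
+          relation { Issue.where(confidential: true) }
+
+          # Filter time-constrained metrics on updated_at instead of the default created_at.
+          timestamp_column :updated_at
+
+          # Reuse the expensive start/finish lookups between metric calculations.
+          start { Issue.minimum(:id) }
+          finish { Issue.maximum(:id) }
+          cache_start_and_finish_as :count_confidential_issues
+
+          # Only report the metric when the (hypothetical) feature flag is enabled.
+          available? { Feature.enabled?(:count_confidential_issues_metric) }
+        end
+      end
+    end
+  end
+end
+```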
+
+## Redis metrics
+
+You can use Redis metrics to track events not kept in the database, for example, a count of how many times the search bar has been used.
+
+[Example of a merge request that adds `Redis` metrics](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/103455).
+
+The `RedisMetric` class can only be used as the `instrumentation_class` for Redis metrics with simple counters classes (classes that only inherit `BaseCounter` and set `PREFIX` and `KNOWN_EVENTS` constants). In case the counter class has additional logic included in it, a new `instrumentation_class`, inheriting from `RedisMetric`, needs to be created. This new class needs to include the additional logic from the counter class.
+
+Required options:
+
+- `event`: the event name.
+- `prefix`: the value of the `PREFIX` constant used in the counter classes from the `Gitlab::UsageDataCounters` namespace.
+
+Count unique values for `source_code_pushes` event.
+
+```yaml
+time_frame: all
+data_source: redis
+instrumentation_class: RedisMetric
+options:
+ event: pushes
+ prefix: source_code
+```
+
+### Availability-restrained Redis metrics
+
+If the Redis metric should only be available in the report under some conditions, then you must specify these conditions in a new class that is a child of the `RedisMetric` class.
+
+```ruby
+# frozen_string_literal: true
+
+module Gitlab
+ module Usage
+ module Metrics
+ module Instrumentations
+ class MergeUsageCountRedisMetric < RedisMetric
+ available? { Feature.enabled?(:merge_usage_data_missing_key_paths) }
+ end
+ end
+ end
+ end
+end
+```
+
+You must also use the class's name in the YAML setup.
+
+```yaml
+time_frame: all
+data_source: redis
+instrumentation_class: MergeUsageCountRedisMetric
+options:
+ event: pushes
+ prefix: source_code
+```
+
+## Redis HyperLogLog metrics
+
+You can use Redis HyperLogLog metrics to track events not kept in the database and incremented for unique values such as unique users,
+for example, a count of how many different users used the search bar.
+
+[Example of a merge request that adds a `RedisHLL` metric](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/61685).
+
+Count unique values for `i_quickactions_approve` event.
+
+```yaml
+time_frame: 28d
+data_source: redis_hll
+instrumentation_class: RedisHLLMetric
+options:
+ events:
+ - i_quickactions_approve
+```
+
+### Availability-restrained Redis HyperLogLog metrics
+
+If the Redis HyperLogLog metric should only be available in the report under some conditions, then you must specify these conditions in a new class that is a child of the `RedisHLLMetric` class.
+
+```ruby
+# frozen_string_literal: true
+
+module Gitlab
+ module Usage
+ module Metrics
+ module Instrumentations
+ class MergeUsageCountRedisHLLMetric < RedisHLLMetric
+ available? { Feature.enabled?(:merge_usage_data_missing_key_paths) }
+ end
+ end
+ end
+ end
+end
+```
+
+You must also use the class's name in the YAML setup.
+
+```yaml
+time_frame: 28d
+data_source: redis_hll
+instrumentation_class: MergeUsageCountRedisHLLMetric
+options:
+ events:
+ - i_quickactions_approve
+```
+
+## Aggregated metrics
+
+<div class="video-fallback">
+ See the video from: <a href="https://www.youtube.com/watch?v=22LbYqHwtUQ">Product Intelligence Office Hours Oct 6th</a> for an aggregated metrics walk-through.
+</div>
+<figure class="video-container">
+ <iframe src="https://www.youtube-nocookie.com/embed/22LbYqHwtUQ" frameborder="0" allowfullscreen> </iframe>
+</figure>
+
+The aggregated metrics feature provides insight into the number of data attributes, for example `pseudonymized_user_ids`, that occurred in a collection of events. For example, you can aggregate the number of users who perform multiple actions such as creating a new issue and opening
+a new merge request.
+
+You can use a YAML file to define your aggregated metrics. The following arguments are required:
+
+- `options.events`: List of event names to aggregate into metric data. All events in this list must
+ use the same data source. Additional data source requirements are described in
+ [Database sourced aggregated metrics](implement.md#database-sourced-aggregated-metrics) and
+ [Redis sourced aggregated metrics](implement.md#redis-sourced-aggregated-metrics).
+- `options.aggregate.operator`: Operator that defines how the aggregated metric data is counted. Available operators are:
+ - `OR`: Removes duplicates and counts all entries that triggered any of the listed events.
+ - `AND`: Removes duplicates and counts all elements that were observed triggering all of the following events.
+- `options.aggregate.attribute`: Information pointing to the attribute that is being aggregated across events.
+- `time_frame`: One or more valid time frames. Use these to limit the data included in aggregated metrics to events within a specific date-range. Valid time frames are:
+ - `7d`: The last 7 days of data.
+ - `28d`: The last 28 days of data.
+ - `all`: All historical data, only available for `database` sourced aggregated metrics.
+- `data_source`: Data source used to collect all events data included in the aggregated metrics. Valid data sources are:
+ - [`database`](implement.md#database-sourced-aggregated-metrics)
+ - [`redis_hll`](implement.md#redis-sourced-aggregated-metrics)
+
+Refer to merge request [98206](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/98206) for an example of a merge request that adds an `AggregatedMetric` metric.
+
+Count unique `user_ids` that occurred in at least one of the events: `incident_management_alert_status_changed`,
+`incident_management_alert_assigned`, `incident_management_alert_todo`, `incident_management_alert_create_incident`.
+
+```yaml
+time_frame: 28d
+instrumentation_class: AggregatedMetric
+data_source: redis_hll
+options:
+ aggregate:
+ operator: OR
+ attribute: user_id
+ events:
+    - incident_management_alert_status_changed
+    - incident_management_alert_assigned
+    - incident_management_alert_todo
+    - incident_management_alert_create_incident
+```
+
+### Availability-restrained Aggregated metrics
+
+If the Aggregated metric should only be available in the report under specific conditions, then you must specify these conditions in a new class that is a child of the `AggregatedMetric` class.
+
+```ruby
+# frozen_string_literal: true
+
+module Gitlab
+ module Usage
+ module Metrics
+ module Instrumentations
+ class MergeUsageCountAggregatedMetric < AggregatedMetric
+ available? { Feature.enabled?(:merge_usage_data_missing_key_paths) }
+ end
+ end
+ end
+ end
+end
+```
+
+You must also use the class's name in the YAML setup.
+
+```yaml
+time_frame: 28d
+instrumentation_class: MergeUsageCountAggregatedMetric
+data_source: redis_hll
+options:
+ aggregate:
+ operator: OR
+ attribute: user_id
+ events:
+    - incident_management_alert_status_changed
+    - incident_management_alert_assigned
+    - incident_management_alert_todo
+    - incident_management_alert_create_incident
+```
+
+## Numbers metrics
+
+- `operation`: Operations for the given `data` block. Currently, only the `add` operation is supported.
+- `data`: a `block` which contains an array of numbers.
+- `available?`: Specifies whether the metric should be reported. The default is `true`.
+
+```ruby
+# frozen_string_literal: true
+
+module Gitlab
+ module Usage
+ module Metrics
+ module Instrumentations
+ class IssuesBoardsCountMetric < NumbersMetric
+ operation :add
+
+ data do |time_frame|
+ [
+ CountIssuesMetric.new(time_frame: time_frame).value,
+ CountBoardsMetric.new(time_frame: time_frame).value
+ ]
+ end
+ end
+ end
+ end
+ end
+end
+```
+
+You must also include the instrumentation class name in the YAML setup.
+
+```yaml
+time_frame: 28d
+instrumentation_class: IssuesBoardsCountMetric
+```
+
+## Generic metrics
+
+You can use generic metrics for other metrics, for example, an instance's database version. Observation-type data always has a Generic metric counter type.
+
+- `value`: Specifies the value of the metric.
+- `available?`: Specifies whether the metric should be reported. The default is `true`.
+
+[Example of a merge request that adds a generic metric](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/60256).
+
+```ruby
+module Gitlab
+ module Usage
+ module Metrics
+ module Instrumentations
+ class UuidMetric < GenericMetric
+ value do
+ Gitlab::CurrentSettings.uuid
+ end
+ end
+ end
+ end
+ end
+end
+```
+
+## Support for instrumentation classes
+
+There is support for:
+
+- `count`, `distinct_count`, `estimate_batch_distinct_count`, `sum`, and `average` for [database metrics](#database-metrics).
+- [Redis metrics](#redis-metrics).
+- [Redis HLL metrics](#redis-hyperloglog-metrics).
+- `add` for [numbers metrics](#numbers-metrics).
+- [Generic metrics](#generic-metrics), which are metrics based on settings or configurations.
+
+There is no support for:
+
+- `add`, `histogram` for database metrics.
+
+You can [track the progress to support these](https://gitlab.com/groups/gitlab-org/-/epics/6118).
+
+## Create a new metric instrumentation class
+
+To create a stub instrumentation for a Service Ping metric, you can use a dedicated [generator](https://gitlab.com/gitlab-org/gitlab/-/blob/master/lib/generators/gitlab/usage_metric_generator.rb).
+
+The generator takes the class name as an argument and the following options:
+
+- `--type=TYPE` Required. Indicates the metric type. It must be one of: `database`, `generic`, `redis`, `numbers`.
+- `--operation` Required for the `database` and `numbers` types.
+ - For `database` it must be one of: `count`, `distinct_count`, `estimate_batch_distinct_count`, `sum`, `average`.
+ - For `numbers` it must be: `add`.
+- `--ee` Indicates if the metric is for EE.
+
+```shell
+rails generate gitlab:usage_metric CountIssues --type database --operation distinct_count
+ create lib/gitlab/usage/metrics/instrumentations/count_issues_metric.rb
+ create spec/lib/gitlab/usage/metrics/instrumentations/count_issues_metric_spec.rb
+```
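+
+The generated file contains a class stub for you to fill in. The exact template depends on your GitLab version, but for the command above it is roughly similar to this hedged sketch (the `operation` column and `relation` body shown here are illustrative placeholders):
+
+```ruby
+# frozen_string_literal: true
+
+# Hedged sketch of a generated stub; the real template may differ.
+module Gitlab
+  module Usage
+    module Metrics
+      module Instrumentations
+        class CountIssuesMetric < DatabaseMetric
+          operation :distinct_count, column: :author_id # column is illustrative
+
+          relation { Issue } # replace with the relation you want to count
+        end
+      end
+    end
+  end
+end
+```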
+
+## Migrate Service Ping metrics to instrumentation classes
+
+This guide describes how to migrate a Service Ping metric from [`lib/gitlab/usage_data.rb`](https://gitlab.com/gitlab-org/gitlab/-/blob/master/lib/gitlab/usage_data.rb) or [`ee/lib/ee/gitlab/usage_data.rb`](https://gitlab.com/gitlab-org/gitlab/-/blob/master/ee/lib/ee/gitlab/usage_data.rb) to instrumentation classes.
+
+1. Choose the metric type:
+
+- [Database metric](#database-metrics)
+- [Redis HyperLogLog metrics](#redis-hyperloglog-metrics)
+- [Redis metric](#redis-metrics)
+- [Numbers metric](#numbers-metrics)
+- [Generic metric](#generic-metrics)
+
+1. Determine the location of the instrumentation class: either under `ee` or outside `ee`.
+
+1. [Generate the instrumentation class file](#create-a-new-metric-instrumentation-class).
+
+1. Fill the instrumentation class body:
+
+ - Add code logic for the metric. This might be similar to the metric implementation in `usage_data.rb` (a hedged before-and-after sketch follows this list).
+ - Add tests for the individual metric in [`spec/lib/gitlab/usage/metrics/instrumentations/`](https://gitlab.com/gitlab-org/gitlab/-/tree/master/spec/lib/gitlab/usage/metrics/instrumentations).
+ - Add tests for Service Ping.
+
+1. [Generate the metric definition file](metrics_dictionary.md#create-a-new-metric-definition).
+
+1. Remove the code from [`lib/gitlab/usage_data.rb`](https://gitlab.com/gitlab-org/gitlab/-/blob/master/lib/gitlab/usage_data.rb) or [`ee/lib/ee/gitlab/usage_data.rb`](https://gitlab.com/gitlab-org/gitlab/-/blob/master/ee/lib/ee/gitlab/usage_data.rb).
+
+1. Remove the tests from [`spec/lib/gitlab/usage_data_spec.rb`](https://gitlab.com/gitlab-org/gitlab/-/blob/master/spec/lib/gitlab/usage_data_spec.rb) or [`ee/spec/lib/ee/gitlab/usage_data_spec.rb`](https://gitlab.com/gitlab-org/gitlab/-/blob/master/ee/spec/lib/ee/gitlab/usage_data_spec.rb).
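+
+As a hedged before-and-after sketch (the `boards` metric is purely illustrative): an entry such as `boards: count(Board)` in `usage_data.rb` becomes a small instrumentation class like the following.
+
+```ruby
+# frozen_string_literal: true
+
+# Illustrative sketch only: shows the shape of a migration of
+# `boards: count(Board)` out of usage_data.rb into a class.
+module Gitlab
+  module Usage
+    module Metrics
+      module Instrumentations
+        class CountBoardsMetric < DatabaseMetric
+          operation :count
+
+          relation { Board }
+        end
+      end
+    end
+  end
+end
+```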
+
+## Troubleshoot metrics
+
+Sometimes metrics fail for reasons that are not immediately clear. The failures can be related to performance issues or other problems.
+The following pairing session video gives you an example of an investigation into a real-world failing metric.
+
+<div class="video-fallback">
+ See the video from: <a href="https://www.youtube.com/watch?v=y_6m2POx2ug">Product Intelligence Office Hours Oct 27th</a> to learn more about the metrics troubleshooting process.
+</div>
+<figure class="video-container">
+ <iframe src="https://www.youtube-nocookie.com/embed/y_6m2POx2ug" frameborder="0" allowfullscreen> </iframe>
+</figure>
diff --git a/doc/development/internal_analytics/service_ping/metrics_lifecycle.md b/doc/development/internal_analytics/service_ping/metrics_lifecycle.md
new file mode 100644
index 00000000000..cc56863690c
--- /dev/null
+++ b/doc/development/internal_analytics/service_ping/metrics_lifecycle.md
@@ -0,0 +1,106 @@
+---
+stage: Analytics
+group: Analytics Instrumentation
+info: To determine the technical writer assigned to the Stage/Group associated with this page, see https://about.gitlab.com/handbook/product/ux/technical-writing/#assignments
+---
+
+# Service Ping metric lifecycle
+
+The following guidelines explain the steps to follow at each stage of a metric's lifecycle.
+
+## Add a new metric
+
+Follow the [Implement Service Ping](implement.md) guide.
+
+## Change an existing metric
+
+WARNING:
+We want to **PREVENT** changes to the calculation logic or important attributes on any metric as this invalidates comparisons of the same metric across different versions of GitLab.
+
+If you change a metric, you have to consider that not all instances of GitLab are running on the newest version. Old instances will still report the old version of the metric.
+Additionally, a metric's reported numbers are primarily interesting compared to previously reported numbers.
+As a result, if you need to change one of the following parts of a metric, you need to add a new metric instead. It's your choice whether to keep the old metric alongside the new one or [remove it](#remove-a-metric).
+
+- **calculation logic**: Any change that can produce a different value than the previous implementation did.
+- **YAML attributes**: The following attributes are directly used for analysis or calculation: `key_path`, `time_frame`, `value_type`, `data_source`.
+
+If you change the `performance_indicator_type` attribute of a metric, or think your case needs an exception from the outlined rules, notify the Customer Success Ops team (`@csops-team`), Analytics Engineers (`@gitlab-data/analytics-engineers`), and Product Analysts (`@gitlab-data/product-analysts`) by `@` mentioning those groups in a comment on the merge request or issue.
+
+You can change any other attributes without impact to the calculation or analysis. See [this video tutorial](https://youtu.be/bYf3c01KCls) for help updating metric attributes.
+
+Currently, the [Metrics Dictionary](https://metrics.gitlab.com/) is built automatically once a day, so a change to a metric's YAML file appears in the dictionary within 24 hours.
+
+## Remove a metric
+
+WARNING:
+If a metric is not used in Sisense or any other system after 6 months, the
+Analytics Instrumentation team marks it as inactive and assigns it to the group owner for review.
+
+We are working on automating this process. See [this epic](https://gitlab.com/groups/gitlab-org/-/epics/8988) for details.
+
+Analytics Instrumentation removes metrics from Service Ping if they are not used in any Sisense dashboard.
+
+For an example of the metric removal process, see this [example issue](https://gitlab.com/gitlab-org/gitlab/-/issues/388236).
+
+To remove a metric:
+
+1. Create an issue for removing the metric if none exists yet. The issue needs to outline why the metric should be deleted. You can use this issue to document the removal process.
+
+1. Verify the metric is not used to calculate the conversational index. The
+ conversational index is a measure that reports back to self-managed instances
+ to inform administrators of the progress of DevOps adoption for the instance.
+
+ You can check
+ [`CalculateConvIndexService`](https://gitlab.com/gitlab-services/version-gitlab-com/-/blob/master/app/services/calculate_conv_index_service.rb)
+ to view the metrics that are used. The metrics are represented
+ as the keys that are passed as a field argument into the `get_value` method.
+
+1. Verify that removing the metric from the Service Ping payload does not cause
+ errors in [Version App](https://gitlab.com/gitlab-services/version-gitlab-com)
+ when the updated payload is collected and processed. Version App collects
+ and persists all Service Ping reports. To verify Service Ping processing in your local development environment, follow this [guide](https://www.youtube.com/watch?v=FS5emplabRU).
+ Alternatively, you can modify [fixtures](https://gitlab.com/gitlab-services/version-gitlab-com/-/blob/master/spec/support/usage_data_helpers.rb#L540)
+ used to test the [`UsageDataController#create`](https://gitlab.com/gitlab-services/version-gitlab-com/-/blob/3760ef28/spec/controllers/usage_data_controller_spec.rb#L75)
+ endpoint, and ensure that the test suite does not fail when the metric you wish to remove is not included in the test payload.
+
+1. Remove data from Redis.
+
+ For [Ordinary Redis](implement.md#ordinary-redis-counters) counters, remove the data stored in Redis.
+
+ - Add a migration to remove the data from Redis for the related Redis keys. For more details, see [this MR example](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/82604/diffs).
+
+1. Create an issue in the
+ [GitLab Data Team project](https://gitlab.com/gitlab-data/analytics/-/issues).
+ Ask for confirmation that the metric is not referred to in any Sisense dashboards and
+ can be safely removed from Service Ping. Use this
+ [example issue](https://gitlab.com/gitlab-data/analytics/-/issues/15266) for guidance.
+
+1. Notify the Customer Success Ops team (`@csops-team`), Analytics Engineers (`@gitlab-data/analytics-engineers`), and Product Analysts (`@gitlab-data/product-analysts`) by `@` mentioning those groups in a comment in the issue from step 1 regarding the deletion of the metric.
+ Many Service Ping metrics are relied upon for health score and XMAU reporting and unexpected changes to those metrics could break reporting.
+
+1. After you verify the metric can be safely removed,
+ update the attributes of the metric's YAML definition:
+
+ - Set the `status:` to `removed`.
+ - Set `removed_by_url:` to the URL of the MR that removes the metric.
+ - Set `milestone_removed:` to the number of the
+ milestone in which the metric was removed.
+
+ Do not remove the metric's YAML definition altogether. Some self-managed
+ instances might not immediately update to the latest version of GitLab, and
+ therefore continue to report the removed metric. The Analytics Instrumentation team
+ requires a record of all removed metrics to identify and filter them.
+
+ For example, see this [merge request](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/60149/diffs#b01f429a54843feb22265100c0e4fec1b7da1240_10_10).
+
+1. After you verify the metric can be safely removed,
+ remove the metric's instrumentation from
+ [`lib/gitlab/usage_data.rb`](https://gitlab.com/gitlab-org/gitlab/-/blob/master/lib/gitlab/usage_data.rb)
+ or
+ [`ee/lib/ee/gitlab/usage_data.rb`](https://gitlab.com/gitlab-org/gitlab/-/blob/master/ee/lib/ee/gitlab/usage_data.rb).
+
+ For example, see this [merge request](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/60149/diffs#6335dc533bd21df26db9de90a02dd66278c2390d_167_167).
+
+1. Remove any other records related to the metric:
+ - The feature flag YAML file at [`config/feature_flags/*/*.yaml`](https://gitlab.com/gitlab-org/gitlab/-/tree/master/config/feature_flags).
+ - The entry in the known events YAML file at [`lib/gitlab/usage_data_counters/known_events/*.yaml`](https://gitlab.com/gitlab-org/gitlab/-/tree/master/lib/gitlab/usage_data_counters/known_events).
diff --git a/doc/development/internal_analytics/service_ping/performance_indicator_metrics.md b/doc/development/internal_analytics/service_ping/performance_indicator_metrics.md
new file mode 100644
index 00000000000..d7811c52bb1
--- /dev/null
+++ b/doc/development/internal_analytics/service_ping/performance_indicator_metrics.md
@@ -0,0 +1,16 @@
+---
+stage: Analytics
+group: Analytics Instrumentation
+info: To determine the technical writer assigned to the Stage/Group associated with this page, see https://about.gitlab.com/handbook/product/ux/technical-writing/#assignments
+---
+
+# Performance Indicator Metrics guide
+
+This guide describes how to use metrics definitions to define [performance indicator](https://about.gitlab.com/handbook/product/analytics-instrumentation-guide/#implementing-product-performance-indicators) metrics.
+
+To use a metric definition to manage a performance indicator:
+
+1. Create a merge request that includes related changes.
+1. Use the labels `~"analytics instrumentation"` and `~"Data Warehouse::Impact Check"`.
+1. Update the metric definition `performance_indicator_type` [field](metrics_dictionary.md#metrics-definition-and-validation).
+1. Create an issue in GitLab Product Data Insights project with the [PI Chart Help template](https://gitlab.com/gitlab-data/product-analytics/-/issues/new?issuable_template=PI%20Chart%20Help) to have the new metric visualized.
diff --git a/doc/development/internal_analytics/service_ping/review_guidelines.md b/doc/development/internal_analytics/service_ping/review_guidelines.md
new file mode 100644
index 00000000000..31b6c3f5580
--- /dev/null
+++ b/doc/development/internal_analytics/service_ping/review_guidelines.md
@@ -0,0 +1,80 @@
+---
+stage: Analytics
+group: Analytics Instrumentation
+info: To determine the technical writer assigned to the Stage/Group associated with this page, see https://about.gitlab.com/handbook/product/ux/technical-writing/#assignments
+---
+
+# Service Ping review guidelines
+
+This page includes introductory material for an
+[Analytics Instrumentation](https://about.gitlab.com/handbook/engineering/development/analytics/analytics-instrumentation/)
+review, and is specific to Service Ping related reviews. For broader advice and
+general best practices for code reviews, refer to our [code review guide](../../code_review.md).
+
+## Resources for reviewers
+
+- [Service Ping Guide](index.md)
+- [Metrics Dictionary](https://metrics.gitlab.com/)
+
+## Review process
+
+We recommend an Analytics Instrumentation review when a merge request (MR) touches
+any of the following Service Ping files:
+
+- `usage_data*` files.
+- The Metrics Dictionary, including files in:
+ - [`config/metrics`](https://gitlab.com/gitlab-org/gitlab/-/tree/master/config/metrics).
+ - [`ee/config/metrics`](https://gitlab.com/gitlab-org/gitlab/-/tree/master/ee/config/metrics).
+ - [`schema.json`](https://gitlab.com/gitlab-org/gitlab/-/blob/master/config/metrics/schema.json).
+- Analytics Instrumentation tooling. For example,
+ [`Gitlab::UsageMetricDefinitionGenerator`](https://gitlab.com/gitlab-org/gitlab/-/blob/master/lib/generators/gitlab/usage_metric_definition_generator.rb)
+
+### Roles and process
+
+#### The merge request **author** should
+
+- Decide whether an Analytics Instrumentation review is needed. You can skip the Analytics Instrumentation
+review and remove the labels if the changes are not related to the Analytics Instrumentation domain and
+are regular backend changes.
+- If an Analytics Instrumentation review is needed, add the labels
+ `~analytics instrumentation` and `~analytics instrumentation::review pending`.
+- For merge requests authored by Analytics Instrumentation team members:
+ - Assign both the `~backend` and `~analytics instrumentation` reviews to another Analytics Instrumentation team member.
+ - Assign the maintainer review to someone outside of the Analytics Instrumentation group.
+- Assign an
+ [engineer](https://gitlab.com/groups/gitlab-org/analytics-section/analytics-instrumentation/engineers/-/group_members?with_inherited_permissions=exclude) from the Analytics Instrumentation team for a review.
+- Set the correct attributes in the metric's YAML definition:
+ - `product_section`, `product_stage`, `product_group`
+ - Provide a clear description of the metric.
+- Add a changelog [according to guidelines](../../changelog.md).
+
+#### The Analytics Instrumentation **reviewer** should
+
+- Perform a first-pass review on the merge request and suggest improvements to the author.
+- Check the [metrics location](metrics_dictionary.md#metric-key_path) in
+ the Service Ping JSON payload.
+- Add the `~database` label and ask for a [database review](../../database_review.md) for
+ metrics that are based on the database.
+- Add `~Data Warehouse::Impact Check` for any database metric that has a query change. Changes in queries can affect [data operations](https://about.gitlab.com/handbook/business-technology/data-team/how-we-work/triage/#gitlabcom-db-structure-changes).
+- For tracking using Redis HLL (HyperLogLog):
+ - Check if a [feature flag is needed](implement.md#recommendations).
+- For a metric's YAML definition:
+ - Check the metric's `description`.
+ - Check the metric's `key_path`.
+ - Check the `product_section`, `product_stage`, and `product_group` fields.
+ Read the [stages file](https://gitlab.com/gitlab-com/www-gitlab-com/blob/master/data/stages.yml).
+ - Check the file location. Consider the time frame, and if the file should be under `ee`.
+ - Check the tiers.
+- If a metric was changed or removed: Make sure the MR author notified the Customer Success Ops team (`@csops-team`), Analytics Engineers (`@gitlab-data/analytics-engineers`), and Product Analysts (`@gitlab-data/product-analysts`) by `@` mentioning those groups in a comment on the issue for the MR and all of these groups have acknowledged the removal.
+- Metrics instrumentations
+ - Recommend using metrics instrumentation for new metrics, [if possible](metrics_instrumentation.md#support-for-instrumentation-classes).
+- Approve the MR, and relabel the MR with `~"analytics instrumentation::approved"`.
+
+## Review workload distribution
+
+[Danger bot](../../dangerbot.md) adds the list of changed Analytics Instrumentation files
+and pings the
+[`@gitlab-org/analytics-section/analytics-instrumentation/engineers`](https://gitlab.com/groups/gitlab-org/analytics-section/analytics-instrumentation/engineers/-/group_members?with_inherited_permissions=exclude) group for merge requests
+that are not drafts.
+
+Any of the Analytics Instrumentation engineers can be assigned for the Analytics Instrumentation review.
diff --git a/doc/development/internal_analytics/service_ping/troubleshooting.md b/doc/development/internal_analytics/service_ping/troubleshooting.md
new file mode 100644
index 00000000000..2b285b85bd0
--- /dev/null
+++ b/doc/development/internal_analytics/service_ping/troubleshooting.md
@@ -0,0 +1,164 @@
+---
+stage: Analytics
+group: Analytics Instrumentation
+info: To determine the technical writer assigned to the Stage/Group associated with this page, see https://about.gitlab.com/handbook/product/ux/technical-writing/#assignments
+---
+
+# Troubleshooting Service Ping
+
+## Service Ping Payload drop
+
+### Symptoms
+
+You will be alerted by the [Data team](https://about.gitlab.com/handbook/business-technology/data-team/) and their [Monte Carlo alerting](https://about.gitlab.com/handbook/business-technology/data-team/platform/monte-carlo/).
+
+### Locating the problem
+
+First, identify the stage of the Service Ping data pipeline at which the drop occurs.
+
+Start at [Service Ping Health Dashboard](https://app.periscopedata.com/app/gitlab/968489) on Sisense.
+
+You can use [this query](https://gitlab.com/gitlab-org/gitlab/-/issues/347298#note_836685350) as an example to detect when the drop started.
+
+### Troubleshoot the GitLab application layer
+
+For an example of an investigation into an unexpected drop in Service Ping payload events volume, see [this issue](https://gitlab.com/gitlab-data/analytics/-/issues/11071).
+
+### Troubleshoot VersionApp layer
+
+Check if the [export jobs](https://gitlab.com/gitlab-services/version-gitlab-com#data-export-using-pipeline-schedules) are successful.
+
+Check [Service Ping errors](https://app.periscopedata.com/app/gitlab/968489?widget=14609989&udv=0) in the [Service Ping Health Dashboard](https://app.periscopedata.com/app/gitlab/968489).
+
+### Troubleshoot Google Storage layer
+
+Check if the files are present in [Google Storage](https://console.cloud.google.com/storage/browser/cloudsql-gs-production-efd5e8-cloudsql-exports;tab=objects?project=gs-production-efd5e8&prefix=&forceOnObjectsSortingFiltering=false).
+
+### Troubleshoot the data warehouse layer
+
+Reach out to the [Data team](https://about.gitlab.com/handbook/business-technology/data-team/) to ask about the current state of the data warehouse. Their handbook page includes a [section with contact details](https://about.gitlab.com/handbook/business-technology/data-team/#how-to-connect-with-us).
+
+### Cannot disable Service Ping with the configuration file
+
+The method to disable Service Ping with the GitLab configuration file does not work in
+GitLab versions 9.3.0 to 13.12.3. To disable it, you must use the Admin Area in
+the GitLab UI instead. For more information, see
+[this issue](https://gitlab.com/gitlab-org/gitlab/-/issues/333269).
+
+GitLab functionality and application settings cannot override or circumvent
+restrictions at the network layer. If Service Ping is blocked by your firewall,
+you are not impacted by this bug.
+
+#### Check if you are affected
+
+You can check if you were affected by this bug by using the Admin Area or by
+checking the configuration file of your GitLab instance:
+
+- Using the Admin Area:
+
+ 1. On the left sidebar, expand the top-most chevron (**{chevron-down}**).
+ 1. Select **Admin Area**.
+ 1. On the left sidebar, select **Settings > Metrics and profiling**.
+ 1. Expand **Usage statistics**.
+ 1. Are you able to check or uncheck the checkbox to disable Service Ping?
+
+ - If _yes_, your GitLab instance is not affected by this bug.
+ - If you can't check or uncheck the checkbox, you are affected by this bug.
+ See the steps on [how to fix this](#how-to-fix-the-cannot-disable-service-ping-bug).
+
+- Checking your GitLab instance configuration file:
+
+ To check whether you're impacted by this bug, check your instance configuration
+ settings. The configuration file in which Service Ping can be disabled depends
+ on your installation and deployment method, but is typically one of the following:
+
+ - `/etc/gitlab/gitlab.rb` for Linux package installations and Docker.
+ - `charts.yaml` for GitLab Helm and cloud-native Kubernetes deployments.
+ - `gitlab.yml` for GitLab installations from source.
+
+ To check the relevant configuration file for strings that indicate whether
+ Service Ping is disabled, you can use `grep`:
+
+ ```shell
+ # Linux package
+ grep "usage_ping_enabled'\] = false" /etc/gitlab/gitlab.rb
+
+ # Kubernetes charts
+ grep "enableUsagePing: false" values.yaml
+
+ # From source
+ grep "usage_ping_enabled: false" config/gitlab.yml
+ ```
+
+ If you see any output after running the relevant command, your GitLab instance
+ may be affected by the bug. Otherwise, your instance is not affected.
+
+#### How to fix the "Cannot disable Service Ping" bug
+
+To work around this bug, you have two options:
+
+- [Update](../../../update/index.md) to GitLab 13.12.4 or newer to fix this bug.
+- If you can't update to GitLab 13.12.4 or newer, enable Service Ping in the
+ configuration file, then disable Service Ping in the UI. For example, if you're
+ using the Linux package:
+
+ 1. Edit `/etc/gitlab/gitlab.rb`:
+
+ ```ruby
+ gitlab_rails['usage_ping_enabled'] = true
+ ```
+
+ 1. Reconfigure GitLab:
+
+ ```shell
+ sudo gitlab-ctl reconfigure
+ ```
+
+ 1. On the left sidebar, expand the top-most chevron (**{chevron-down}**).
+ 1. Select **Admin Area**.
+ 1. On the left sidebar, select **Settings > Metrics and profiling**.
+ 1. Expand **Usage statistics**.
+ 1. Clear the **Enable Service Ping** checkbox.
+ 1. Select **Save Changes**.
+
+## Generate Service Ping
+
+### Generate or get the cached Service Ping in rails console
+
+Use the following method in the [rails console](../../../administration/operations/rails_console.md#starting-a-rails-console-session).
+
+```ruby
+Gitlab::Usage::ServicePingReport.for(output: :all_metrics_values, cached: true)
+```
+
+### Generate a fresh new Service Ping
+
+Use the following method in the [rails console](../../../administration/operations/rails_console.md#starting-a-rails-console-session).
+
+This also refreshes the cached Service Ping displayed in the Admin Area.
+
+```ruby
+Gitlab::Usage::ServicePingReport.for(output: :all_metrics_values)
+```
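+
+Both calls return a plain Ruby hash, so you can drill into it interactively to inspect a single metric. The key names below are illustrative; check `payload.keys` to see what your instance actually reports.
+
+```ruby
+payload = Gitlab::Usage::ServicePingReport.for(output: :all_metrics_values, cached: true)
+
+payload.keys                  # top-level sections of the payload
+payload.dig(:counts, :issues) # a single metric value, if present on your instance
+```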
+
+### Generate and print
+
+Generates Service Ping data in JSON format.
+
+```shell
+gitlab-rake gitlab:usage_data:generate
+```
+
+Generates Service Ping data in YAML format:
+
+```shell
+gitlab-rake gitlab:usage_data:dump_sql_in_yaml
+```
+
+### Generate and send Service Ping
+
+Prints the metrics saved in `conversational_development_index_metrics`.
+
+```shell
+gitlab-rake gitlab:usage_data:generate_and_send
+```
diff --git a/doc/development/internal_analytics/service_ping/usage_data.md b/doc/development/internal_analytics/service_ping/usage_data.md
new file mode 100644
index 00000000000..b6ec3e00670
--- /dev/null
+++ b/doc/development/internal_analytics/service_ping/usage_data.md
@@ -0,0 +1,69 @@
+---
+stage: Analytics
+group: Analytics Instrumentation
+info: To determine the technical writer assigned to the Stage/Group associated with this page, see https://about.gitlab.com/handbook/product/ux/technical-writing/#assignments
+---
+
+# Usage Data Metrics guide
+
+This guide describes deprecated usage for metrics in `usage_data.rb`.
+
+NOTE:
+Implementing metrics directly in `usage_data.rb` is deprecated. Instead, we recommend you use [instrumentation classes](metrics_instrumentation.md).
+
+## Ordinary batch counters
+
+Performs a simple, non-distinct batch count of a given `ActiveRecord_Relation`, automatically reduces the `batch_size`, and
+handles the `ActiveRecord::StatementInvalid` error.
+
+Method:
+
+```ruby
+count(relation, column = nil, batch: true, start: nil, finish: nil)
+```
+
+Arguments:
+
+- `relation`: the ActiveRecord_Relation to perform the count on
+- `column`: the column to perform the count on; defaults to the primary key
+- `batch`: default `true` to use batch counting
+- `start`: custom start of the batch counting to avoid complex min calculations
+- `finish`: custom end of the batch counting to avoid complex max calculations
+
+Examples:
+
+```ruby
+count(User.active)
+count(::Clusters::Cluster.aws_installed.enabled, :cluster_id)
+count(::Clusters::Cluster.aws_installed.enabled, :cluster_id, start: ::Clusters::Cluster.minimum(:id), finish: ::Clusters::Cluster.maximum(:id))
+```
+
+## Distinct batch counters
+
+Performs a distinct batch count of a given `ActiveRecord_Relation` on a given column, automatically reduces the `batch_size`, and
+handles the `ActiveRecord::StatementInvalid` error.
+
+Method:
+
+```ruby
+distinct_count(relation, column = nil, batch: true, batch_size: nil, start: nil, finish: nil)
+```
+
+Arguments:
+
+- `relation`: the ActiveRecord_Relation to perform the count
+- `column`: the column to perform the distinct count on; defaults to the primary key
+- `batch`: default `true` to use batch counting
+- `batch_size`: if not set, defaults to 10,000 as defined in `Gitlab::Database::BatchCounter`
+- `start`: custom start of the batch counting to avoid complex min calculations
+- `finish`: custom end of the batch counting to avoid complex max calculations
+
+WARNING:
+Counting over non-unique columns can lead to performance issues. For more information, see the [iterating tables in batches](../../database/iterating_tables_in_batches.md) guide.
+
+Examples:
+
+```ruby
+distinct_count(::Project, :creator_id)
+distinct_count(::Note.with_suggestions.where(time_period), :author_id, start: ::User.minimum(:id), finish: ::User.maximum(:id))
+```
diff --git a/doc/development/internal_analytics/snowplow/event_dictionary_guide.md b/doc/development/internal_analytics/snowplow/event_dictionary_guide.md
new file mode 100644
index 00000000000..6e8947e0210
--- /dev/null
+++ b/doc/development/internal_analytics/snowplow/event_dictionary_guide.md
@@ -0,0 +1,91 @@
+---
+stage: Analytics
+group: Analytics Instrumentation
+info: To determine the technical writer assigned to the Stage/Group associated with this page, see https://about.gitlab.com/handbook/product/ux/technical-writing/#assignments
+---
+
+# Event dictionary guide
+
+NOTE:
+The event dictionary is a work in progress, and this process is subject to change.
+
+This guide describes the event dictionary and how it's implemented.
+
+## Event definition and validation
+
+This process is meant to document all Snowplow events and ensure consistency. Every Snowplow event needs to have such a definition. Event definitions must comply with the [JSON Schema](https://gitlab.com/gitlab-org/gitlab/-/blob/master/config/events/schema.json).
+
+All event definitions are stored in the following directories:
+
+- [`config/events`](https://gitlab.com/gitlab-org/gitlab/-/tree/master/config/events)
+- [`ee/config/events`](https://gitlab.com/gitlab-org/gitlab/-/tree/master/ee/config/events)
+
+Each event is defined in a separate YAML file consisting of the following fields:
+
+| Field | Required | Additional information |
+|------------------------|----------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| `description` | yes | A description of the event. |
+| `category` | yes | The event category (see [Event schema](index.md#event-schema)). |
+| `action` | yes | The event action (see [Event schema](index.md#event-schema)). |
+| `label_description` | no | A description of the event label (see [Event schema](index.md#event-schema)). |
+| `property_description` | no | A description of the event property (see [Event schema](index.md#event-schema)). |
+| `value_description` | no | A description of the event value (see [Event schema](index.md#event-schema)). |
+| `extra_properties` | no | The type and description of each extra property sent with the event. |
+| `identifiers` | no | A list of identifiers sent with the event. Can be set to one or more of `project`, `user`, or `namespace`. |
+| `iglu_schema_url` | no | The URL to the custom schema sent with the event, for example, `iglu:com.gitlab/gitlab_experiment/jsonschema/1-0-0`. |
+| `product_section` | yes | The [section](https://gitlab.com/gitlab-com/www-gitlab-com/-/blob/master/data/sections.yml). |
+| `product_stage` | no | The [stage](https://gitlab.com/gitlab-com/www-gitlab-com/blob/master/data/stages.yml) for the event. |
+| `product_group` | yes | The [group](https://gitlab.com/gitlab-com/www-gitlab-com/blob/master/data/stages.yml) that owns the event. |
+| `milestone` | no | The milestone when the event is introduced. |
+| `introduced_by_url` | no | The URL to the merge request that introduced the event. |
+| `distributions` | yes | The [distributions](https://about.gitlab.com/handbook/marketing/brand-and-product-marketing/product-and-solution-marketing/tiers/#definitions) where the tracked feature is available. Can be set to one or more of `ce` or `ee`. |
+| `tiers` | yes | The [tiers](https://about.gitlab.com/handbook/marketing/brand-and-product-marketing/product-and-solution-marketing/tiers/) where the tracked feature is available. Can be set to one or more of `free`, `premium`, or `ultimate`. |
+
+### Example event definition
+
+The [`epics_promote.yml`](https://gitlab.com/gitlab-org/gitlab/-/blob/master/config/events/epics_promote.yml)
+YAML file includes an example event definition.
+
+```yaml
+description: Issue promoted to epic
+category: epics
+action: promote
+property_description: The string "issue_id"
+value_description: ID of the issue
+extra_properties:
+ weight:
+ type: integer
+ description: Weight of the issue
+identifiers:
+- project
+- user
+- namespace
+product_section: dev
+product_stage: plan
+product_group: group::product planning
+milestone: "11.10"
+introduced_by_url: https://gitlab.com/gitlab-org/gitlab/-/merge_requests/10537
+distributions:
+- ee
+tiers:
+- premium
+- ultimate
+```
+
+## Create a new event definition
+
+Use the dedicated [event definition generator](https://gitlab.com/gitlab-org/gitlab/-/blob/master/lib/generators/gitlab/snowplow_event_definition_generator.rb)
+to create new event definitions.
+
+The `category` and `action` of each event are included in the filename to standardize file naming.
+
+The generator takes three options:
+
+- `--ee`: Indicates if the event is for EE.
+- `--category=CATEGORY`: Indicates the `category` of the event.
+- `--action=ACTION`: Indicates the `action` of the event.
+
+```shell
+bundle exec rails generate gitlab:snowplow_event_definition --category Groups::EmailCampaignsController --action click
+create  config/events/groups__email_campaigns_controller_click.yml
+```
diff --git a/doc/development/internal_analytics/snowplow/implementation.md b/doc/development/internal_analytics/snowplow/implementation.md
new file mode 100644
index 00000000000..5ad97cf528c
--- /dev/null
+++ b/doc/development/internal_analytics/snowplow/implementation.md
@@ -0,0 +1,523 @@
+---
+stage: Analytics
+group: Analytics Instrumentation
+info: To determine the technical writer assigned to the Stage/Group associated with this page, see https://about.gitlab.com/handbook/product/ux/technical-writing/#assignments
+---
+
+# Implement Snowplow tracking
+
+This page describes how to:
+
+- Implement Snowplow frontend and backend tracking
+- Test Snowplow events
+
+## Event definitions
+
+Every Snowplow event, regardless of frontend or backend, requires a corresponding event definition. These definitions document the event and its properties to make it easier to maintain and analyze.
+These definitions can be browsed in the [event dictionary](https://metrics.gitlab.com/snowplow/). The [event dictionary guide](event_dictionary_guide.md) provides instructions for setting up an event definition.
+
+## Snowplow JavaScript frontend tracking
+
+GitLab provides a `Tracking` interface that wraps the [Snowplow JavaScript tracker](https://docs.snowplow.io/docs/collecting-data/collecting-from-own-applications/javascript-trackers/)
+to track custom events.
+
+For the recommended frontend tracking implementation, see [Usage recommendations](#usage-recommendations).
+
+Structured events and page views include the [`gitlab_standard`](schemas.md#gitlab_standard)
+context, using the `window.gl.snowplowStandardContext` object which includes
+[default data](https://gitlab.com/gitlab-org/gitlab/-/blob/master/app/views/layouts/_snowplow.html.haml)
+as a base:
+
+| Property | Example |
+| -------- | ------- |
+| `context_generated_at` | `"2022-01-01T01:00:00.000Z"` |
+| `environment` | `"production"` |
+| `extra` | `{}` |
+| `namespace_id` | `123` |
+| `plan` | `"gold"` |
+| `project_id` | `456` |
+| `source` | `"gitlab-rails"` |
+| `user_id` | `789`* |
+| `is_gitlab_team_member` | `true`|
+
+_\* Undergoes a pseudonymization process at the collector level._
+
+These properties [are overridden](https://gitlab.com/gitlab-org/gitlab/-/blob/master/app/assets/javascripts/tracking/get_standard_context.js)
+with frontend-specific values, like `source` (`gitlab-javascript`), `google_analytics_id`
+and the custom `extra` object. You can modify this object for any subsequent
+structured event that fires, although this is not recommended.
+
+Tracking implementations must have an `action` and a `category`. You can provide additional
+properties from the [event schema](index.md#event-schema), in
+addition to an `extra` object that accepts key-value pairs.
+
+| Property | Type | Default value | Description |
+|:-----------|:-------|:---------------------------|:--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| `category` | string | `document.body.dataset.page` | Page or subsection of a page in which events are captured. |
+| `action` | string | `'generic'` | Action the user is taking. Clicks must be `click` and activations must be `activate`. For example, focusing a form field is `activate_form_input`, and clicking a button is `click_button`. |
+| `data` | object | `{}` | Additional data such as `label`, `property`, `value` as described in [Event schema](index.md#event-schema), `context` for custom contexts, and `extra` (key-value pairs object). |
+
+### Usage recommendations
+
+- Use [data attributes](#implement-data-attribute-tracking) on HTML elements that emit `click`, `show.bs.dropdown`, or `hide.bs.dropdown` events.
+- Use the [Vue mixin](#implement-vue-component-tracking) for tracking custom events, or if the supported events for data attributes are not propagating. For example, clickable components that don't emit `click`.
+- Use the [tracking class](#implement-raw-javascript-tracking) when tracking in vanilla JavaScript files.
+
+### Implement data attribute tracking
+
+To implement tracking for HAML or Vue templates, add a [`data-track` attribute](#data-track-attributes) to the element.
+
+The following example shows `data-track-*` attributes assigned to a button:
+
+```haml
+%button.btn{ data: { track_action: "click_button", track_label: "template_preview", track_property: "my-template" } }
+```
+
+```html
+<button class="btn"
+ data-track-action="click_button"
+ data-track-label="template_preview"
+ data-track-property="my-template"
+ data-track-extra='{ "template_variant": "primary" }'
+/>
+```
+
+#### `data-track` attributes
+
+| Attribute | Required | Description |
+|:----------------------|:---------|:------------|
+| `data-track-action` | true | Action the user is taking. Clicks must be prepended with `click` and activations must be prepended with `activate`. For example, focusing a form field is `activate_form_input` and clicking a button is `click_button`. Replaces `data-track-event`, which was [deprecated](https://gitlab.com/gitlab-org/gitlab/-/issues/290962) in GitLab 13.11. |
+| `data-track-label` | false | The specific element or object to act on. This can be: the label of the element, for example, a tab labeled 'Create from template' for `create_from_template`; a unique identifier if no text is available, for example, `groups_dropdown_close` for closing the Groups dropdown list; or the name or title attribute of a record being created. |
+| `data-track-property` | false | Any additional property of the element, or object being acted on. |
+| `data-track-value` | false | Describes a numeric value (decimal) directly related to the event. This could be the value of an input. For example, `10` when clicking `internal` visibility. If omitted, this is the element's `value` property or `undefined`. For checkboxes, the default value is the element's checked attribute or `0` when unchecked. The value is parsed as numeric before sending the event. |
+| `data-track-extra` | false | A key-value pair object passed as a valid JSON string. This attribute is added to the `extra` property in our [`gitlab_standard`](schemas.md#gitlab_standard) schema. |
+| `data-track-context` | false | To append a custom context object, passed as a valid JSON string. |
+
+#### Event listeners
+
+Event listeners bind at the document level to handle click events in elements with data attributes.
+This allows them to be handled when the DOM re-renders or changes. Document-level binding reduces
+the likelihood that click events stop propagating up the DOM tree.
+
+If click events stop propagating, you must implement listeners and [Vue component tracking](#implement-vue-component-tracking) or [raw JavaScript tracking](#implement-raw-javascript-tracking).
+
+#### Helper methods
+
+You can use the following Ruby helpers:
+
+```ruby
+tracking_attrs(label, action, property) # { data: { track_label... } }
+
+tracking_attrs_data(label, action, property) # { track_label... }
+```
+
+You can also use them in HAML templates:
+
+```haml
+%button{ **tracking_attrs('main_navigation', 'click_button', 'navigation') }
+
+// When merging with additional data
+// %button{ data: { platform: "...", **tracking_attrs_data('main_navigation', 'click_button', 'navigation') } }
+```
+
+If you use the GitLab helper method [`nav_link`](https://gitlab.com/gitlab-org/gitlab/-/blob/master/app/helpers/tab_helper.rb#L76), you must wrap `html_options` under the `html_options` keyword argument. If you
+use the `ActionView` helper method [`link_to`](https://api.rubyonrails.org/classes/ActionView/Helpers/UrlHelper.html#method-i-link_to), you don't need to wrap `html_options`.
+
+```ruby
+# Bad
+= nav_link(controller: ['dashboard/groups', 'explore/groups'], data: { track_label: "explore_groups",
+track_action: "click_button" })
+
+# Good
+= nav_link(controller: ['dashboard/groups', 'explore/groups'], html_options: { data: { track_label:
+"explore_groups", track_action: "click_button" } })
+
+# Good (other helpers)
+= link_to explore_groups_path, title: _("Explore"), data: { track_label: "explore_groups", track_action:
+"click_button" }
+```
+
+### Implement Vue component tracking
+
+For custom event tracking, use the [Vue mixin](https://gitlab.com/gitlab-org/gitlab/-/blob/master/app/assets/javascripts/tracking/tracking.js#L207). It exposes `Tracking.event` as the `track` method.
+You can specify tracking options by creating a `tracking` data object or
+computed property, or by passing them as a second parameter: `this.track('click_button', opts)`.
+These options override any defaults and allow the values to be dynamic from props or based on state:
+
+| Property | Type | Default | Example |
+| -- | -- | -- | -- |
+| `category` | string | `document.body.dataset.page` | `'code_quality_walkthrough'` |
+| `label` | string | `''` | `'process_start_button'` |
+| `property` | string | `''` | `'asc'` or `'desc'` |
+| `value` | integer | `undefined` | `0`, `1`, `500` |
+| `extra` | object | `{}` | `{ selectedVariant: this.variant }` |
+
+To implement Vue component tracking:
+
+1. Import the `Tracking` library and call the `mixin` method:
+
+ ```javascript
+ import Tracking from '~/tracking';
+
+ const trackingMixin = Tracking.mixin();
+
+ // Optionally provide default properties
+ // const trackingMixin = Tracking.mixin({ label: 'right_sidebar' });
+ ```
+
+1. Use the mixin in the component:
+
+ ```javascript
+ export default {
+ mixins: [trackingMixin],
+ // Or
+ // mixins: [Tracking.mixin()],
+ // mixins: [Tracking.mixin({ label: 'right_sidebar' })],
+
+ data() {
+ return {
+ expanded: false,
+ };
+ },
+ };
+ ```
+
+1. You can specify tracking options by creating a `tracking` data object
+or computed property:
+
+ ```javascript
+ export default {
+ name: 'RightSidebar',
+
+ mixins: [Tracking.mixin()],
+
+ data() {
+ return {
+ expanded: false,
+ variant: '',
+ tracking: {
+ label: 'right_sidebar',
+ // property: '',
+ // value: '',
+ // experiment: '',
+ // extra: {},
+ },
+ };
+ },
+
+ // Or
+ // computed: {
+ // tracking() {
+ // return {
+ // property: this.variant,
+ // extra: { expanded: this.expanded },
+ // };
+ // },
+ // },
+ };
+ ```
+
+1. Call the `track` method. Tracking options can be passed as the second parameter:
+
+ ```javascript
+ this.track('click_button', {
+ label: 'right_sidebar',
+ });
+ ```
+
+ Or use the `track` method in the template:
+
+ ```html
+ <template>
+ <div>
+ <button data-testid="toggle" @click="toggle">Toggle</button>
+
+ <div v-if="expanded">
+ <p>Hello world!</p>
+ <button @click="track('click_button')">Track another event</button>
+ </div>
+ </div>
+ </template>
+ ```
+
+#### Testing example
+
+```javascript
+export default {
+ name: 'CountDropdown',
+
+ mixins: [Tracking.mixin({ label: 'count_dropdown' })],
+
+ data() {
+ return {
+ variant: 'counter',
+ count: 0,
+ };
+ },
+
+ methods: {
+ handleChange({ target }) {
+ const { variant } = this;
+
+ this.count = Number(target.value);
+
+ this.track('change_value', {
+ value: this.count,
+ extra: { variant }
+ });
+ },
+ },
+};
+```
+
+```javascript
+import { mockTracking } from 'helpers/tracking_helper';
+// mockTracking(category, documentOverride, spyMethod)
+
+describe('CountDropdown.vue', () => {
+ let trackingSpy;
+ let wrapper;
+
+ ...
+
+ beforeEach(() => {
+ trackingSpy = mockTracking(undefined, wrapper.element, jest.spyOn);
+ });
+
+ const findDropdown = () => wrapper.find('[data-testid="dropdown"]');
+
+ it('tracks change event', () => {
+ const dropdown = findDropdown();
+ dropdown.element.value = 30;
+ dropdown.trigger('change');
+
+ expect(trackingSpy).toHaveBeenCalledWith(undefined, 'change_value', {
+ value: 30,
+ label: 'count_dropdown',
+ extra: { variant: 'counter' },
+ });
+ });
+});
+```
+
+### Implement raw JavaScript tracking
+
+To track from a vanilla JavaScript file, use the `Tracking.event` static function
+(calls [`dispatchSnowplowEvent`](https://gitlab.com/gitlab-org/gitlab/-/blob/master/app/assets/javascripts/tracking/dispatch_snowplow_event.js)).
+
+The following example demonstrates tracking a click on a button by manually calling `Tracking.event`.
+
+```javascript
+import Tracking from '~/tracking';
+
+const button = document.getElementById('create_from_template_button');
+
+button.addEventListener('click', () => {
+ Tracking.event(undefined, 'click_button', {
+ label: 'create_from_template',
+ property: 'template_preview',
+ extra: {
+ templateVariant: 'primary',
+ valid: 1,
+ },
+ });
+});
+```
+
+#### Testing example
+
+```javascript
+import Tracking from '~/tracking';
+
+describe('MyTracking', () => {
+ let wrapper;
+
+ beforeEach(() => {
+ jest.spyOn(Tracking, 'event');
+ });
+
+ const findButton = () => wrapper.find('[data-testid="create_from_template"]');
+
+ it('tracks event', () => {
+ findButton().trigger('click');
+
+ expect(Tracking.event).toHaveBeenCalledWith(undefined, 'click_button', {
+ label: 'create_from_template',
+ property: 'template_preview',
+ extra: {
+ templateVariant: 'primary',
+ valid: true,
+ },
+ });
+ });
+});
+```
+
+### Form tracking
+
+To enable Snowplow automatic [form tracking](https://docs.snowplow.io/docs/collecting-data/collecting-from-own-applications/javascript-trackers/javascript-tracker/javascript-tracker-v2/tracking-specific-events/#form-tracking):
+
+1. Call `Tracking.enableFormTracking` when the DOM is ready.
+1. Provide a `config` object that includes at least one of the following elements:
+ - `forms` determines the forms to track. Identified by the CSS class name.
+ - `fields` determines the fields inside the tracked forms to track. Identified by the field `name`.
+1. Optional. Provide a list of contexts as the second argument. The [`gitlab_standard`](schemas.md#gitlab_standard) schema is excluded from these events.
+
+```javascript
+Tracking.enableFormTracking({
+ forms: { allow: ['sign-in-form', 'password-recovery-form'] },
+ fields: { allow: ['terms_and_conditions', 'newsletter_agreement'] },
+});
+```
+
+#### Testing example
+
+```javascript
+import Tracking from '~/tracking';
+
+describe('MyFormTracking', () => {
+ let formTrackingSpy;
+
+ beforeEach(() => {
+ formTrackingSpy = jest
+ .spyOn(Tracking, 'enableFormTracking')
+ .mockImplementation(() => null);
+ });
+
+ it('initialized with the correct configuration', () => {
+ expect(formTrackingSpy).toHaveBeenCalledWith({
+ forms: { allow: ['sign-in-form', 'password-recovery-form'] },
+ fields: { allow: ['terms_and_conditions', 'newsletter_agreement'] },
+ });
+ });
+});
+```
+
+## Implement Ruby backend tracking
+
+`Gitlab::Tracking` is an interface that wraps the [Snowplow Ruby Tracker](https://docs.snowplow.io/docs/collecting-data/collecting-from-own-applications/ruby-tracker/) for tracking custom events.
+Backend tracking provides:
+
+- User behavior tracking
+- Instrumentation to monitor and visualize performance over time in a section or aspect of code.
+
+To add custom event tracking and instrumentation, call the `GitLab::Tracking.event` class method.
+For example:
+
+```ruby
+class Projects::CreateService < BaseService
+ def execute
+ project = Project.create(params)
+
+ Gitlab::Tracking.event('Projects::CreateService', 'create_project', label: project.errors.full_messages.to_sentence,
+ property: project.valid?.to_s, project: project, user: current_user, namespace: namespace)
+ end
+end
+```
+
+Use the following arguments:
+
+| Argument | Type | Default value | Description |
+|------------|---------------------------|---------------|-----------------------------------------------------------------------------------------------------------------------------------|
+| `category` | String | | Area or aspect of the application. For example, `HealthCheckController` or `Lfs::FileTransformer`. |
+| `action` | String | | The action being taken. For example, a controller action such as `create`, or an Active Record callback. |
+| `label` | String | `nil` | The specific element or object to act on. This can be one of the following: the label of the element, for example, a tab labeled 'Create from template' for `create_from_template`; a unique identifier if no text is available, for example, `groups_dropdown_close` for closing the Groups dropdown list; or the name or title attribute of a record being created. |
+| `property` | String | `nil` | Any additional property of the element, or object being acted on. |
+| `value` | Numeric | `nil` | Describes a numeric value (decimal) directly related to the event. This could be the value of an input. For example, `10` when clicking `internal` visibility. |
+| `context` | Array\[SelfDescribingJSON\] | `nil` | An array of custom contexts to send with this event. Most events should not have any custom contexts. |
+| `project` | Project | `nil` | The project associated with the event. |
+| `user` | User | `nil` | The user associated with the event. This value undergoes a pseudonymization process at the collector level. |
+| `namespace` | Namespace | `nil` | The namespace associated with the event. |
+| `extra` | Hash | `{}` | Additional keyword arguments are collected into a hash and sent with the event. |
+
+### Unit testing
+
+To test backend Snowplow events, use the `expect_snowplow_event` helper. For more information, see
+[testing best practices](../../testing_guide/best_practices.md#test-snowplow-events).
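+
+As a hedged RSpec sketch based on the `Projects::CreateService` snippet above (the `user`, `params`, and `namespace` variables, and the constructor arguments, are assumed to be set up elsewhere in the spec):
+
+```ruby
+it 'tracks the project creation event' do
+  # Constructor arguments are illustrative; adapt them to the service under test.
+  described_class.new(user, params).execute
+
+  # Matcher arguments mirror the Gitlab::Tracking.event call being asserted.
+  expect_snowplow_event(
+    category: 'Projects::CreateService',
+    action: 'create_project',
+    user: user,
+    namespace: namespace
+  )
+end
+```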
+
+### Performance
+
+We use the [AsyncEmitter](https://snowplow.github.io/snowplow-ruby-tracker/SnowplowTracker/AsyncEmitter.html) when tracking events, which allows for instrumentation calls to be run in a background thread. This is still an active area of development.
+
+## Develop and test Snowplow
+
+You can use the following tools to develop and test frontend and backend Snowplow events:
+
+| Testing Tool | Frontend Tracking | Backend Tracking | Local Development Environment | Staging Environment | Production Environment |
+|----------------------------------------------|--------------------|---------------------|-------------------------------|------------------------|------------------------|
+| Snowplow Analytics Debugger Chrome Extension | Yes | No | Yes | Yes | Yes |
+| Snowplow Inspector Chrome Extension | Yes | No | Yes | Yes | Yes |
+| Snowplow Micro | Yes | Yes | Yes | No | No |
+
+### Test frontend events
+
+Before you test frontend events in development, you must:
+
+1. [Enable Snowplow tracking in the Admin Area](index.md#enable-snowplow-tracking).
+1. Turn off ad blockers that could prevent Snowplow JavaScript from loading in your environment.
+1. Turn off "Do Not Track" (DNT) in your browser.
+
+All URLs are pseudonymized. The entity identifier [replaces](https://docs.snowplow.io/docs/collecting-data/collecting-from-own-applications/javascript-trackers/javascript-tracker/javascript-tracker-v2/tracker-setup/other-parameters-2/#setting-a-custom-page-url-and-referrer-url) personally identifiable
+information (PII). PII includes usernames, group names, and project names.
+Page titles are hardcoded as `GitLab` for the same reason.
+
+#### Snowplow Analytics Debugger Chrome Extension
+
+[Snowplow Analytics Debugger](https://www.iglooanalytics.com/blog/snowplow-analytics-debugger-chrome-extension.html) is a browser extension for testing frontend events. It works in production, staging, and local development environments.
+
+1. Install the [Snowplow Analytics Debugger](https://chrome.google.com/webstore/detail/snowplow-analytics-debugg/jbnlcgeengmijcghameodeaenefieedm) Chrome browser extension.
+1. Open Chrome DevTools to the Snowplow Analytics Debugger tab.
+
+#### Snowplow Inspector Chrome Extension
+
+Snowplow Inspector is a Chrome browser extension for testing frontend events. It works in production, staging, and local development environments.
+
+<i class="fa fa-youtube-play youtube" aria-hidden="true"></i>
+For a video tutorial, see the [Snowplow plugin walk through](https://www.youtube.com/watch?v=g4rqnIZ1Mb4).
+
+1. Install [Snowplow Inspector](https://chrome.google.com/webstore/detail/snowplow-inspector/maplkdomeamdlngconidoefjpogkmljm?hl=en).
+1. To open the extension, select the Snowplow Inspector icon beside the address bar.
+1. Click around on a webpage with Snowplow to see JavaScript events firing in the inspector window.
+
+### Test backend events with Snowplow Micro
+
+[Snowplow Micro](https://snowplow.io/blog/introducing-snowplow-micro/) is a
+Docker-based solution for testing backend and frontend events in a local development environment. Snowplow Micro
+records the same events as the full Snowplow pipeline. To query events, use the Snowplow Micro API.
+
+It can be set up automatically using [GitLab Development Kit (GDK)](https://gitlab.com/gitlab-org/gitlab-development-kit).
+See the [how-to docs](https://gitlab.com/gitlab-org/gitlab-development-kit/-/blob/main/doc/howto/snowplow_micro.md) for more details.
+
+1. Set the environment variable to tell the GDK to use Snowplow Micro in development. This overrides two `application_settings` options:
+
+ - The `snowplow_enabled` setting instead returns `true` from `Gitlab::Tracking.enabled?`.
+ - The `snowplow_collector_hostname` setting instead always returns `localhost:9090` (or whatever port is set for the `snowplow_micro.port` GDK setting) from `Gitlab::Tracking.collector_hostname`.
+
+With Snowplow Micro set up, you can manually test backend Snowplow events:
+
+1. Send a test Snowplow event from the Rails console:
+
+ ```ruby
+ Gitlab::Tracking.event('category', 'action')
+ ```
+
+1. Navigate to `localhost:9090/micro/good` to see the event.
+
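+You can also query the Snowplow Micro endpoint from a Ruby session instead of the browser. A minimal hedged sketch, assuming Micro listens on the default `localhost:9090` used above:
+
+```ruby
+# Fetches the events Snowplow Micro has recorded as "good".
+# The port is assumed to match your snowplow_micro.port GDK setting.
+require 'net/http'
+require 'json'
+
+response = Net::HTTP.get(URI('http://localhost:9090/micro/good'))
+events = JSON.parse(response)
+
+puts "#{events.length} good event(s) recorded"
+```
+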
+#### Useful links
+
+- [Snowplow Micro repository](https://github.com/snowplow-incubator/snowplow-micro)
+- [Installation guide recording](https://www.youtube.com/watch?v=OX46fo_A0Ag)
+
+### Troubleshoot
+
+To control content security policy warnings when using an external host, modify `config/gitlab.yml`
+to allow or prevent them. To allow them, add the relevant host for `connect_src`. For example, for
+`https://snowplow.trx.gitlab.net`:
+
+```yaml
+development:
+ <<: *base
+ gitlab:
+ content_security_policy:
+ enabled: true
+ directives:
+ connect_src: "'self' http://localhost:* http://127.0.0.1:* ws://localhost:* wss://localhost:* ws://127.0.0.1:* https://snowplow.trx.gitlab.net/"
+```
diff --git a/doc/development/internal_analytics/snowplow/index.md b/doc/development/internal_analytics/snowplow/index.md
new file mode 100644
index 00000000000..8265bceaf06
--- /dev/null
+++ b/doc/development/internal_analytics/snowplow/index.md
@@ -0,0 +1,201 @@
+---
+stage: Analytics
+group: Analytics Instrumentation
+info: To determine the technical writer assigned to the Stage/Group associated with this page, see https://about.gitlab.com/handbook/product/ux/technical-writing/#assignments
+---
+
+# Snowplow development guidelines
+
+Snowplow is an enterprise-grade marketing and product analytics platform that tracks how users engage with our website and application.
+
+[Snowplow](https://snowplow.io/) consists of several loosely-coupled sub-systems:
+
+- **Trackers** fire Snowplow events. Snowplow has twelve trackers that cover web, mobile, desktop, server, and IoT.
+- **Collectors** receive Snowplow events from trackers. We use different event collectors that synchronize events to Amazon S3, Apache Kafka, or Amazon Kinesis.
+- **Enrich** cleans raw Snowplow events, enriches them, and puts them into storage. There is a Hadoop-based enrichment process, and a Kinesis-based or Kafka-based process.
+- **Storage** stores Snowplow events. We store the Snowplow events in a flat file structure on S3, and in the Redshift and PostgreSQL databases.
+- **Data modeling** joins event-level data with other data sets, aggregates them into smaller data sets, and applies business logic. This produces a clean set of tables for data analysis. We use data models for Redshift and Looker.
+- **Analytics** are performed on Snowplow events or on aggregate tables.
+
+![Snowplow flow](../../img/snowplow_flow.png)
+
+## Enable Snowplow tracking
+
+Tracking can be enabled at:
+
+- The instance level, which enables tracking on both the frontend and backend layers.
+- The user level. User tracking can be disabled on a per user basis.
+ GitLab respects the [Do Not Track](https://www.eff.org/issues/do-not-track) standard, so any user who has enabled the Do Not Track option in their browser is not tracked at a user level.
+
+For GitLab.com, Snowplow tracking is configured to send data to a collector managed by GitLab. By default, self-managed
+instances do not have a collector configured and do not collect data with Snowplow.
+
+You can configure your self-managed GitLab instance to use a custom Snowplow collector.
+
+1. On the left sidebar, expand the top-most chevron (**{chevron-down}**).
+1. Select **Admin Area**.
+1. On the left sidebar, select **Settings > General**.
+1. Expand **Snowplow**.
+1. Select **Enable Snowplow tracking** and enter your Snowplow configuration information. For example:
+
+ | Name | Value |
+ |--------------------|-------------------------------|
+ | Collector hostname | `your-snowplow-collector.net` |
+ | App ID | `gitlab` |
+ | Cookie domain | `.your-gitlab-instance.com` |
+
+1. Select **Save changes**.
+
+## Snowplow request flow
+
+The following example shows a basic request/response flow between these components:
+
+- Snowplow JS / Ruby Trackers on GitLab.com
+- [GitLab.com Snowplow Collector](https://gitlab.com/gitlab-com/gl-infra/readiness/-/blob/master/library/snowplow/index.md)
+- The GitLab S3 Bucket
+- The GitLab Snowflake Data Warehouse
+- Sisense
+
+```mermaid
+sequenceDiagram
+ participant Snowplow JS (Frontend)
+ participant Snowplow Ruby (Backend)
+ participant GitLab.com Snowplow Collector
+ participant S3 Bucket
+ participant Snowflake DW
+ participant Sisense Dashboards
+ Snowplow JS (Frontend) ->> GitLab.com Snowplow Collector: FE Tracking event
+ Snowplow Ruby (Backend) ->> GitLab.com Snowplow Collector: BE Tracking event
+ loop Process using Kinesis Stream
+ GitLab.com Snowplow Collector ->> GitLab.com Snowplow Collector: Log raw events
+ GitLab.com Snowplow Collector ->> GitLab.com Snowplow Collector: Enrich events
+ GitLab.com Snowplow Collector ->> GitLab.com Snowplow Collector: Write to disk
+ end
+ GitLab.com Snowplow Collector ->> S3 Bucket: Kinesis Firehose
+ Note over GitLab.com Snowplow Collector, S3 Bucket: Pseudonymization
+ S3 Bucket->>Snowflake DW: Import data
+ Snowflake DW->>Snowflake DW: Transform data using dbt
+ Snowflake DW->>Sisense Dashboards: Data available for querying
+```
+
+For more details about the architecture, see [Snowplow infrastructure](infrastructure.md).
+
+## Event schema
+
+All the events must be consistent. If each feature captures events differently, it can be difficult
+to perform analysis.
+
+Each event provides attributes that describe the event.
+
+| Attribute | Type | Required | Description |
+| --------- | ------- | -------- | ----------- |
+| category | text | true | The page or backend section of the application. Unless infeasible, use the Rails page attribute by default in the frontend, and namespace + class name on the backend, for example, `Notes::CreateService`. |
+| action | text | true | The action the user takes, or aspect that's being instrumented. The first word must describe the action or aspect. For example, clicks must be `click`, activations must be `activate`, creations must be `create`. Use underscores to describe what was acted on. For example, activating a form field is `activate_form_input`, an interface action like clicking on a dropdown list is `click_dropdown`, a behavior like creating a project record from the backend is `create_project`. |
+| label | text | false | The specific element or object to act on. This can be one of the following: the label of the element, for example, a tab labeled 'Create from template' for `create_from_template`; a unique identifier if no text is available, for example, `groups_dropdown_close` for closing the Groups dropdown list; or the name or title attribute of a record being created. For Service Ping metrics adapted to Snowplow events, this should be the full metric [key path](../service_ping/metrics_dictionary.md#metric-key_path) taken from its definition file. |
+| property | text | false | Any additional property of the element, or object being acted on. For Service Ping metrics adapted to Snowplow events, this should be additional information or context that can help analyze the event. For example, in the case of `usage_activity_by_stage_monthly.create.merge_requests_users`, there are four different possible merge request actions: "create", "merge", "comment", and "close". Each of these would be a possible property value. |
+| value | decimal | false | Describes a numeric value (decimal) directly related to the event. This could be the value of an input. For example, `10` when clicking `internal` visibility. |
+| context | vector | false | Additional data in the form of a [self-describing JSON](https://docs.snowplow.io/docs/pipeline-components-and-applications/iglu/common-architecture/self-describing-json-schemas/) to describe the event if the attributes are not sufficient. Each context must have its schema defined to assure data integrity. Refer to the list of GitLab-defined contexts for more details. |
+
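+The `context` attribute above is a list of self-describing JSON objects: each entry pairs a schema reference with a data payload. A minimal sketch of one entry, expressed as a Ruby hash (the schema URI and fields are placeholders, not a registered GitLab schema):
+
+```ruby
+# One custom context entry: a schema reference plus the data it validates.
+custom_context = {
+  schema: 'iglu:com.gitlab/example_context/jsonschema/1-0-0', # placeholder Iglu URI
+  data: { visibility: 'internal' }
+}
+```
+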
+### Examples
+
+| Category* | Label | Action | Property** | Value |
+|-------------|------------------|-----------------------|----------|:-----:|
+| `[root:index]` | `main_navigation` | `click_navigation_link` | `[link_label]` | - |
+| `[groups:boards:show]` | `toggle_swimlanes` | `click_toggle_button` | - | `[is_active]` |
+| `[projects:registry:index]` | `registry_delete` | `click_button` | - | - |
+| `[projects:registry:index]` | `registry_delete` | `confirm_deletion` | - | - |
+| `[projects:blob:show]` | `congratulate_first_pipeline` | `click_button` | `[human_access]` | - |
+| `[projects:clusters:new]` | `chart_options` | `generate_link` | `[chart_link]` | - |
+| `[projects:clusters:new]` | `chart_options` | `click_add_label_button` | `[label_id]` | - |
+| `API::NpmPackages` | `counts.package_events_i_package_push_package_by_deploy_token` | `push_package` | `npm` | - |
+
+_* If you choose to omit the category, you can use the default._<br>
+_** Use property for variable strings._
+
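+To relate the examples to the schema, here is a sketch of how a row such as `registry_delete` might be fired with the backend Ruby tracker call `Gitlab::Tracking.event`. The keyword arguments mirror the attributes in the tables above and are illustrative only:
+
+```ruby
+Gitlab::Tracking.event(
+  'projects:registry:index',  # category: the Rails page or backend section
+  'click_button',             # action: what was done
+  label: 'registry_delete'    # label: the element acted on
+)
+```
+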
+### Reference SQL
+
+#### Last 20 `reply_comment_button` events
+
+```sql
+SELECT
+ session_id,
+ event_id,
+ event_label,
+ event_action,
+ event_property,
+ event_value,
+ event_category,
+ contexts
+FROM legacy.snowplow_structured_events_all
+WHERE
+ event_label = 'reply_comment_button'
+ AND event_action = 'click_button'
+ -- AND event_category = 'projects:issues:show'
+ -- AND event_value = 1
+ORDER BY collector_tstamp DESC
+LIMIT 20
+```
+
+#### Last 100 page view events
+
+```sql
+SELECT
+ -- page_url,
+ -- page_title,
+ -- referer_url,
+ -- marketing_medium,
+ -- marketing_source,
+ -- marketing_campaign,
+ -- browser_window_width,
+ -- device_is_mobile
+ *
+FROM legacy.snowplow_page_views_30
+ORDER BY page_view_start DESC
+LIMIT 100
+```
+
+#### Top 20 users who fired `reply_comment_button` in the last 30 days
+
+```sql
+SELECT
+ count(*) as hits,
+ se_action,
+ se_category,
+ gsc_pseudonymized_user_id
+FROM legacy.snowplow_gitlab_events_30
+WHERE
+ se_label = 'reply_comment_button'
+ AND gsc_pseudonymized_user_id IS NOT NULL
+GROUP BY gsc_pseudonymized_user_id, se_category, se_action
+ORDER BY count(*) DESC
+LIMIT 20
+```
+
+#### Query JSON formatted data
+
+```sql
+SELECT
+ derived_tstamp,
+ contexts:data[0]:data:extra:old_format as CURRENT_FORMAT,
+ contexts:data[0]:data:extra:value as UPDATED_FORMAT
+FROM legacy.snowplow_structured_events_all
+WHERE event_action in ('wiki_format_updated')
+ORDER BY derived_tstamp DESC
+LIMIT 100
+```
+
+### Web-specific parameters
+
+Snowplow JavaScript adds [web-specific parameters](https://docs.snowplow.io/docs/collecting-data/collecting-from-own-applications/snowplow-tracker-protocol/#Web-specific_parameters) to all web events by default.
+
+## Related topics
+
+- [Snowplow data structure](https://docs.snowplow.io/docs/understanding-your-pipeline/canonical-event/)
+- [Our Iglu schema registry](https://gitlab.com/gitlab-org/iglu)
+- [List of events used in our codebase (Event Dictionary)](https://metrics.gitlab.com/snowplow/)
+- [Analytics Instrumentation Guide](https://about.gitlab.com/handbook/product/analytics-instrumentation-guide/)
+- [Service Ping Guide](../service_ping/index.md)
+- [Analytics Instrumentation Direction](https://about.gitlab.com/direction/analytics/analytics-instrumentation/)
+- [Data Analysis Process](https://about.gitlab.com/handbook/business-technology/data-team/#data-analysis-process/)
+- [Data for Product Managers](https://about.gitlab.com/handbook/business-technology/data-team/programs/data-for-product-managers/)
+- [Data Infrastructure](https://about.gitlab.com/handbook/business-technology/data-team/platform/infrastructure/)
diff --git a/doc/development/internal_analytics/snowplow/infrastructure.md b/doc/development/internal_analytics/snowplow/infrastructure.md
new file mode 100644
index 00000000000..9679abac6b7
--- /dev/null
+++ b/doc/development/internal_analytics/snowplow/infrastructure.md
@@ -0,0 +1,101 @@
+---
+stage: Analytics
+group: Analytics Instrumentation
+info: To determine the technical writer assigned to the Stage/Group associated with this page, see https://about.gitlab.com/handbook/product/ux/technical-writing/#assignments
+---
+
+# Snowplow infrastructure
+
+Snowplow events on GitLab SaaS fired by a [tracker](implementation.md) go through an AWS pipeline, managed by GitLab.
+
+## Event flow in the AWS pipeline
+
+Every event goes through a collector, enricher, and pseudonymization lambda. The event is then dumped to S3 storage where it can be picked up by the Snowflake data warehouse.
+
+Deploying and managing the infrastructure is automated using Terraform in the current [Terraform repository](https://gitlab.com/gitlab-com/gl-infra/config-mgmt/-/tree/master/environments/aws-snowplow).
+
+```mermaid
+graph LR
+ GL[GitLab.com]-->COL
+
+ subgraph aws-cloud[AWS]
+ COL[Collector]-->|snowplow-raw-good|ENR
+ COL[Collector]-->|snowplow-raw-bad|FRBE
+ subgraph firehoserbe[Firehose]
+ FRBE[AWS Lambda]
+ end
+ FRBE-->S3RBE
+
+ ENR[Enricher]-->|snowplow-enriched-bad|FEBE
+ subgraph firehoseebe[Firehose]
+ FEBE[AWS Lambda]
+ end
+ FEBE-->S3EBE
+
+ ENR[Enricher]-->|snowplow-enriched-good|FRGE
+ subgraph firehosege[Firehose]
+ FRGE[AWS Lambda]
+ end
+ FRGE-->S3GE
+ end
+
+ subgraph snowflake[Data warehouse]
+ S3RBE[S3 raw-bad]-->BE[gitlab_bad_events]
+ S3EBE[S3 enriched-bad]-->BE[gitlab_bad_events]
+ S3GE[S3 output]-->GE[gitlab_events]
+ end
+```
+
+See [Snowplow technology 101](https://github.com/snowplow/snowplow/#snowplow-technology-101) for Snowplow's own documentation and an overview of how collectors and enrichers work.
+
+### Pseudonymization
+
+In contrast to a typical Snowplow pipeline, after enrichment, GitLab Snowplow events go through a [pseudonymization service](https://gitlab.com/gitlab-org/analytics-section/analytics-instrumentation/snowplow-pseudonymization) in the form of an AWS Lambda service before they are stored in S3 storage.
+
+#### Why events need to be pseudonymized
+
+GitLab is bound by its [obligations to the community](https://about.gitlab.com/handbook/product/analytics-instrumentation-guide/service-usage-data-commitment/)
+and by [legal regulations](https://about.gitlab.com/handbook/legal/privacy/services-usage-data/) to protect the privacy of its users.
+
+At the same time, GitLab must provide valuable insights for business decisions, and there is a need
+for a better understanding of different users' behavior patterns. The
+pseudonymization process is a compromise between these two requirements.
+
+Pseudonymization processes personally identifiable information inside a Snowplow event in an irreversible fashion:
+the output is deterministic for a given input, but masks any relation to that input.
+
+#### How events are pseudonymized
+
+Pseudonymization uses an allowlist that provides privacy by default. Therefore, each
+attribute received as part of a Snowplow event is pseudonymized unless the attribute
+is an allowed exception.
+
+Pseudonymization is done using the HMAC-SHA256 keyed hash algorithm.
+Attributes are combined with a secret salt to replace each piece of identifiable information with a pseudonym.
+
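+A minimal Ruby sketch of the idea (the production implementation is the AWS Lambda project linked on this page; the salt source and attribute value here are placeholders):
+
+```ruby
+require 'openssl'
+
+# Hypothetical salt source; the real service manages its own secret.
+salt = ENV.fetch('PSEUDONYMIZATION_SALT', 'example-salt')
+
+# Deterministic for a given input, but the input cannot be recovered
+# from the resulting digest.
+def pseudonymize(value, salt)
+  OpenSSL::HMAC.hexdigest('SHA256', salt, value.to_s)
+end
+
+pseudonymize('user@example.com', salt)
+```
+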
+### S3 bucket data lake to Snowflake
+
+See the [Data team's Snowplow Overview](https://about.gitlab.com/handbook/business-technology/data-team/platform/snowplow/) for further details on how data is ingested into our Snowflake data warehouse.
+
+## Monitoring
+
+There are several tools that monitor Snowplow events tracking in different stages of the processing pipeline:
+
+- [Analytics Instrumentation Grafana dashboard](https://dashboards.gitlab.net/d/product-intelligence-main/product-intelligence-product-intelligence?orgId=1) monitors backend events sent from a GitLab.com instance to the collectors fleet. This dashboard provides information about:
+ - The number of events that successfully reach Snowplow collectors.
+ - The number of events that failed to reach Snowplow collectors.
+ - The number of backend events that were sent.
+- [AWS CloudWatch dashboard](https://console.aws.amazon.com/cloudwatch/home?region=us-east-1#dashboards:name=SnowPlow;start=P3D) monitors the state of the events in a processing pipeline. The pipeline starts from Snowplow collectors, goes through to enrichers and pseudonymization, and then up to persistence in an S3 bucket. From S3, the events are imported into the Snowflake Data Warehouse. You must have AWS access rights to view this dashboard. For more information, see [monitoring](https://gitlab.com/gitlab-org/analytics-section/analytics-instrumentation/snowplow-pseudonymization#monitoring) in the Snowplow Events pseudonymization service documentation.
+- [Sisense dashboard](https://app.periscopedata.com/app/gitlab/417669/Snowplow-Summary-Dashboard) provides information about the number of good and bad events imported into the Data Warehouse, in addition to the total number of imported Snowplow events.
+
+For more information, see this [video walk-through](https://www.youtube.com/watch?v=NxPS0aKa_oU).
+
+## Related topics
+
+- [Snowplow technology 101](https://github.com/snowplow/snowplow/#snowplow-technology-101)
+- [Snowplow pseudonymization AWS Lambda project](https://gitlab.com/gitlab-org/analytics-section/analytics-instrumentation/snowplow-pseudonymization)
+- [Analytics Instrumentation Guide](https://about.gitlab.com/handbook/product/analytics-instrumentation-guide/)
+- [Data Infrastructure](https://about.gitlab.com/handbook/business-technology/data-team/platform/infrastructure/)
+- [Snowplow architecture overview (internal)](https://www.youtube.com/watch?v=eVYJjzspsLU)
+- [Snowplow architecture overview slide deck (internal)](https://docs.google.com/presentation/d/16gQEO5CAg8Tx4NBtfnZj-GF4juFI6HfEPWcZgH4Rn14/edit?usp=sharing)
+- [AWS Lambda implementation (internal)](https://youtu.be/cQd0mdMhkQA)
diff --git a/doc/development/internal_analytics/snowplow/review_guidelines.md b/doc/development/internal_analytics/snowplow/review_guidelines.md
new file mode 100644
index 00000000000..03d1812cbfc
--- /dev/null
+++ b/doc/development/internal_analytics/snowplow/review_guidelines.md
@@ -0,0 +1,44 @@
+---
+stage: Analytics
+group: Analytics Instrumentation
+info: To determine the technical writer assigned to the Stage/Group associated with this page, see https://about.gitlab.com/handbook/product/ux/technical-writing/#assignments
+---
+
+# Snowplow review guidelines
+
+This page includes introductory material for an
+[Analytics Instrumentation](https://about.gitlab.com/handbook/engineering/development/analytics/analytics-instrumentation/)
+review, and is specific to Snowplow related reviews. For broader advice and
+general best practices for code reviews, refer to our [code review guide](../../code_review.md).
+
+## Resources for reviewers
+
+- [Snowplow Guide](index.md)
+- [Event Dictionary](https://metrics.gitlab.com/snowplow/)
+
+## Review process
+
+We recommend an Analytics Instrumentation review when a merge request (MR) involves changes in
+events or touches Snowplow-related files.
+
+### Roles and process
+
+#### The merge request **author** should
+
+- For frontend events, when relevant, add a screenshot of the event in
+ the [testing tool](implementation.md#develop-and-test-snowplow) used.
+- For backend events, when relevant, add the output of the
+ [Snowplow Micro](implementation.md#test-backend-events-with-snowplow-micro) good events
+ `GET http://localhost:9090/micro/good` (it might be a good idea
+ to reset with `GET http://localhost:9090/micro/reset` first).
+- Add or update the event definition file according to the [Event Dictionary Guide](event_dictionary_guide.md).
+
+#### The Analytics Instrumentation **reviewer** should
+
+- Check that the [event schema](index.md#event-schema) is correct.
+- Check the [usage recommendations](implementation.md#usage-recommendations).
+- Check that an event definition file was created or updated in accordance with the [Event Dictionary Guide](event_dictionary_guide.md).
+- If needed, check that the events are firing locally using one of the
+[testing tools](implementation.md#develop-and-test-snowplow) available.
+- Approve the MR, and relabel the MR with `~"analytics instrumentation::approved"`.
+- If the Snowplow event mirrors a Redis HLL event, tag @mdrussell to review whether the payload is usable for this purpose.
diff --git a/doc/development/internal_analytics/snowplow/schemas.md b/doc/development/internal_analytics/snowplow/schemas.md
new file mode 100644
index 00000000000..21142f68d39
--- /dev/null
+++ b/doc/development/internal_analytics/snowplow/schemas.md
@@ -0,0 +1,190 @@
+---
+stage: Analytics
+group: Analytics Instrumentation
+info: To determine the technical writer assigned to the Stage/Group associated with this page, see https://about.gitlab.com/handbook/product/ux/technical-writing/#assignments
+---
+
+# Snowplow schemas
+
+This page provides Snowplow schema reference for GitLab events.
+
+## `gitlab_standard`
+
+We include the [`gitlab_standard` schema](https://gitlab.com/gitlab-org/iglu/-/blob/master/public/schemas/com.gitlab/gitlab_standard/jsonschema/) for structured events and page views.
+
+The [`StandardContext`](https://gitlab.com/gitlab-org/gitlab/-/blob/master/lib/gitlab/tracking/standard_context.rb)
+class represents this schema in the application. Some properties are
+[automatically populated for frontend events](implementation.md#snowplow-javascript-frontend-tracking),
+and can be [provided manually for backend events](implementation.md#implement-ruby-backend-tracking).
+
+| Field Name | Required | Default value | Type | Description |
+|-------------------------|:-------------------:|------------------------------|---------------------------|-------------------------------------------------------------------------------------------------------------------------|
+| `project_id` | **{dotted-circle}** | Current project ID * | integer | |
+| `namespace_id` | **{dotted-circle}** | Current group/namespace ID * | integer | |
+| `user_id` | **{dotted-circle}** | Current user ID * | integer | User database record ID attribute. This value undergoes a pseudonymization process at the collector level. |
+| `context_generated_at` | **{dotted-circle}** | Current timestamp | string (date time format) | Timestamp indicating when context was generated. |
+| `environment` | **{check-circle}** | Current environment | string (max 32 chars) | Name of the source environment, such as `production` or `staging` |
+| `source` | **{check-circle}** | Event source | string (max 32 chars) | Name of the source application, such as `gitlab-rails` or `gitlab-javascript` |
+| `plan` | **{dotted-circle}** | Current namespace plan * | string (max 32 chars) | Name of the plan for the namespace, such as `free`, `premium`, or `ultimate`. Automatically picked from the `namespace`. |
+| `google_analytics_id` | **{dotted-circle}** | GA ID value * | string (max 32 chars) | Google Analytics ID, present when set from our marketing sites. |
+| `is_gitlab_team_member` | **{dotted-circle}** | | boolean | Indicates whether the event is triggered by a GitLab team member |
+| `extra` | **{dotted-circle}** | | JSON | Any additional data associated with the event, in the form of key-value pairs |
+
+_\* Default value present for frontend events only_
+
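+As an illustration only, the data portion of a `gitlab_standard` context for a backend event might look like the following Ruby hash (field names come from the table above; the values are examples):
+
+```ruby
+gitlab_standard_data = {
+  environment: 'production',    # required
+  source: 'gitlab-rails',       # required
+  project_id: 42,
+  namespace_id: 7,
+  plan: 'premium',
+  is_gitlab_team_member: false,
+  extra: { reason: 'example' }  # arbitrary key-value pairs
+}
+```
+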
+## Default Schema
+
+Frontend events include a [web-specific schema](https://docs.snowplow.io/docs/understanding-your-pipeline/canonical-event/#web-specific-fields) provided by Snowplow.
+All URLs are pseudonymized. The entity identifier [replaces](https://docs.snowplow.io/docs/collecting-data/collecting-from-own-applications/javascript-trackers/javascript-tracker/javascript-tracker-v2/tracker-setup/other-parameters-2/#setting-a-custom-page-url-and-referrer-url) personally identifiable
+information (PII). PII includes usernames, group, and project names.
+Page titles are hardcoded as `GitLab` for the same reason.
+
+| Field Name | Required | Type | Description |
+|--------------------------|---------------------|-----------|----------------------------------------------------------------------------------------------------------------------------------|
+| `app_id` | **{check-circle}** | string | Unique identifier for website / application |
+| `base_currency` | **{dotted-circle}** | string | Reporting currency |
+| `br_colordepth` | **{dotted-circle}** | integer | Browser color depth |
+| `br_cookies` | **{dotted-circle}** | boolean | Does the browser permit cookies? |
+| `br_family` | **{dotted-circle}** | string | Browser family |
+| `br_features_director` | **{dotted-circle}** | boolean | Director plugin installed? |
+| `br_features_flash` | **{dotted-circle}** | boolean | Flash plugin installed? |
+| `br_features_gears` | **{dotted-circle}** | boolean | Google gears installed? |
+| `br_features_java` | **{dotted-circle}** | boolean | Java plugin installed? |
+| `br_features_pdf` | **{dotted-circle}** | boolean | Adobe PDF plugin installed? |
+| `br_features_quicktime` | **{dotted-circle}** | boolean | Quicktime plugin installed? |
+| `br_features_realplayer` | **{dotted-circle}** | boolean | RealPlayer plugin installed? |
+| `br_features_silverlight` | **{dotted-circle}** | boolean | Silverlight plugin installed? |
+| `br_features_windowsmedia` | **{dotted-circle}** | boolean | Windows media plugin installed? |
+| `br_lang` | **{dotted-circle}** | string | Language the browser is set to |
+| `br_name` | **{dotted-circle}** | string | Browser name |
+| `br_renderengine` | **{dotted-circle}** | string | Browser rendering engine |
+| `br_type` | **{dotted-circle}** | string | Browser type |
+| `br_version` | **{dotted-circle}** | string | Browser version |
+| `br_viewheight` | **{dotted-circle}** | string | Browser viewport height |
+| `br_viewwidth` | **{dotted-circle}** | string | Browser viewport width |
+| `collector_tstamp` | **{dotted-circle}** | timestamp | Time stamp for the event recorded by the collector |
+| `contexts` | **{dotted-circle}** | | |
+| `derived_contexts` | **{dotted-circle}** | | Contexts derived in the Enrich process |
+| `derived_tstamp` | **{dotted-circle}** | timestamp | Timestamp making allowance for inaccurate device clock |
+| `doc_charset` | **{dotted-circle}** | string | Web page's character encoding |
+| `doc_height` | **{dotted-circle}** | string | Web page height |
+| `doc_width` | **{dotted-circle}** | string | Web page width |
+| `domain_sessionid` | **{dotted-circle}** | string | Unique identifier (UUID) for this visit of this `user_id` to this domain |
+| `domain_sessionidx` | **{dotted-circle}** | integer | Index of number of visits that this `user_id` has made to this domain (The first visit is `1`) |
+| `domain_userid` | **{dotted-circle}** | string | Unique identifier for a user, based on a first party cookie (so domain specific) |
+| `dvce_created_tstamp` | **{dotted-circle}** | timestamp | Timestamp when event occurred, as recorded by client device |
+| `dvce_ismobile` | **{dotted-circle}** | boolean | Indicates whether device is mobile |
+| `dvce_screenheight` | **{dotted-circle}** | string | Screen / monitor resolution |
+| `dvce_screenwidth` | **{dotted-circle}** | string | Screen / monitor resolution |
+| `dvce_sent_tstamp` | **{dotted-circle}** | timestamp | Timestamp when event was sent by client device to collector |
+| `dvce_type` | **{dotted-circle}** | string | Type of device |
+| `etl_tags` | **{dotted-circle}** | string | JSON of tags for this ETL run |
+| `etl_tstamp` | **{dotted-circle}** | timestamp | Timestamp event began ETL |
+| `event` | **{dotted-circle}** | string | Event type |
+| `event_fingerprint` | **{dotted-circle}** | string | Hash of client-set event fields |
+| `event_format` | **{dotted-circle}** | string | Format for event |
+| `event_id` | **{dotted-circle}** | string | Event UUID |
+| `event_name` | **{dotted-circle}** | string | Event name |
+| `event_vendor` | **{dotted-circle}** | string | The company who developed the event model |
+| `event_version` | **{dotted-circle}** | string | Version of event schema |
+| `geo_city` | **{dotted-circle}** | string | City of IP origin |
+| `geo_country` | **{dotted-circle}** | string | Country of IP origin |
+| `geo_latitude` | **{dotted-circle}** | string | An approximate latitude |
+| `geo_longitude` | **{dotted-circle}** | string | An approximate longitude |
+| `geo_region` | **{dotted-circle}** | string | Region of IP origin |
+| `geo_region_name` | **{dotted-circle}** | string | Region of IP origin |
+| `geo_timezone` | **{dotted-circle}** | string | Time zone of IP origin |
+| `geo_zipcode` | **{dotted-circle}** | string | Zip (postal) code of IP origin |
+| `ip_domain` | **{dotted-circle}** | string | Second level domain name associated with the visitor's IP address |
+| `ip_isp` | **{dotted-circle}** | string | Visitor's ISP |
+| `ip_netspeed` | **{dotted-circle}** | string | Visitor's connection type |
+| `ip_organization` | **{dotted-circle}** | string | Organization associated with the visitor's IP address – defaults to ISP name if none is found |
+| `mkt_campaign` | **{dotted-circle}** | string | The campaign ID |
+| `mkt_clickid` | **{dotted-circle}** | string | The click ID |
+| `mkt_content` | **{dotted-circle}** | string | The content or ID of the ad. |
+| `mkt_medium` | **{dotted-circle}** | string | Type of traffic source |
+| `mkt_network` | **{dotted-circle}** | string | The ad network to which the click ID belongs |
+| `mkt_source` | **{dotted-circle}** | string | The company / website where the traffic came from |
+| `mkt_term` | **{dotted-circle}** | string | Keywords associated with the referrer |
+| `name_tracker` | **{dotted-circle}** | string | The tracker namespace |
+| `network_userid` | **{dotted-circle}** | string | Unique identifier for a user, based on a cookie from the collector (so set at a network level and shouldn't be set by a tracker) |
+| `os_family` | **{dotted-circle}** | string | Operating system family |
+| `os_manufacturer` | **{dotted-circle}** | string | Manufacturers of operating system |
+| `os_name` | **{dotted-circle}** | string | Name of operating system |
+| `os_timezone` | **{dotted-circle}** | string | Client operating system time zone |
+| `page_referrer` | **{dotted-circle}** | string | Referrer URL |
+| `page_title` | **{dotted-circle}** | string | To not expose personal identifying information, the page title is hardcoded as `GitLab` |
+| `page_url` | **{dotted-circle}** | string | Page URL |
+| `page_urlfragment` | **{dotted-circle}** | string | Fragment aka anchor |
+| `page_urlhost` | **{dotted-circle}** | string | Host aka domain |
+| `page_urlpath` | **{dotted-circle}** | string | Path to page |
+| `page_urlport` | **{dotted-circle}** | integer | Port if specified, 80 if not |
+| `page_urlquery` | **{dotted-circle}** | string | Query string |
+| `page_urlscheme` | **{dotted-circle}** | string | Scheme (protocol name) |
+| `platform` | **{dotted-circle}** | string | The platform the app runs on |
+| `pp_xoffset_max` | **{dotted-circle}** | integer | Maximum page x offset seen in the last ping period |
+| `pp_xoffset_min` | **{dotted-circle}** | integer | Minimum page x offset seen in the last ping period |
+| `pp_yoffset_max` | **{dotted-circle}** | integer | Maximum page y offset seen in the last ping period |
+| `pp_yoffset_min` | **{dotted-circle}** | integer | Minimum page y offset seen in the last ping period |
+| `refr_domain_userid` | **{dotted-circle}** | string | The Snowplow `domain_userid` of the referring website |
+| `refr_dvce_tstamp` | **{dotted-circle}** | timestamp | The time of attaching the `domain_userid` to the inbound link |
+| `refr_medium` | **{dotted-circle}** | string | Type of referer |
+| `refr_source` | **{dotted-circle}** | string | Name of referer if recognised |
+| `refr_term` | **{dotted-circle}** | string | Keywords if source is a search engine |
+| `refr_urlfragment` | **{dotted-circle}** | string | Referer URL fragment |
+| `refr_urlhost` | **{dotted-circle}** | string | Referer host |
+| `refr_urlpath` | **{dotted-circle}** | string | Referer page path |
+| `refr_urlport` | **{dotted-circle}** | integer | Referer port |
+| `refr_urlquery` | **{dotted-circle}** | string | Referer URL query string |
+| `refr_urlscheme` | **{dotted-circle}** | string | Referer scheme |
+| `se_action` | **{dotted-circle}** | string | The action / event itself |
+| `se_category` | **{dotted-circle}** | string | The category of event |
+| `se_label` | **{dotted-circle}** | string | A label often used to refer to the 'object' the action is performed on |
+| `se_property` | **{dotted-circle}** | string | A property associated with either the action or the object |
+| `se_value` | **{dotted-circle}** | decimal | A value associated with the user action |
+| `ti_category` | **{dotted-circle}** | string | Item category |
+| `ti_currency` | **{dotted-circle}** | string | Currency |
+| `ti_name` | **{dotted-circle}** | string | Item name |
+| `ti_orderid` | **{dotted-circle}** | string | Order ID |
+| `ti_price` | **{dotted-circle}** | decimal | Item price |
+| `ti_price_base` | **{dotted-circle}** | decimal | Item price in base currency |
+| `ti_quantity` | **{dotted-circle}** | integer | Item quantity |
+| `ti_sku` | **{dotted-circle}** | string | Item SKU |
+| `tr_affiliation` | **{dotted-circle}** | string | Transaction affiliation (such as channel) |
+| `tr_city` | **{dotted-circle}** | string | Delivery address: city |
+| `tr_country` | **{dotted-circle}** | string | Delivery address: country |
+| `tr_currency` | **{dotted-circle}** | string | Transaction Currency |
+| `tr_orderid` | **{dotted-circle}** | string | Order ID |
+| `tr_shipping` | **{dotted-circle}** | decimal | Delivery cost charged |
+| `tr_shipping_base` | **{dotted-circle}** | decimal | Shipping cost in base currency |
+| `tr_state` | **{dotted-circle}** | string | Delivery address: state |
+| `tr_tax` | **{dotted-circle}** | decimal | Transaction tax value (such as amount of VAT included) |
+| `tr_tax_base` | **{dotted-circle}** | decimal | Tax applied in base currency |
+| `tr_total` | **{dotted-circle}** | decimal | Transaction total value |
+| `tr_total_base` | **{dotted-circle}** | decimal | Total amount of transaction in base currency |
+| `true_tstamp` | **{dotted-circle}** | timestamp | User-set exact timestamp |
+| `txn_id` | **{dotted-circle}** | string | Transaction ID |
+| `unstruct_event` | **{dotted-circle}** | JSON | The properties of the event |
+| `uploaded_at` | **{dotted-circle}** | | |
+| `user_fingerprint` | **{dotted-circle}** | integer | User identifier based on (hopefully unique) browser features |
+| `user_id` | **{dotted-circle}** | string | Unique identifier for user, set by the business using setUserId |
+| `user_ipaddress` | **{dotted-circle}** | string | IP address |
+| `useragent` | **{dotted-circle}** | string | User agent (expressed as a browser string) |
+| `v_collector` | **{dotted-circle}** | string | Collector version |
+| `v_etl` | **{dotted-circle}** | string | ETL version |
+| `v_tracker` | **{dotted-circle}** | string | Identifier for Snowplow tracker |
+
+## `gitlab_service_ping`
+
+Backend events converted from ServicePing (`redis` and `redis_hll`) must include [ServicePing context](https://gitlab.com/gitlab-org/iglu/-/tree/master/public/schemas/com.gitlab/gitlab_service_ping/jsonschema)
+using the [helper class](https://gitlab.com/gitlab-org/gitlab/blob/master/lib/gitlab/tracking/service_ping_context.rb).
+
+For an example of a converted `redis_hll` event, see this [event with context](https://gitlab.com/gitlab-org/gitlab/-/edit/master/app/controllers/concerns/product_analytics_tracking.rb#L58).
+
+| Field Name | Required | Type | Description |
+|---------------|:-------------------:|------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| `data_source` | **{check-circle}** | string (max 64 chars) | The `data_source` attribute from the metrics YAML definition. |
+| `event_name`* | **{dotted-circle}** | string (max 128 chars) | When there is a many-to-many relationship between events and metrics, this field contains the name of a Redis event that can be used for aggregations in downstream systems |
+| `key_path`* | **{dotted-circle}** | string (max 256 chars) | The `key_path` attribute from the metrics YAML definition |
+
+_\* Either `event_name` or `key_path` is required_
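+
+For orientation, the data portion of a `gitlab_service_ping` context might look like the following Ruby hash (field names come from the table above; the values are examples, and either `event_name` or `key_path` would be present as noted):
+
+```ruby
+gitlab_service_ping_data = {
+  data_source: 'redis_hll',
+  event_name: 'users_creating_epics' # an example Redis event name
+}
+```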
diff --git a/doc/development/internal_analytics/snowplow/troubleshooting.md b/doc/development/internal_analytics/snowplow/troubleshooting.md
new file mode 100644
index 00000000000..885f4e0c16f
--- /dev/null
+++ b/doc/development/internal_analytics/snowplow/troubleshooting.md
@@ -0,0 +1,80 @@
+---
+stage: Analytics
+group: Analytics Instrumentation
+info: To determine the technical writer assigned to the Stage/Group associated with this page, see https://about.gitlab.com/handbook/product/ux/technical-writing/#assignments
+---
+
+# Troubleshooting Snowplow
+
+## Monitoring
+
+This page covers dashboards and alerts coming from a number of internal tools.
+
+For a brief video overview of the tools used to monitor Snowplow usage, watch [this internal video](https://www.youtube.com/watch?v=NxPS0aKa_oU) (you must be logged into GitLab Unfiltered to view it).
+
+## Good events drop
+
+### Symptoms
+
+You are alerted by a [Sisense alert](https://app.periscopedata.com/app/gitlab/alert/Volume-of-Snowplow-Good-events/5a5f80ef34fe450da5ebb84eaa84067f/edit) that is sent to the `#g_product_intelligence` Slack channel.
+
+### Locating the problem
+
+First, identify at which stage of the Snowplow data pipeline the drop is occurring.
+Start at the [Snowplow dashboard](https://console.aws.amazon.com/systems-manager/resource-groups/cloudwatch?dashboard=SnowPlow&region=us-east-1#) on CloudWatch.
+If you do not have access to CloudWatch, create an [access request issue](https://gitlab.com/gitlab-com/team-member-epics/access-requests/-/issues/9730) first.
+On the CloudWatch dashboard, set the time range to the last 4 weeks to get a better picture of system characteristics over time. Then check the following charts:
+
+1. `ELB New Flow Count` and `Collector Auto Scaling Group Network In/Out` - these show, respectively, the number of connections to collectors via load balancers and the data volume (in bytes) processed by collectors. If a drop is visible there, fewer events were fired from the GitLab application. See the [application layer guide](#troubleshooting-gitlab-application-layer) for more details.
+1. `Firehose Records to S3` - this shows how many event records were saved to the S3 bucket. If there was a drop on this chart but not on the charts in the previous step, the problem is located at the AWS infrastructure layer. See the [AWS layer guide](#troubleshooting-aws-layer).
+1. If no drop was visible on any of the previous charts, the problem is at the data warehouse layer. See the [data warehouse layer guide](#troubleshooting-data-warehouse-layer).
+
+### Troubleshooting GitLab application layer
+
+A drop occurring at the application layer can be a symptom of an issue, but it can also result from the normal application lifecycle, intended changes to analytics instrumentation or experiment tracking,
+or even a public holiday in some regions of the world with a larger user base. To verify whether there is an underlying problem to solve, check the following:
+
+1. Check `about.gitlab.com` website traffic on [Google Analytics](https://analytics.google.com/analytics/web/) to verify whether a public holiday might affect the overall use of the GitLab system.
+   1. You might need to open an access request for Google Analytics access first, for example: [access request internal issue](https://gitlab.com/gitlab-com/team-member-epics/access-requests/-/issues/1772).
+1. Plot `select date(dvce_created_tstamp) , event , count(*) from legacy.snowplow_unnested_events_90 where dvce_created_tstamp > '2021-06-15' and dvce_created_tstamp < '2021-07-10' group by 1 , 2 order by 1 , 2` in Sisense to see which type of events was responsible for the drop.
+1. Plot `select date(dvce_created_tstamp) ,se_category , count(*) from legacy.snowplow_unnested_events_90 where dvce_created_tstamp > '2021-06-15' and dvce_created_tstamp < '2021-07-31' and event = 'struct' group by 1 , 2 order by 1, 2` to see which events recorded the biggest drops in the suspected category.
+1. Check whether any merged MR might have caused a reduction in reported events. Pay attention to MRs labeled ~"analytics instrumentation" and ~"growth experiment".
+1. Check (via the [Grafana explore tab](https://dashboards.gitlab.net/explore)) the Prometheus counters `gitlab_snowplow_events_total`, `gitlab_snowplow_failed_events_total`, and `gitlab_snowplow_successful_events_total` to see how many events were fired correctly from GitLab.com. For example, the query `sum(rate(gitlab_snowplow_successful_events_total{env="gprd"}[5m])) / sum(rate(gitlab_snowplow_events_total{env="gprd"}[5m]))` charts the rate of good events compared to the total events sent. If the number drops below 1, the problem might be in the communication between GitLab and the AWS collectors fleet.
+1. Check the [logs in Kibana](https://log.gprd.gitlab.net/app/discover#) and filter with `{ "query": { "match_phrase": { "json.message": "failed to be reported to collector at" } } }` to see whether any failed events were logged.
+
+For the results of an investigation into an unexpected drop in Snowplow event volume, see [this issue](https://gitlab.com/gitlab-org/gitlab/-/issues/335206).
+
+### Troubleshooting AWS layer
+
+Already conducted investigations:
+
+- [Steep decrease of Snowplow page views](https://gitlab.com/gitlab-org/gitlab/-/issues/268009)
+- [`snowplow.trx.gitlab.net` unreachable](https://gitlab.com/gitlab-com/gl-infra/production/-/issues/5073)
+
+### Troubleshooting data warehouse layer
+
+Reach out to the [Data team](https://about.gitlab.com/handbook/business-technology/data-team/) to ask about the current state of the data warehouse. Their handbook page has a [section with contact details](https://about.gitlab.com/handbook/business-technology/data-team/#how-to-connect-with-us).
+
+## Delay in Snowplow Enrichers
+
+If there is an alert for **Snowplow Raw Good Stream Backing Up**, we receive an email notification. This sometimes happens because the Snowplow Enrichers don't scale well enough for the volume of Snowplow events.
+
+If the delay goes over 48 hours, we lose data.
+
+### Contact SRE on-call
+
+Send a message in the [#infrastructure_lounge](https://gitlab.slack.com/archives/CB3LSMEJV) Slack channel using the following template:
+
+```markdown
+Hello team!
+
+We received an alert for [Snowplow Raw Good Stream Backing Up](https://us-east-1.console.aws.amazon.com/cloudwatch/home?region=us-east-1#alarmsV2:alarm/SnowPlow+Raw+Good+Stream+Backing+Up?).
+
+Enrichers are not scaling well for the amount of events we receive.
+
+See the [dashboard](https://us-east-1.console.aws.amazon.com/cloudwatch/home?region=us-east-1#dashboards:name=SnowPlow).
+
+Could we get assistance to fix the delay?
+
+Thank you!
+```
diff --git a/doc/development/product_qualified_lead_guide/index.md b/doc/development/product_qualified_lead_guide/index.md
index fb8ec478840..9f5a1a1110f 100644
--- a/doc/development/product_qualified_lead_guide/index.md
+++ b/doc/development/product_qualified_lead_guide/index.md
@@ -87,7 +87,7 @@ The hand-raise lead form accepts the following parameters via provide or inject.
},
```
-The `ctaTracking` parameters follow [the `data-track` attributes](../snowplow/implementation.md#data-track-attributes) for implementing Snowplow tracking. The provided tracking attributes are attached to the button inside the `HandRaiseLeadButton` component, which triggers the hand-raise lead modal when selected.
+The `ctaTracking` parameters follow [the `data-track` attributes](../internal_analytics/snowplow/implementation.md#data-track-attributes) for implementing Snowplow tracking. The provided tracking attributes are attached to the button inside the `HandRaiseLeadButton` component, which triggers the hand-raise lead modal when selected.
### Monitor the lead location
diff --git a/doc/development/service_ping/implement.md b/doc/development/service_ping/implement.md
index 0677e8febf3..c1077793fb9 100644
--- a/doc/development/service_ping/implement.md
+++ b/doc/development/service_ping/implement.md
@@ -1,882 +1,11 @@
---
-stage: Analytics
-group: Analytics Instrumentation
-info: To determine the technical writer assigned to the Stage/Group associated with this page, see https://about.gitlab.com/handbook/product/ux/technical-writing/#assignments
+redirect_to: '../internal_analytics/service_ping/implement.md'
+remove_date: '2023-08-20'
---
-# Implement Service Ping
+This document was moved to [another location](../internal_analytics/service_ping/implement.md).
-Service Ping consists of two kinds of data:
-
-- **Counters**: Track how often a certain event happened over time, such as how many CI/CD pipelines have run.
- They are monotonic and usually trend up.
-- **Observations**: Facts collected from one or more GitLab instances and can carry arbitrary data.
- There are no general guidelines for how to collect those, due to the individual nature of that data.
-
-To implement a new metric in Service Ping, follow these steps:
-
-1. [Implement the required counter](#types-of-counters)
-1. [Name and place the metric](metrics_dictionary.md#metric-key_path)
-1. [Test counters manually using your Rails console](#test-counters-manually-using-your-rails-console)
-1. [Generate the SQL query](#generate-the-sql-query)
-1. [Optimize queries with Database Lab](#optimize-queries-with-database-lab)
-1. [Add the metric definition to the Metrics Dictionary](#add-the-metric-definition)
-1. [Add the metric to the Versions Application](#add-the-metric-to-the-versions-application)
-1. [Create a merge request](#create-a-merge-request)
-1. [Verify your metric](#verify-your-metric)
-1. [Set up and test Service Ping locally](#set-up-and-test-service-ping-locally)
-
-## Instrumentation classes
-
-NOTE:
-Implementing metrics directly in `usage_data.rb` is deprecated.
-When you add or change a Service Ping Metric, you must migrate metrics to [instrumentation classes](metrics_instrumentation.md).
-For information about the progress on migrating Service Ping metrics, see this [epic](https://gitlab.com/groups/gitlab-org/-/epics/5547).
-
-For example, we have the following instrumentation class:
-`lib/gitlab/usage/metrics/instrumentations/count_boards_metric.rb`.
-
-You should add it to `usage_data.rb` as follows:
-
-```ruby
-boards: add_metric('CountBoardsMetric', time_frame: 'all'),
-```
-
-## Types of counters
-
-There are several types of counters for metrics:
-
-- **[Batch counters](#batch-counters)**: Used for counts, sums, and averages.
-- **[Redis counters](#redis-counters):** Used for in-memory counts.
-- **[Alternative counters](#alternative-counters):** Used for settings and configurations.
-
-NOTE:
-Only use the provided counter methods. Each counter method contains a built-in fail-safe mechanism that isolates each counter to avoid breaking the entire Service Ping process.
-
-### Batch counters
-
-For large tables, PostgreSQL can take a long time to count rows due to MVCC [(Multi-version Concurrency Control)](https://en.wikipedia.org/wiki/Multiversion_concurrency_control). Batch counting is a counting method where a single large query is broken into multiple smaller queries. For example, instead of a single query querying 1,000,000 records, with batch counting, you can execute 100 queries of 10,000 records each. Batch counting is useful for avoiding database timeouts as each batch query is significantly shorter than one single long running query.
-
-For GitLab.com, there are extremely large tables with 15 second query timeouts, so we use batch counting to avoid encountering timeouts. Here are the sizes of some GitLab.com tables:
-
-| Table | Row counts in millions |
-|------------------------------|------------------------|
-| `merge_request_diff_commits` | 2280 |
-| `ci_build_trace_sections` | 1764 |
-| `merge_request_diff_files` | 1082 |
-| `events` | 514 |
-
-Batch counting requires indexes on columns to calculate max, min, and range queries. In some cases,
-you must add a specialized index on the columns involved in a counter.
-
-#### Ordinary batch counters
-
-Create a new [database metrics](metrics_instrumentation.md#database-metrics) instrumentation class with `count` operation for a given `ActiveRecord_Relation`
-
-Method:
-
-```ruby
-add_metric('CountIssuesMetric', time_frame: 'all')
-```
-
-Examples:
-
-Examples using `usage_data.rb` have been [deprecated](usage_data.md). We recommend to use [instrumentation classes](metrics_instrumentation.md).
-
-#### Distinct batch counters
-
-Create a new [database metrics](metrics_instrumentation.md#database-metrics) instrumentation class with `distinct_count` operation for a given `ActiveRecord_Relation`.
-
-Method:
-
-```ruby
-add_metric('CountUsersAssociatingMilestonesToReleasesMetric', time_frame: 'all')
-```
-
-WARNING:
-Counting over non-unique columns can lead to performance issues. For more information, see the [iterating tables in batches](../database/iterating_tables_in_batches.md) guide.
-
-Examples:
-
-Examples using `usage_data.rb` have been [deprecated](usage_data.md). We recommend to use [instrumentation classes](metrics_instrumentation.md).
-
-#### Sum batch operation
-
-Sum the values of a given ActiveRecord_Relation on given column and handles errors.
-Handles the `ActiveRecord::StatementInvalid` error
-
-Method:
-
-```ruby
-add_metric('JiraImportsTotalImportedIssuesCountMetric')
-```
-
-#### Average batch operation
-
-Average the values of a given `ActiveRecord_Relation` on given column and handles errors.
-
-Method:
-
-```ruby
-add_metric('CountIssuesWeightAverageMetric')
-```
-
-Examples:
-
-Examples using `usage_data.rb` have been [deprecated](usage_data.md). We recommend to use [instrumentation classes](metrics_instrumentation.md).
-
-#### Grouping and batch operations
-
-The `count`, `distinct_count` and `sum` batch counters can accept an `ActiveRecord::Relation`
-object, which groups by a specified column. With a grouped relation, the methods do batch counting,
-handle errors, and returns a hash table of key-value pairs.
-
-Examples:
-
-```ruby
-count(Namespace.group(:type))
-# returns => {nil=>179, "Group"=>54}
-
-distinct_count(Project.group(:visibility_level), :creator_id)
-# returns => {0=>1, 10=>1, 20=>11}
-
-sum(Issue.group(:state_id), :weight))
-# returns => {1=>3542, 2=>6820}
-```
-
-#### Add operation
-
-Sum the values given as parameters. Handles the `StandardError`.
-Returns `-1` if any of the arguments are `-1`.
-
-Method:
-
-```ruby
-add(*args)
-```
-
-Examples:
-
-```ruby
-project_imports = distinct_count(::Project.where.not(import_type: nil), :creator_id)
-bulk_imports = distinct_count(::BulkImport, :user_id)
-
- add(project_imports, bulk_imports)
-```
-
-#### Estimated batch counters
-
-> [Introduced](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/48233) in GitLab 13.7.
-
-Estimated batch counter functionality handles `ActiveRecord::StatementInvalid` errors
-when used through the provided `estimate_batch_distinct_count` method.
-Errors return a value of `-1`.
-
-WARNING:
-This functionality estimates a distinct count of a specific ActiveRecord_Relation in a given column,
-which uses the [HyperLogLog](https://static.googleusercontent.com/media/research.google.com/en//pubs/archive/40671.pdf) algorithm.
-As the HyperLogLog algorithm is probabilistic, the **results always include error**.
-The highest encountered error rate is 4.9%.
-
-When correctly used, the `estimate_batch_distinct_count` method enables efficient counting over
-columns that contain non-unique values, which cannot be assured by other counters.
-
-##### `estimate_batch_distinct_count` method
-
-Method:
-
-```ruby
-estimate_batch_distinct_count(relation, column = nil, batch_size: nil, start: nil, finish: nil)
-```
-
-The [method](https://gitlab.com/gitlab-org/gitlab/-/blob/master/lib/gitlab/utils/usage_data.rb#L63)
-includes the following arguments:
-
-- `relation`: The ActiveRecord_Relation to perform the count.
-- `column`: The column to perform the distinct count. The default is the primary key.
-- `batch_size`: From `Gitlab::Database::PostgresHll::BatchDistinctCounter::DEFAULT_BATCH_SIZE`. Default value: 10,000.
-- `start`: The custom start of the batch count, to avoid complex minimum calculations.
-- `finish`: The custom end of the batch count to avoid complex maximum calculations.
-
-The method includes the following prerequisites:
-
-- The supplied `relation` must include the primary key defined as the numeric column.
- For example: `id bigint NOT NULL`.
-- The `estimate_batch_distinct_count` can handle a joined relation. To use its ability to
- count non-unique columns, the joined relation **must not** have a one-to-many relationship,
- such as `has_many :boards`.
-- Both `start` and `finish` arguments should always represent primary key relationship values,
- even if the estimated count refers to another column, for example:
-
- ```ruby
- estimate_batch_distinct_count(::Note, :author_id, start: ::Note.minimum(:id), finish: ::Note.maximum(:id))
- ```
-
-Examples:
-
-1. Simple execution of estimated batch counter, with only relation provided,
- returned value represents estimated number of unique values in `id` column
- (which is the primary key) of `Project` relation:
-
- ```ruby
- estimate_batch_distinct_count(::Project)
- ```
-
-1. Execution of estimated batch counter, where provided relation has applied
- additional filter (`.where(time_period)`), number of unique values estimated
- in custom column (`:author_id`), and parameters: `start` and `finish` together
- apply boundaries that defines range of provided relation to analyze:
-
- ```ruby
- estimate_batch_distinct_count(::Note.with_suggestions.where(time_period), :author_id, start: ::Note.minimum(:id), finish: ::Note.maximum(:id))
- ```
-
-When instrumenting metric with usage of estimated batch counter please add
-`_estimated` suffix to its name, for example:
-
-```ruby
- "counts": {
- "ci_builds_estimated": estimate_batch_distinct_count(Ci::Build),
- ...
-```
-
-### Redis counters
-
-Handles `::Redis::CommandError` and `Gitlab::UsageDataCounters::BaseCounter::UnknownEvent`.
-Returns -1 when a block is sent or hash with all values and -1 when a `counter(Gitlab::UsageDataCounters)` is sent.
-The different behavior is due to 2 different implementations of the Redis counter.
-
-Method:
-
-```ruby
-redis_usage_data(counter, &block)
-```
-
-Arguments:
-
-- `counter`: a counter from `Gitlab::UsageDataCounters`, that has `fallback_totals` method implemented
-- or a `block`: which is evaluated
-
-#### Ordinary Redis counters
-
-Example of implementation: [`Gitlab::UsageDataCounters::WikiPageCounter`](https://gitlab.com/gitlab-org/gitlab/-/blob/master/lib/gitlab/usage_data_counters/wiki_page_counter.rb), using Redis methods [`INCR`](https://redis.io/commands/incr/) and [`GET`](https://redis.io/commands/get/).
-
-Events are handled by counter classes in the `Gitlab::UsageDataCounters` namespace, inheriting from `BaseCounter`, that are either:
-
-1. Listed in [`Gitlab::UsageDataCounters::COUNTERS`](https://gitlab.com/gitlab-org/gitlab/-/blob/master/lib/gitlab/usage_data_counters.rb#L5) to be then included in `Gitlab::UsageData`.
-
-1. Specified in the metric definition using the `RedisMetric` instrumentation class by their `prefix` option to be picked up using the [metric instrumentation](metrics_instrumentation.md) framework. Refer to the [Redis metrics](metrics_instrumentation.md#redis-metrics) documentation for an example implementation.
-
-Inheriting classes are expected to override `KNOWN_EVENTS` and `PREFIX` constants to build event names and associated metrics. For example, for prefix `issues` and events array `%w[create, update, delete]`, three metrics will be added to the Service Ping payload: `counts.issues_create`, `counts.issues_update` and `counts.issues_delete`.
-
-##### `UsageData` API
-
-You can use the `UsageData` API to track events.
-To track events, the `usage_data_api` feature flag must
-be enabled (set to `default_enabled: true`).
-Enabled by default in GitLab 13.7 and later.
-
-##### UsageData API tracking
-
-1. Track events using the [`UsageData` API](#usagedata-api).
-
- Increment event count using an ordinary Redis counter, for a given event name.
-
- API requests are protected by checking for a valid CSRF token.
-
- ```plaintext
- POST /usage_data/increment_counter
- ```
-
- | Attribute | Type | Required | Description |
- | :-------- | :--- | :------- | :---------- |
- | `event` | string | yes | The event name to track. |
-
- Response:
-
- - `200` if the event was tracked.
- - `400 Bad request` if the event parameter is missing.
- - `401 Unauthorized` if the user is not authenticated.
- - `403 Forbidden` if an invalid CSRF token is provided.
-
-1. Track events using the JavaScript/Vue API helper which calls the [`UsageData` API](#usagedata-api).
-
- To track events, `usage_data_api` and `usage_data_#{event_name}` must be enabled.
-
- ```javascript
- import api from '~/api';
-
- api.trackRedisCounterEvent('my_already_defined_event_name'),
- ```
-
-#### Redis HLL counters
-
-WARNING:
-HyperLogLog (HLL) is a probabilistic algorithm and its **results always includes some small error**. According to [Redis documentation](https://redis.io/commands/pfcount/), data from
-used HLL implementation is "approximated with a standard error of 0.81%".
-
-NOTE:
- A user's consent for `usage_stats` (`User.single_user&.requires_usage_stats_consent?`) is not checked during the data tracking stage due to performance reasons. Keys corresponding to those counters are present in Redis even if `usage_stats_consent` is still required. However, no metric is collected from Redis and reported back to GitLab as long as `usage_stats_consent` is required.
-
-With `Gitlab::UsageDataCounters::HLLRedisCounter` we have available data structures used to count unique values.
-
-Implemented using Redis methods [PFADD](https://redis.io/commands/pfadd/) and [PFCOUNT](https://redis.io/commands/pfcount/).
-
-##### Add new events
-
-1. Define events in [`known_events`](https://gitlab.com/gitlab-org/gitlab/-/blob/master/lib/gitlab/usage_data_counters/known_events/).
-
- Example event:
-
- ```yaml
- - name: users_creating_epics
- aggregation: weekly
- ```
-
- Keys:
-
- - `name`: unique event name.
-
- Name format for Redis HLL events `{hll_counters}_<name>`
-
- Example names: `users_creating_epics`, `users_triggering_security_scans`.
-
- - `aggregation`: may be set to a `:daily` or `:weekly` key. Defines how counting data is stored in Redis.
- Aggregation on a `daily` basis does not pull more fine grained data.
-
-1. Use one of the following methods to track the event:
-
- - In the controller using the `ProductAnalyticsTracking` module and the following format:
-
- ```ruby
- track_event(*controller_actions, name:, action:, label:, conditions: nil, destinations: [:redis_hll], &block)
- ```
-
- Arguments:
-
-     - `controller_actions`: the controller actions to track.
-     - `name`: the event name.
-     - `action`: required if the destination is `:snowplow`. Action name for the triggered event. See the [event schema](../snowplow/index.md#event-schema) for more details.
-     - `label`: required if the destination is `:snowplow`. Label for the triggered event. See the [event schema](../snowplow/index.md#event-schema) for more details.
-     - `conditions`: optional custom conditions. Uses the same format as Rails callbacks.
-     - `destinations`: optional list of destinations. Currently supports `:redis_hll` and `:snowplow`. Default: `:redis_hll`.
-     - `&block`: optional block that computes and returns the `custom_id` that we want to track. This overrides the `visitor_id`.
-
- Example:
-
- ```ruby
- # controller
- class ProjectsController < Projects::ApplicationController
- include ProductAnalyticsTracking
-
- skip_before_action :authenticate_user!, only: :show
- track_event :index, :show,
- name: 'users_visiting_projects',
- action: 'user_perform_visit',
- label: 'redis_hll_counters.users_visiting_project_monthly',
- destinations: %i[redis_hll snowplow]
-
- def index
- render html: 'index'
- end
-
- def new
- render html: 'new'
- end
-
- def show
- render html: 'show'
- end
- end
- ```
-
- - In the API using the `increment_unique_values(event_name, values)` helper method.
-
- Arguments:
-
- - `event_name`: the event name.
- - `values`: the values counted. Can be one value or an array of values.
-
- Example:
-
- ```ruby
- get ':id/registry/repositories' do
- repositories = ContainerRepositoriesFinder.new(
- user: current_user, subject: user_group
- ).execute
-
- increment_unique_values('users_listing_repositories', current_user.id)
-
- present paginate(repositories), with: Entities::ContainerRegistry::Repository, tags: params[:tags], tags_count: params[:tags_count]
- end
- ```
-
- - Using `track_usage_event(event_name, values)` in services and GraphQL.
-
- Increment unique values count using Redis HLL, for a given event name.
-
- Examples:
-
- - [Track usage event for an incident in a service](https://gitlab.com/gitlab-org/gitlab/-/blob/v13.8.3-ee/app/services/issues/update_service.rb#L66)
- - [Track usage event for an incident in GraphQL](https://gitlab.com/gitlab-org/gitlab/-/blob/v13.8.3-ee/app/graphql/mutations/alert_management/update_alert_status.rb#L16)
-
- ```ruby
- track_usage_event(:incident_management_incident_created, current_user.id)
- ```
-
- - Using the [`UsageData` API](#usagedata-api).
-
- Increment unique users count using Redis HLL, for a given event name.
-
- API requests are protected by checking for a valid CSRF token.
-
- ```plaintext
- POST /usage_data/increment_unique_users
- ```
-
- | Attribute | Type | Required | Description |
- | :-------- | :--- | :------- | :---------- |
- | `event` | string | yes | The event name to track |
-
- Response:
-
- - `200` if the event was tracked, or if tracking failed for any reason.
- - `400 Bad request` if an event parameter is missing.
- - `401 Unauthorized` if the user is not authenticated.
- - `403 Forbidden` if an invalid CSRF token is provided.
-
-   - Using the JavaScript/Vue API helper, which calls the [`UsageData` API](#usagedata-api).
-
-     Example for an existing event already defined in [known events](https://gitlab.com/gitlab-org/gitlab/-/blob/master/lib/gitlab/usage_data_counters/known_events/):
-
-     ```javascript
-     import api from '~/api';
-
-     api.trackRedisHllUserEvent('my_already_defined_event_name');
-     ```
-
-1. Get event data using `Gitlab::UsageDataCounters::HLLRedisCounter.unique_events(event_names:, start_date:, end_date:, context: '')`.
-
- Arguments:
-
- - `event_names`: the list of event names.
- - `start_date`: start date of the period for which we want to get event data.
- - `end_date`: end date of the period for which we want to get event data.
- - `context`: context of the event. Allowed values are `default`, `free`, `bronze`, `silver`, `gold`, `starter`, `premium`, `ultimate`.
-
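-   For example, a call over the current week for the default context might look like this (the event name is illustrative):
-
-   ```ruby
-   Gitlab::UsageDataCounters::HLLRedisCounter.unique_events(
-     event_names: ['users_creating_epics'],
-     start_date: Date.current.beginning_of_week,
-     end_date: Date.current.end_of_week,
-     context: 'default'
-   )
-   ```
-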
-1. Test tracking and getting unique events.
-
-   Trigger events in a Rails console by using the `track_event` method:
-
-   ```ruby
-   Gitlab::UsageDataCounters::HLLRedisCounter.track_event('users_viewing_compliance_audit_events', values: 1)
-   Gitlab::UsageDataCounters::HLLRedisCounter.track_event('users_viewing_compliance_audit_events', values: [2, 3])
-   ```
-
-   Next, get the unique events for the current week:
-
-   ```ruby
-   # Get unique events for the metric for the current week
-   Gitlab::UsageDataCounters::HLLRedisCounter.unique_events(event_names: 'users_viewing_compliance_audit_events',
-     start_date: Date.current.beginning_of_week, end_date: Date.current.next_week)
-   ```
-
-##### Recommendations
-
-We have the following recommendations for [adding new events](#add-new-events):
-
-- Event aggregation: weekly.
-- When adding new metrics, use a [feature flag](../../operations/feature_flags.md) to control the impact.
-  It's recommended to disable the new feature flag by default (set `default_enabled: false`); see the sketch after this list.
-- Events can be triggered using the `UsageData` API, which helps when there are more than 10 events per change.
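-
-A minimal sketch of the feature flag recommendation above, assuming a hypothetical flag name, event name, and a `current_user` in scope:
-
-```ruby
-# Inside the code path that triggers the event: only track when the
-# (disabled-by-default) flag is turned on.
-return unless Feature.enabled?(:track_users_creating_epics_hll)
-
-Gitlab::UsageDataCounters::HLLRedisCounter.track_event('users_creating_epics', values: current_user.id)
-```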
-
-##### Enable or disable Redis HLL tracking
-
-We can enable or disable tracking completely by using the global `redis_hll_tracking` feature flag:
-
-```shell
-/chatops run feature set redis_hll_tracking true
-/chatops run feature set redis_hll_tracking false
-```
-
-##### Known events are added automatically in Service Data payload
-
-Service Ping adds all events from [`known_events/*.yml`](https://gitlab.com/gitlab-org/gitlab/-/blob/master/lib/gitlab/usage_data_counters/known_events) to Service Data generation under the `redis_hll_counters` key. This column is stored in [version-app as JSON](https://gitlab.com/gitlab-services/version-gitlab-com/-/blob/master/db/schema.rb#L209).
-For each event we add metrics for the weekly and monthly time frames, and totals for each where applicable:
-
-- `#{event_name}_weekly`: Data for 7 days for daily [aggregation](#add-new-events) events and data for the last complete week for weekly [aggregation](#add-new-events) events.
-- `#{event_name}_monthly`: Data for 28 days for daily [aggregation](#add-new-events) events and data for the last 4 complete weeks for weekly [aggregation](#add-new-events) events.
-
-Example of `redis_hll_counters` data:
-
-```ruby
-{:redis_hll_counters=>
- {"compliance"=>
- {"users_viewing_compliance_dashboard_weekly"=>0,
- "users_viewing_compliance_dashboard_monthly"=>0,
- "users_viewing_compliance_audit_events_weekly"=>0,
- "users_viewing_audit_events_monthly"=>0,
- "compliance_total_unique_counts_weekly"=>0,
- "compliance_total_unique_counts_monthly"=>0},
- "analytics"=>
- {"users_viewing_analytics_group_devops_adoption_weekly"=>0,
- "users_viewing_analytics_group_devops_adoption_monthly"=>0,
- "analytics_total_unique_counts_weekly"=>0,
- "analytics_total_unique_counts_monthly"=>0},
- "ide_edit"=>
- {"users_editing_by_web_ide_weekly"=>0,
- "users_editing_by_web_ide_monthly"=>0,
- "users_editing_by_sfe_weekly"=>0,
- "users_editing_by_sfe_monthly"=>0,
- "ide_edit_total_unique_counts_weekly"=>0,
- "ide_edit_total_unique_counts_monthly"=>0}
- }
-}
-```
-
-Example:
-
-```ruby
-# Redis Counters
-redis_usage_data(Gitlab::UsageDataCounters::WikiPageCounter)
-
-# Define events in common.yml https://gitlab.com/gitlab-org/gitlab/-/blob/master/lib/gitlab/usage_data_counters/known_events/common.yml
-
-# Tracking events
-Gitlab::UsageDataCounters::HLLRedisCounter.track_event('users_expanding_vulnerabilities', values: visitor_id)
-
-# Get unique events for metric
-redis_usage_data { Gitlab::UsageDataCounters::HLLRedisCounter.unique_events(event_names: 'users_expanding_vulnerabilities', start_date: 28.days.ago, end_date: Date.current) }
-```
-
-### Alternative counters
-
-Handles `StandardError` and falls back to -1, so that a single exception does not cause all measures to fail.
-Mainly used for settings and configurations.
-
-Method:
-
-```ruby
-alt_usage_data(value = nil, fallback: -1, &block)
-```
-
-Arguments:
-
-- `value`: a static value; if given, it is returned as-is.
-- `block`: alternatively, a block, which is evaluated and its result returned.
-- `fallback: -1`: the common value used for any metrics that are failing.
-
-Example:
-
-```ruby
-alt_usage_data { Gitlab::VERSION }
-alt_usage_data { Gitlab::CurrentSettings.uuid }
-alt_usage_data(999)
-```
-
-### Add counters to build new metrics
-
-When adding the results of two counters, use the `add` Service Data method that
-handles fallback values and exceptions. It also generates a valid [SQL export](index.md#export-service-ping-data).
-
-Example:
-
-```ruby
-add(User.active, User.bot)
-```
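-
-Assuming the fallback behavior described in [Fallback values for Service Ping](#fallback-values-for-service-ping), a failing child counter propagates its fallback instead of producing a misleadingly low sum. The values below are illustrative only:
-
-```ruby
-# Sketch: if one of the child counters already failed (-1), the combined
-# metric reports the fallback rather than a partial total.
-add(10, 20) # => 30
-add(10, -1) # => -1
-```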
-
-### Prometheus queries
-
-In those cases where operational metrics should be part of Service Ping, a database or Redis query is unlikely
-to provide useful data. Instead, Prometheus might be more appropriate, because most GitLab architectural
-components publish metrics to it that can be queried back, aggregated, and included as Service Data.
-
-NOTE:
-Prometheus as a data source for Service Ping is only available for single-node Omnibus installations
-that are running the [bundled Prometheus](../../administration/monitoring/prometheus/index.md) instance.
-
-To query Prometheus for metrics, a helper method is available that `yield`s a fully configured
-`PrometheusClient`, provided Prometheus is available as described in the note above:
-
-```ruby
-with_prometheus_client do |client|
- response = client.query('<your query>')
- ...
-end
-```
-
-Refer to [the `PrometheusClient` definition](https://gitlab.com/gitlab-org/gitlab/-/blob/master/lib/gitlab/prometheus_client.rb)
-for how to use its API to query for data.
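-
-For example, a hypothetical metric could be derived from a PromQL query like the following (the query and variable name are illustrative, not an existing Service Ping metric):
-
-```ruby
-# Sketch only: wrapping the query in the helper lets the metric degrade
-# gracefully when no bundled Prometheus instance is available.
-node_memory_total_bytes = with_prometheus_client do |client|
-  client.query('avg(node_memory_MemTotal_bytes)')
-end
-```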
-
-### Fallback values for Service Ping
-
-We return fallback values in these cases:
-
-| Case | Value |
-|-----------------------------|-------|
-| Deprecated Metric ([Removed with version 14.3](https://gitlab.com/gitlab-org/gitlab/-/issues/335894)) | -1000 |
-| Timeouts, general failures | -1 |
-| Standard errors in counters | -2 |
-| Histogram metrics failure | { '-1' => -1 } |
-
-## Test counters manually using your Rails console
-
-```ruby
-# count
-Gitlab::UsageData.count(User.active)
-Gitlab::UsageData.count(::Clusters::Cluster.aws_installed.enabled, :cluster_id)
-
-# count distinct
-Gitlab::UsageData.distinct_count(::Project, :creator_id)
-Gitlab::UsageData.distinct_count(::Note.with_suggestions.where(time_period), :author_id, start: ::User.minimum(:id), finish: ::User.maximum(:id))
-```
-
-## Generate the SQL query
-
-Your Rails console returns the generated SQL queries. For example:
-
-```ruby
-pry(main)> Gitlab::UsageData.count(User.active)
- (2.6ms) SELECT "features"."key" FROM "features"
- (15.3ms) SELECT MIN("users"."id") FROM "users" WHERE ("users"."state" IN ('active')) AND ("users"."user_type" IS NULL OR "users"."user_type" IN (6, 4))
- (2.4ms) SELECT MAX("users"."id") FROM "users" WHERE ("users"."state" IN ('active')) AND ("users"."user_type" IS NULL OR "users"."user_type" IN (6, 4))
- (1.9ms) SELECT COUNT("users"."id") FROM "users" WHERE ("users"."state" IN ('active')) AND ("users"."user_type" IS NULL OR "users"."user_type" IN (6, 4)) AND "users"."id" BETWEEN 1 AND 100000
-```
-
-## Optimize queries with Database Lab
-
-[Database Lab](../database/database_lab.md) is a service that uses a production clone to test queries.
-
-- GitLab.com's production database has a 15 second timeout.
-- Any single query must stay below the [1 second execution time](../database/query_performance.md#timing-guidelines-for-queries) with cold caches.
-- Add a specialized index on columns involved to reduce the execution time.
-
-To understand the query's execution, we add the following information
-to a merge request description:
-
-- For counters that have a `time_period` test, we add information for both (see the sketch after this list):
-  - `time_period = {}` for all time periods.
-  - `time_period = { created_at: 28.days.ago..Time.current }` for the last 28 days.
-- Execution plan and query time before and after optimization.
-- Query generated for the index and time.
-- Migration output for up and down execution.
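-
-A minimal sketch of exercising both `time_period` variants for a hypothetical counter in a Rails console:
-
-```ruby
-# time_period = {} (all time)
-Gitlab::UsageData.distinct_count(::Issue.where({}), :author_id)
-
-# time_period = { created_at: 28.days.ago..Time.current } (last 28 days)
-Gitlab::UsageData.distinct_count(::Issue.where(created_at: 28.days.ago..Time.current), :author_id)
-```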
-
-For more details, see the [database review guide](../database_review.md#preparation-when-adding-or-modifying-queries).
-
-### Optimization recommendations and examples
-
-- Use specialized indexes. For examples, see these merge requests:
- - [Example 1](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/26871)
- - [Example 2](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/26445)
-- Use defined `start` and `finish`. These values can be memoized and reused, as in this
- [example merge request](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/37155).
-- Avoid joins and unnecessary complexity in your queries. See this
- [example merge request](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/36316) as an example.
-- Set a custom `batch_size` for `distinct_count`, as in this [example merge request](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/38000).
-
-## Add the metric definition
-
-See the [Metrics Dictionary guide](metrics_dictionary.md) for more information.
-
-## Add the metric to the Versions Application
-
-Check if the new metric must be added to the Versions Application. See the `usage_data` [schema](https://gitlab.com/gitlab-services/version-gitlab-com/-/blob/master/db/schema.rb#L147) and Service Data [parameters accepted](https://gitlab.com/gitlab-services/version-gitlab-com/-/blob/master/app/services/usage_ping.rb). Any metrics added under the `counts` key are saved in the `stats` column.
-
-## Create a merge request
-
-Create a merge request for the new Service Ping metric, and do the following:
-
-- Add the `feature` label to the merge request. A metric is a user-facing change and is part of expanding the Service Ping feature.
-- Add a changelog entry that complies with the [changelog entries guide](../changelog.md).
-- Ask for an Analytics Instrumentation review.
-  On GitLab.com, we have DangerBot set up to monitor Analytics Instrumentation related files and recommend an [Analytics Instrumentation review](review_guidelines.md).
-
-## Verify your metric
-
-On GitLab.com, the Analytics Instrumentation team regularly [monitors Service Ping](https://gitlab.com/groups/gitlab-org/-/epics/6000).
-They may alert you that your metrics need further optimization to run quicker and with greater success.
-
-The Service Ping JSON payload for GitLab.com is shared in the
-[#g_analyze_analytics_instrumentation](https://gitlab.slack.com/archives/CL3A7GFPF) Slack channel every week.
-
-You may also use the [Service Ping QA dashboard](https://app.periscopedata.com/app/gitlab/632033/Usage-Ping-QA) to check how well your metric performs.
-The dashboard allows filtering by GitLab version, by "Self-managed" and "SaaS", and shows you how many failures have occurred for each metric. Whenever you notice a high failure rate, you can re-optimize your metric.
-
-Use [Metrics Dictionary](https://metrics.gitlab.com/) [copy query to clipboard feature](https://www.youtube.com/watch?v=n4o65ivta48&list=PL05JrBw4t0Krg3mbR6chU7pXtMt_es6Pb) to get a query ready to run in Sisense for a specific metric.
-
-## Set up and test Service Ping locally
-
-To set up Service Ping locally, you must:
-
-1. [Set up local repositories](#set-up-local-repositories).
-1. [Test local setup](#test-local-setup).
-1. Optional. [Test Prometheus-based Service Ping](#test-prometheus-based-service-ping).
-
-### Set up local repositories
-
-1. Clone and start [GitLab](https://gitlab.com/gitlab-org/gitlab-development-kit).
-1. Clone and start [Versions Application](https://gitlab.com/gitlab-services/version-gitlab-com).
- Make sure you run `docker-compose up` to start a PostgreSQL and Redis instance.
-1. Point GitLab to the Versions Application endpoint instead of the default endpoint:
- 1. Open [service_ping/submit_service.rb](https://gitlab.com/gitlab-org/gitlab/-/blob/master/app/services/service_ping/submit_service.rb#L5) locally and modify `STAGING_BASE_URL`.
- 1. Set it to the local Versions Application URL: `http://localhost:3000`.
-
-### Test local setup
-
-1. Using the `gitlab` Rails console, manually trigger Service Ping:
-
- ```ruby
- GitlabServicePingWorker.new.perform('triggered_from_cron' => false)
- ```
-
-1. Use the `versions` Rails console to check the Service Ping was successfully received,
- parsed, and stored in the Versions database:
-
- ```ruby
- UsageData.last
- ```
-
-## Test Prometheus-based Service Ping
-
-If the data submitted includes metrics [queried from Prometheus](#prometheus-queries)
-that you want to inspect and verify, you must:
-
-- Ensure that a Prometheus server is running locally.
-- Ensure the respective GitLab components are exporting metrics to the Prometheus server.
-
-If you do not need to test data coming from Prometheus, no further action
-is necessary. Service Ping should degrade gracefully in the absence of a running Prometheus server.
-
-Three kinds of components may export data to Prometheus, and are included in Service Ping:
-
-- [`node_exporter`](https://github.com/prometheus/node_exporter): Exports node metrics
- from the host machine.
-- [`gitlab-exporter`](https://gitlab.com/gitlab-org/gitlab-exporter): Exports process metrics
- from various GitLab components.
-- Other various GitLab services, such as Sidekiq and the Rails server, which export their own metrics.
-
-### Test with an Omnibus container
-
-This is the recommended approach to test Prometheus-based Service Ping.
-
-To verify your change, build a new Omnibus image from your code branch using CI/CD, download the image,
-and run a local container instance:
-
-1. From your merge request, select the `qa` stage, then trigger the `e2e:package-and-test` job. This job triggers an Omnibus
- build in a [downstream pipeline of the `omnibus-gitlab-mirror` project](https://gitlab.com/gitlab-org/build/omnibus-gitlab-mirror/-/pipelines).
-1. In the downstream pipeline, wait for the `gitlab-docker` job to finish.
-1. Open the job logs and locate the full container name including the version. It takes the following form: `registry.gitlab.com/gitlab-org/build/omnibus-gitlab-mirror/gitlab-ee:<VERSION>`.
-1. On your local machine, make sure you are signed in to the GitLab Docker registry. You can find the instructions for this in
- [Authenticate to the GitLab Container Registry](../../user/packages/container_registry/authenticate_with_container_registry.md).
-1. Once signed in, download the new image by using `docker pull registry.gitlab.com/gitlab-org/build/omnibus-gitlab-mirror/gitlab-ee:<VERSION>`.
-1. For more information about working with and running Omnibus GitLab containers in Docker, refer to [GitLab Docker images](../../install/docker.md) documentation.
-
-### Test with GitLab development toolkits
-
-This is the less recommended approach, because it comes with a number of difficulties when emulating a real GitLab deployment.
-
-The [GDK](https://gitlab.com/gitlab-org/gitlab-development-kit) is not set up to run a Prometheus server or `node_exporter` alongside other GitLab components. If you would
-like to do so, [Monitoring the GDK with Prometheus](https://gitlab.com/gitlab-org/gitlab-development-kit/-/blob/main/doc/howto/prometheus/index.md#monitoring-the-gdk-with-prometheus) is a good start.
-
-The [GCK](https://gitlab.com/gitlab-org/gitlab-compose-kit) has limited support for testing Prometheus-based Service Ping.
-By default, it comes with a fully configured Prometheus service that is set up to scrape a number of components.
-However, it has the following limitations:
-
-- It does not run a `gitlab-exporter` instance, so several `process_*` metrics from services such as Gitaly may be missing.
-- While it runs a `node_exporter`, `docker-compose` services emulate hosts, meaning that it normally reports itself as not associated
-  with any of the other running services. That is not how node metrics are reported in a production setup, where `node_exporter`
-  always runs as a process alongside other GitLab components on any given node. For Service Ping, none of the node data would therefore
-  appear to be associated with any of the running services, because they all appear to run on different hosts. To alleviate this problem, the `node_exporter` in GCK was arbitrarily "assigned" to the `web` service, so `node_*` metrics appear in Service Ping only for this service.
-
-## Aggregated metrics
-
-> [Introduced](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/45979) in GitLab 13.6.
-
-WARNING:
-This feature is intended solely for internal GitLab use.
-
-The aggregated metrics feature provides insight into the data attributes in a collection of Service Ping metrics.
-This aggregation allows you to count data attributes in events without counting each occurrence of the same data attribute in multiple events.
-For example, you can aggregate the number of users who perform several actions, such as creating a new issue and opening a new merge request.
-You can then count each user that performed any combination of these actions.
-
-### Defining aggregated metric via metric YAML definition
-
-To add data for aggregated metrics to the Service Ping payload,
-create a metric YAML definition file following the [Aggregated metric instrumentation guide](metrics_instrumentation.md#aggregated-metrics).
-
-### Redis sourced aggregated metrics
-
-> [Introduced](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/45979) in GitLab 13.6.
-
-To declare the aggregate of events collected with [Redis HLL Counters](#redis-hll-counters),
-you must fulfill the following requirements:
-
-1. All events listed in the `events` attribute must come from
-   [`known_events/*.yml`](#known-events-are-added-automatically-in-service-data-payload) files.
-1. All events listed in the `events` attribute must have the same `aggregation` attribute.
-1. The `time_frame` attribute must not include the `all` value, which is unavailable for Redis sourced aggregated metrics.
-
-While it is possible to aggregate EE-only events together with events that occur in all GitLab editions, it's important to remember that doing so may produce high variance between data collected from EE and CE GitLab instances.
-
-### Database sourced aggregated metrics
-
-> [Introduced](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/52784) in GitLab 13.9.
-
-To declare an aggregate of metrics based on events collected from the database, follow
-these steps:
-
-1. [Persist the metrics for aggregation](#persist-metrics-for-aggregation).
-1. [Add new aggregated metric definition](#add-new-aggregated-metric-definition).
-
-#### Persist metrics for aggregation
-
-Only metrics calculated with [Estimated Batch Counters](#estimated-batch-counters)
-can be persisted for database sourced aggregated metrics. To persist a metric,
-inject a Ruby block into the
-[`estimate_batch_distinct_count`](#estimate_batch_distinct_count-method) method.
-This block should invoke the
-`Gitlab::Usage::Metrics::Aggregates::Sources::PostgresHll.save_aggregated_metrics`
-[method](https://gitlab.com/gitlab-org/gitlab/-/blob/master/lib/gitlab/usage/metrics/aggregates/sources/postgres_hll.rb#L21),
-which stores `estimate_batch_distinct_count` results for future use in aggregated metrics.
-
-The `Gitlab::Usage::Metrics::Aggregates::Sources::PostgresHll.save_aggregated_metrics`
-method accepts the following arguments:
-
-- `metric_name`: The name of the metric to use for aggregations. Should be the same
-  as the key under which the metric is added into Service Ping.
-- `recorded_at_timestamp`: The timestamp representing the moment when a given
-  Service Ping payload was collected. You should use the convenience method `recorded_at`
-  to fill the `recorded_at_timestamp` argument, like this: `recorded_at_timestamp: recorded_at`.
-- `time_period`: The time period used to build the `relation` argument passed into
-  `estimate_batch_distinct_count`. To collect the metric with all available historical
-  data, set a `nil` value as the time period: `time_period: nil`.
-- `data`: HyperLogLog buckets structure representing unique entries in `relation`.
-  The `estimate_batch_distinct_count` method always passes the correct argument
-  into the block, so the `data` argument must always be set to the block argument,
-  like this: `data: result`.
-
-Example metrics persistence:
-
-```ruby
-class UsageData
- def count_secure_pipelines(time_period)
- ...
- relation = ::Security::Scan.by_scan_types(scan_type).where(time_period)
-
- pipelines_with_secure_jobs['dependency_scanning_pipeline'] = estimate_batch_distinct_count(relation, :pipeline_id, batch_size: 1000, start: start_id, finish: finish_id) do |result|
- ::Gitlab::Usage::Metrics::Aggregates::Sources::PostgresHll
- .save_aggregated_metrics(metric_name: 'dependency_scanning_pipeline', recorded_at_timestamp: recorded_at, time_period: time_period, data: result)
- end
- end
-end
-```
-
-#### Add new aggregated metric definition
-
-After all metrics are persisted, you can add an aggregated metric definition following [Aggregated metric instrumentation guide](metrics_instrumentation.md#aggregated-metrics).
-To declare the aggregate of metrics collected with [Estimated Batch Counters](#estimated-batch-counters),
-you must fulfill the following requirements:
-
-- Metric names listed in the `events:` attribute must use the same names you passed in the `metric_name` argument while persisting metrics in the previous step.
-- Every metric listed in the `events:` attribute must be persisted for **every** selected `time_frame:` value.
+<!-- This redirect file can be deleted after <2023-08-20>. -->
+<!-- Redirects that point to other docs in the same project expire in three months. -->
+<!-- Redirects that point to docs in a different project or site (for example, link is not relative and starts with `https:`) expire in one year. -->
+<!-- Before deletion, see: https://docs.gitlab.com/ee/development/documentation/redirects.html -->
diff --git a/doc/development/service_ping/index.md b/doc/development/service_ping/index.md
index 01772b194a4..d0806ed375b 100644
--- a/doc/development/service_ping/index.md
+++ b/doc/development/service_ping/index.md
@@ -1,509 +1,11 @@
---
-stage: Analytics
-group: Analytics Instrumentation
-info: To determine the technical writer assigned to the Stage/Group associated with this page, see https://about.gitlab.com/handbook/product/ux/technical-writing/#assignments
+redirect_to: '../internal_analytics/service_ping/index.md'
+remove_date: '2023-08-20'
---
-# Service Ping development guidelines
+This document was moved to [another location](../internal_analytics/service_ping/index.md).
-> - Introduced in GitLab Ultimate 11.2 with more statistics.
-> - In GitLab 14.1, [renamed from Usage Ping to Service Ping](https://gitlab.com/groups/gitlab-org/-/epics/5990). In 14.0 and earlier, use the Usage Ping documentation for the Rails commands appropriate to your version.
-
-Service Ping is a GitLab process that collects and sends a weekly payload to GitLab.
-The payload provides important high-level data that helps our product, support,
-and sales teams understand how GitLab is used. The data helps to:
-
-- Compare counts month over month (or week over week) to get a rough sense for how an instance uses
- different product features.
-- Collect other facts that help us classify and understand GitLab installations.
-- Calculate our stage monthly active users (SMAU), which helps to measure the success of our stages
- and features.
-
-Service Ping information is not anonymous. It's linked to the instance's hostname, but does
-not contain project names, usernames, or any other specific data.
-
-Service Ping is enabled by default. However, you can [disable](../../user/admin_area/settings/usage_statistics.md#enable-or-disable-usage-statistics) it on any self-managed instance. When Service Ping is enabled, GitLab gathers data from the other instances and can show your instance's usage statistics to your users.
-
-## Service Ping terminology
-
-We use the following terminology to describe the Service Ping components:
-
-- **Service Ping**: the process that collects and generates a JSON payload.
-- **Service Data**: the contents of the Service Ping JSON payload. This includes metrics.
-- **Metrics**: primarily made up of row counts for different tables in an instance's database. Each
- metric has a corresponding [metric definition](metrics_dictionary.md#metrics-definition-and-validation)
- in a YAML file.
-- **MAU**: monthly active users.
-- **WAU**: weekly active users.
-
-### Limitations
-
-- Service Ping does not track frontend events, such as page views, link clicks, or user sessions.
-- Service Ping focuses only on aggregated backend events.
-
-Because of these limitations, we recommend that you:
-
-- Instrument your products with Snowplow for more detailed analytics on GitLab.com.
-- Use Service Ping to track aggregated backend events on self-managed instances.
-
-## Service Ping request flow
-
-The following example shows a basic request/response flow between a GitLab instance, the Versions Application, the License Application, Salesforce, the GitLab S3 Bucket, the GitLab Snowflake Data Warehouse, and Sisense:
-
-```mermaid
-sequenceDiagram
- participant GitLab Instance
- participant Versions Application
- participant Licenses Application
- participant Salesforce
- participant S3 Bucket
- participant Snowflake DW
- participant Sisense Dashboards
- GitLab Instance->>Versions Application: Send Service Ping
- loop Process usage data
- Versions Application->>Versions Application: Parse usage data
- Versions Application->>Versions Application: Write to database
- Versions Application->>Versions Application: Update license ping time
- end
- loop Process data for Salesforce
- Versions Application-xLicenses Application: Request Zuora subscription id
- Licenses Application-xVersions Application: Zuora subscription id
- Versions Application-xSalesforce: Request Zuora account id by Zuora subscription id
- Salesforce-xVersions Application: Zuora account id
- Versions Application-xSalesforce: Usage data for the Zuora account
- end
- Versions Application->>S3 Bucket: Export Versions database
- S3 Bucket->>Snowflake DW: Import data
- Snowflake DW->>Snowflake DW: Transform data using dbt
- Snowflake DW->>Sisense Dashboards: Data available for querying
- Versions Application->>GitLab Instance: DevOps Score (Conversational Development Index)
-```
-
-## How Service Ping works
-
-1. The Service Ping [cron job](https://gitlab.com/gitlab-org/gitlab/-/blob/master/app/workers/gitlab_service_ping_worker.rb#L24) is set in Sidekiq to run weekly.
-1. When the cron job runs, it calls [`Gitlab::Usage::ServicePingReport.for(output: :all_metrics_values)`](https://gitlab.com/gitlab-org/gitlab/-/blob/master/app/services/service_ping/submit_service.rb).
-1. `Gitlab::Usage::ServicePingReport.for(output: :all_metrics_values)` [cascades down](https://gitlab.com/gitlab-org/gitlab/-/blob/master/lib/gitlab/usage_data.rb) to about 400 other counter method calls.
-1. The responses of all method calls are [merged together](https://gitlab.com/gitlab-org/gitlab/-/blob/master/lib/gitlab/usage_data.rb#L68) into a single JSON payload.
-1. The JSON payload is then [posted to the Versions application](https://gitlab.com/gitlab-org/gitlab/-/blob/master/app/services/service_ping/submit_service.rb#L20).
-   If a firewall exception is needed, the required URL depends on several things. If
-   the hostname is `version.gitlab.com`, the protocol is `TCP`, and the port number is `443`,
-   the required URL is <https://version.gitlab.com/>.
-1. If an error occurs, it is reported to the Versions application along with the following information:
-
-   - `uuid` - GitLab instance unique identifier
-   - `hostname` - GitLab instance hostname
-   - `version` - GitLab instance current version
-   - `elapsed` - Amount of time that passed between the start of the Service Ping report process and the moment the error occurred
-   - `message` - Error message
-
- <pre>
- <code>
- {
- "uuid"=>"02333324-1cd7-4c3b-a45b-a4993f05fb1d",
- "hostname"=>"127.0.0.1",
- "version"=>"14.7.0-pre",
- "elapsed"=>0.006946,
- "message"=>'PG::UndefinedColumn: ERROR: column \"non_existent_attribute\" does not exist\nLINE 1: SELECT COUNT(non_existent_attribute) FROM \"issues\" /*applica...'
- }
- </code>
- </pre>
-
-1. Finally, the timing metadata information that is used for diagnostic purposes is submitted to the Versions application. It consists of a list of metric identifiers and the time it took to calculate the metrics:
-
- > - [Introduced](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/37911) in GitLab 15.0 [with a flag](../../user/feature_flags.md), enabled by default.
- > - [Generally available](https://gitlab.com/gitlab-org/gitlab/-/issues/295289) in GitLab 15.2. [Feature flag `measure_service_ping_metric_collection`](https://gitlab.com/gitlab-org/gitlab/-/issues/358128) removed.
-
-```ruby
- {
- "metadata"=>
- {
- "uuid"=>"0000000-0000-0000-0000-000000000000",
- "metrics"=>
- [{"name"=>"version", "time_elapsed"=>1.1811964213848114e-05},
- {"name"=>"installation_type", "time_elapsed"=>0.00017242692410945892},
- {"name"=>"license_billable_users", "time_elapsed"=>0.009520471096038818},
- ....
- {"name"=>"counts.clusters_platforms_eks",
- "time_elapsed"=>0.05638605775311589},
- {"name"=>"counts.clusters_platforms_gke",
- "time_elapsed"=>0.40995341585949063},
- {"name"=>"counts.clusters_platforms_user",
- "time_elapsed"=>0.06410990096628666},
- {"name"=>"counts.clusters_management_project",
- "time_elapsed"=>0.24020783510059118}
- ]
- }
- }
-```
-
-### On a Geo secondary site
-
-We also collect metrics specific to [Geo](../../administration/geo/index.md) secondary sites to send with Service Ping.
-
-1. The [Geo secondary service ping cron job](https://gitlab.com/gitlab-org/gitlab/-/blob/master/ee/app/workers/geo/secondary_usage_data_cron_worker.rb) is set in Sidekiq to run weekly.
-1. When the cron job runs, it calls [`SecondaryUsageData.update_metrics!`](https://gitlab.com/gitlab-org/gitlab/-/blob/master/ee/app/models/geo/secondary_usage_data.rb#L33). This collects the relevant metrics from Prometheus and stores the data in the Geo secondary tracking database for transmission to the primary site during a [Geo node status update](https://gitlab.com/gitlab-org/gitlab/-/blob/master/ee/app/models/geo_node_status.rb#L105).
-1. Geo node status data is sent with the JSON payload in the process described above. The following is an example of the payload where each object in the array represents a Geo node:
-
- ```json
- [
- {
- "repository_verification_enabled"=>true,
- "repositories_replication_enabled"=>true,
- "repositories_synced_count"=>24,
- "repositories_failed_count"=>0,
- "git_fetch_event_count_weekly"=>nil,
- "git_push_event_count_weekly"=>nil,
- ... other geo node status fields
- }
- ]
- ```
-
-## Implementing Service Ping
-
-See the [implement Service Ping](implement.md) guide.
-
-## Example Service Ping payload
-
-The following is example content of the Service Ping payload.
-
-```json
-{
- "uuid": "0000000-0000-0000-0000-000000000000",
- "hostname": "example.com",
- "version": "12.10.0-pre",
- "installation_type": "omnibus-gitlab",
- "active_user_count": 999,
- "recorded_at": "2020-04-17T07:43:54.162+00:00",
- "edition": "EEU",
- "license_md5": "00000000000000000000000000000000",
- "license_sha256": "0000000000000000000000000000000000000000000000000000000000000000",
- "license_id": null,
- "historical_max_users": 999,
- "licensee": {
- "Name": "ABC, Inc.",
- "Email": "email@example.com",
- "Company": "ABC, Inc."
- },
- "license_user_count": 999,
- "license_starts_at": "2020-01-01",
- "license_expires_at": "2021-01-01",
- "license_plan": "ultimate",
- "license_add_ons": {
- },
- "license_trial": false,
- "counts": {
- "assignee_lists": 999,
- "boards": 999,
- "ci_builds": 999,
- ...
- },
- "container_registry_enabled": true,
- "dependency_proxy_enabled": false,
- "gitlab_shared_runners_enabled": true,
- "gravatar_enabled": true,
- "influxdb_metrics_enabled": true,
- "ldap_enabled": false,
- "mattermost_enabled": false,
- "omniauth_enabled": true,
- "prometheus_enabled": false,
- "prometheus_metrics_enabled": false,
- "reply_by_email_enabled": "incoming+%{key}@incoming.gitlab.com",
- "signup_enabled": true,
- "projects_with_expiration_policy_disabled": 999,
- "projects_with_expiration_policy_enabled": 999,
- ...
- "elasticsearch_enabled": true,
- "license_trial_ends_on": null,
- "geo_enabled": false,
- "git": {
- "version": {
- "major": 2,
- "minor": 26,
- "patch": 1
- }
- },
- "gitaly": {
- "version": "12.10.0-rc1-93-g40980d40",
- "servers": 56,
- "clusters": 14,
- "filesystems": [
- "EXT_2_3_4"
- ]
- },
- "gitlab_pages": {
- "enabled": true,
- "version": "1.17.0"
- },
- "container_registry_server": {
- "vendor": "gitlab",
- "version": "2.9.1-gitlab"
- },
- "database": {
- "adapter": "postgresql",
- "version": "9.6.15",
- "pg_system_id": 6842684531675334351,
- "flavor": "Cloud SQL for PostgreSQL"
- },
- "analytics_unique_visits": {
- "g_analytics_contribution": 999,
- ...
- },
- "usage_activity_by_stage": {
- "configure": {
- "project_clusters_enabled": 999,
- ...
- },
- "create": {
- "merge_requests": 999,
- ...
- },
- "manage": {
- "events": 999,
- ...
- },
- "monitor": {
- "clusters": 999,
- ...
- },
- "package": {
- "projects_with_packages": 999
- },
- "plan": {
- "issues": 999,
- ...
- },
- "release": {
- "deployments": 999,
- ...
- },
- "secure": {
- "user_container_scanning_jobs": 999,
- ...
- },
- "verify": {
- "ci_builds": 999,
- ...
- }
- },
- "usage_activity_by_stage_monthly": {
- "configure": {
- "project_clusters_enabled": 999,
- ...
- },
- "create": {
- "merge_requests": 999,
- ...
- },
- "manage": {
- "events": 999,
- ...
- },
- "monitor": {
- "clusters": 999,
- ...
- },
- "package": {
- "projects_with_packages": 999
- },
- "plan": {
- "issues": 999,
- ...
- },
- "release": {
- "deployments": 999,
- ...
- },
- "secure": {
- "user_container_scanning_jobs": 999,
- ...
- },
- "verify": {
- "ci_builds": 999,
- ...
- }
- },
- "topology": {
- "duration_s": 0.013836685999194742,
- "application_requests_per_hour": 4224,
- "query_apdex_weekly_average": 0.996,
- "failures": [],
- "nodes": [
- {
- "node_memory_total_bytes": 33269903360,
- "node_memory_utilization": 0.35,
- "node_cpus": 16,
- "node_cpu_utilization": 0.2,
- "node_uname_info": {
- "machine": "x86_64",
- "sysname": "Linux",
- "release": "4.19.76-linuxkit"
- },
- "node_services": [
- {
- "name": "web",
- "process_count": 16,
- "process_memory_pss": 233349888,
- "process_memory_rss": 788220927,
- "process_memory_uss": 195295487,
- "server": "puma"
- },
- {
- "name": "sidekiq",
- "process_count": 1,
- "process_memory_pss": 734080000,
- "process_memory_rss": 750051328,
- "process_memory_uss": 731533312
- },
- ...
- ],
- ...
- },
- ...
- ]
- }
-}
-```
-
-## Notable changes
-
-In GitLab 14.6, [`flavor`](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/75587) was added to try to detect the underlying managed database variant.
-Possible values are "Amazon Aurora PostgreSQL", "PostgreSQL on Amazon RDS", "Cloud SQL for PostgreSQL",
-"Azure Database for PostgreSQL - Flexible Server", or "null".
-
-In GitLab 13.5, `pg_system_id` was added to send the [PostgreSQL system identifier](https://www.2ndquadrant.com/en/blog/support-for-postgresqls-system-identifier-in-barman/).
-
-## Export Service Ping data
-
-Rake tasks exist to export Service Ping data in different formats.
-
-- The Rake tasks export the raw SQL queries for `count`, `distinct_count`, `sum`.
-- The Rake tasks export the Redis counter class or the line of the Redis block for `redis_usage_data`.
-- The Rake tasks calculate the `alt_usage_data` metrics.
-
-In the home directory of your local GitLab installation, run the following Rake tasks for the YAML and JSON versions respectively:
-
-```shell
-# for YAML export of SQL queries
-bin/rake gitlab:usage_data:dump_sql_in_yaml
-
-# for JSON export of SQL queries
-bin/rake gitlab:usage_data:dump_sql_in_json
-
-# for JSON export of Non SQL data
-bin/rake gitlab:usage_data:dump_non_sql_in_json
-
-# You may pipe the output into a file
-bin/rake gitlab:usage_data:dump_sql_in_yaml > ~/Desktop/usage-metrics-2020-09-02.yaml
-```
-
-## Generate Service Ping
-
-To generate Service Ping, use [Teleport](https://goteleport.com/docs/) or a detached screen session on a remote server.
-
-### Triggering
-
-#### Trigger Service Ping with Teleport
-
-1. Request temporary [access](https://gitlab.com/gitlab-com/runbooks/-/blob/master/docs/teleport/Connect_to_Rails_Console_via_Teleport.md#how-to-use-teleport-to-connect-to-rails-console) to the required environment.
-1. After your approval is issued, [access the Rails console](https://gitlab.com/gitlab-com/runbooks/-/blob/master/docs/teleport/Connect_to_Rails_Console_via_Teleport.md#access-approval).
-1. Run `GitlabServicePingWorker.new.perform('triggered_from_cron' => false)`.
-
-#### Trigger Service Ping with a detached screen session
-
-1. Connect to bastion with agent forwarding:
-
- ```shell
- ssh -A lb-bastion.gprd.gitlab.com
- ```
-
-1. Create named screen:
-
- ```shell
- screen -S <username>_usage_ping_<date>
- ```
-
-1. Connect to console host:
-
- ```shell
- ssh $USER-rails@console-01-sv-gprd.c.gitlab-production.internal
- ```
-
-1. Run the following in the Rails console:
-
-   ```ruby
-   GitlabServicePingWorker.new.perform('triggered_from_cron' => false)
-   ```
-
-1. To detach from screen, press `ctrl + A`, `ctrl + D`.
-1. Exit from bastion:
-
- ```shell
- exit
- ```
-
-1. Get the metrics duration from logs:
-
-   Search in Google Console logs for `time_elapsed`. [Query example](https://cloudlogging.app.goo.gl/nWheZvD8D3nWazNe6).
-
-### Verification (after approximately 30 hours)
-
-#### Verify with Teleport
-
-1. Follow [the steps](https://gitlab.com/gitlab-com/runbooks/-/blob/master/docs/teleport/Connect_to_Rails_Console_via_Teleport.md#how-to-use-teleport-to-connect-to-rails-console) to request new access to the required environment and connect to the Rails console.
-1. Check the last payload in the `raw_usage_data` table: `RawUsageData.last.payload`
-1. Check when the payload was sent: `RawUsageData.last.sent_at`
-
-#### Verify using detached screen session
-
-1. Reconnect to bastion:
-
- ```shell
- ssh -A lb-bastion.gprd.gitlab.com
- ```
-
-1. Find your screen session:
-
- ```shell
- screen -ls
- ```
-
-1. Attach to your screen session:
-
- ```shell
- screen -x 14226.mwawrzyniak_usage_ping_2021_01_22
- ```
-
-1. Check the last payload in the `raw_usage_data` table:
-
-   ```ruby
-   RawUsageData.last.payload
-   ```
-
-1. Check when the payload was sent:
-
-   ```ruby
-   RawUsageData.last.sent_at
-   ```
-
-### Skip database write operations
-
-To skip database write operations, DevOps report creation, and storage of the usage data payload, pass the optional `skip_db_write` argument:
-
-```ruby
-GitlabServicePingWorker.new.perform('triggered_from_cron' => false, 'skip_db_write' => true)
-```
-
-## Monitoring
-
-The Service Ping reporting process state is monitored with an [internal Sisense dashboard](https://app.periscopedata.com/app/gitlab/968489).
-
-## Related topics
-
-- [Analytics Instrumentation Guide](https://about.gitlab.com/handbook/product/analytics-instrumentation-guide/)
-- [Snowplow Guide](../snowplow/index.md)
-- [Analytics Instrumentation Direction](https://about.gitlab.com/direction/analytics/analytics-instrumentation/)
-- [Data Analysis Process](https://about.gitlab.com/handbook/business-technology/data-team/#data-analysis-process/)
-- [Data for Product Managers](https://about.gitlab.com/handbook/business-technology/data-team/programs/data-for-product-managers/)
-- [Data Infrastructure](https://about.gitlab.com/handbook/business-technology/data-team/platform/infrastructure/)
+<!-- This redirect file can be deleted after <2023-08-20>. -->
+<!-- Redirects that point to other docs in the same project expire in three months. -->
+<!-- Redirects that point to docs in a different project or site (for example, link is not relative and starts with `https:`) expire in one year. -->
+<!-- Before deletion, see: https://docs.gitlab.com/ee/development/documentation/redirects.html -->
diff --git a/doc/development/service_ping/metrics_dictionary.md b/doc/development/service_ping/metrics_dictionary.md
index d53400276d0..fecab4916f5 100644
--- a/doc/development/service_ping/metrics_dictionary.md
+++ b/doc/development/service_ping/metrics_dictionary.md
@@ -1,334 +1,11 @@
---
-stage: Analytics
-group: Analytics Instrumentation
-info: To determine the technical writer assigned to the Stage/Group associated with this page, see https://about.gitlab.com/handbook/product/ux/technical-writing/#assignments
+redirect_to: '../internal_analytics/service_ping/metrics_dictionary.md'
+remove_date: '2023-08-20'
---
-# Metrics Dictionary Guide
+This document was moved to [another location](../internal_analytics/service_ping/metrics_dictionary.md).
-[Service Ping](index.md) metrics are defined in individual YAML definition files, from which the
-[Metrics Dictionary](https://metrics.gitlab.com/) is built. Currently, the metrics dictionary is built automatically once a day. When a change to a metric is made in a YAML file, you can see the change in the dictionary within 24 hours.
-This guide describes the dictionary and how it's implemented.
-
-## Metrics Definition and validation
-
-We are using [JSON Schema](https://gitlab.com/gitlab-org/gitlab/-/blob/master/config/metrics/schema.json) to validate the metrics definition.
-
-This process is meant to ensure consistent and valid metrics defined for Service Ping. All metrics *must*:
-
-- Comply with the defined [JSON schema](https://gitlab.com/gitlab-org/gitlab/-/blob/master/config/metrics/schema.json).
-- Have a unique `key_path`.
-- Have an owner.
-
-All metrics are stored in YAML files:
-
-- [`config/metrics`](https://gitlab.com/gitlab-org/gitlab/-/tree/master/config/metrics)
-
-WARNING:
-Only metrics with a metric definition YAML and whose status is not `removed` are added to the Service Ping JSON payload.
-
-Each metric is defined in a separate YAML file consisting of a number of fields:
-
-| Field | Required | Additional information |
-|---------------------|----------|----------------------------------------------------------------|
-| `key_path` | yes | JSON key path for the metric, location in Service Ping payload. |
-| `name` (deprecated) | no | Metric name suggestion. Does not have any impact on the Service Ping payload, only serves as documentation. |
-| `description` | yes | |
-| `product_section` | yes | The [section](https://gitlab.com/gitlab-com/www-gitlab-com/-/blob/master/data/sections.yml). |
-| `product_stage` | yes | The [stage](https://gitlab.com/gitlab-com/www-gitlab-com/blob/master/data/stages.yml) for the metric. |
-| `product_group` | yes | The [group](https://gitlab.com/gitlab-com/www-gitlab-com/blob/master/data/stages.yml) that owns the metric. |
-| `value_type` | yes | `string`; one of [`string`, `number`, `boolean`, `object`](https://json-schema.org/understanding-json-schema/reference/type.html). |
-| `status` | yes | `string`; [status](#metric-statuses) of the metric, may be set to `active`, `removed`, `broken`. |
-| `time_frame` | yes | `string`; may be set to a value like `7d`, `28d`, `all`, `none`. |
-| `data_source` | yes | `string`; may be set to a value like `database`, `redis`, `redis_hll`, `prometheus`, `system`, `license`. |
-| `data_category` | yes | `string`; [categories](#data-category) of the metric, may be set to `operational`, `optional`, `subscription`, `standard`. The default value is `optional`.|
-| `instrumentation_class` | yes | `string`; [the class that implements the metric](metrics_instrumentation.md). |
-| `distribution` | yes | `array`; may be set to one of `ce, ee` or `ee`. The [distribution](https://about.gitlab.com/handbook/marketing/brand-and-product-marketing/product-and-solution-marketing/tiers/#definitions) where the tracked feature is available. |
-| `performance_indicator_type` | no | `array`; may be set to one of [`gmau`, `smau`, `paid_gmau`, `umau` or `customer_health_score`](https://about.gitlab.com/handbook/business-technology/data-team/data-catalog/xmau-analysis/). |
-| `tier` | yes | `array`; may contain one or a combination of `free`, `premium` or `ultimate`. The [tier](https://about.gitlab.com/handbook/marketing/brand-and-product-marketing/product-and-solution-marketing/tiers/#definitions) where the tracked feature is available. This should be verbose and contain all tiers where a metric is available. |
-| `milestone` | yes | The milestone when the metric is introduced and when it's available to self-managed instances with the official GitLab release. |
-| `milestone_removed` | no | The milestone when the metric is removed. |
-| `introduced_by_url` | no | The URL to the merge request that introduced the metric to be available for self-managed instances. |
-| `removed_by_url` | no | The URL to the merge request that removed the metric. |
-| `repair_issue_url` | no | The URL of the issue that was created to repair a metric with a `broken` status. |
-| `options` | no | `object`: options information needed to calculate the metric value. |
-| `skip_validation` | no | This should **not** be set. [Used for imported metrics until we review, update and make them valid](https://gitlab.com/groups/gitlab-org/-/epics/5425). |
-
-### Metric `key_path`
-
-The `key_path` of the metric is the location in the JSON Service Ping payload.
-
-The `key_path` can be composed of multiple parts separated by `.` and it must be unique.
-
-We recommend adding the metric under one of the top-level keys:
-
-- `settings`: for settings related metrics.
-- `counts_weekly`: for counters that have data for the most recent 7 days.
-- `counts_monthly`: for counters that have data for the most recent 28 days.
-- `counts`: for counters that have data for all time.
-
-NOTE:
-We can't control what the metric's `key_path` is, because some of them are generated dynamically in `usage_data.rb`.
-For example, see [Redis HLL metrics](implement.md#redis-hll-counters).
-
-### Metric name (deprecated)
-
-WARNING:
-This feature was deprecated in GitLab 16.1
-and is planned for [removal](https://gitlab.com/gitlab-org/gitlab/-/issues/411602) in 16.2.
-
-To improve metric discoverability by a wider audience, each metric with
-instrumentation added at an appointed `key_path` receives a `name` attribute
-filled with the name suggestion, corresponding to the metric `data_source` and instrumentation.
-Metric name suggestions can contain two types of elements:
-
-1. **User input prompts**: enclosed by angle brackets (`< >`), these pieces should be replaced or
- removed when you create a metrics YAML file.
-1. **Fixed suggestion**: plaintext parts generated according to well-defined algorithms.
- They are based on underlying instrumentation, and must not be changed.
-
-For a metric name to be valid, it must not include any prompt, and fixed suggestions
-must not be changed.
-
-#### Generate a metric name suggestion (deprecated)
-
-WARNING:
-This feature was deprecated in GitLab 16.1
-and is planned for [removal](https://gitlab.com/gitlab-org/gitlab/-/issues/411602) in 16.2.
-
-The metric YAML generator can suggest a metric name for you.
-To generate a metric name suggestion, first instrument the metric at the provided `key_path`.
-Then, generate the metric's YAML definition and
-return to the instrumentation and update it.
-
-1. Add the metric instrumentation class to `lib/gitlab/usage/metrics/instrumentations/`.
-1. Add the metric logic in the instrumentation class.
-1. Run the [metrics YAML generator](metrics_dictionary.md#create-a-new-metric-definition).
-1. Use the metric name suggestion to select a suitable metric name.
-1. Update the metric's YAML definition with the correct `key_path`.
-
-### Metric statuses
-
-Metric definitions can have one of the following statuses:
-
-- `active`: Metric is used and reports data.
-- `broken`: Metric reports broken data (for example, -1 fallback), or does not report data at all. A metric marked as `broken` must also have the `repair_issue_url` attribute.
-- `removed`: Metric was removed, but it may appear in Service Ping payloads sent from instances running on older versions of GitLab.
-
-### Metric `value_type`
-
-Metric definitions can have one of the following values for `value_type`:
-
-- `boolean`
-- `number`
-- `string`
-- `object`: A metric with `value_type: object` must have `value_json_schema` with a link to the JSON schema for the object.
-In general, we avoid complex objects and prefer one of the `boolean`, `number`, or `string` value types.
-An example of a metric that uses `value_type: object` is `topology` (`/config/metrics/settings/20210323120839_topology.yml`),
-which has a related schema in `/config/metrics/objects_schemas/topology_schema.json`.
-
-### Metric `time_frame`
-
-A metric's time frame is calculated based on the `time_frame` field and the `data_source` of the metric.
-For `redis_hll` metrics, the type of aggregation is also taken into consideration. In this context, the term "aggregation" refers to the [chosen data storage interval for events](implement.md#add-new-events), and is **NOT** related to the Aggregated Metrics feature.
-For more information about the aggregation type of each feature, see the [`common.yml` file](https://gitlab.com/gitlab-org/gitlab/-/blob/master/lib/gitlab/usage_data_counters/known_events/common.yml). Weeks run from Monday to Sunday.
-
-| data_source | time_frame | aggregation | Description |
-|------------------------|------------|----------------|-------------------------------------------------|
-| any | `none` | not applicable | A type of data that’s not tracked over time, such as settings and configuration information |
-| `database` | `all` | not applicable | The whole time the metric has been active (all-time interval) |
-| `database` | `7d` | not applicable | 9 days ago to 2 days ago |
-| `database` | `28d` | not applicable | 30 days ago to 2 days ago |
-| `redis` | `all` | not applicable | The whole time the metric has been active (all-time interval) |
-| `redis_hll` | `7d` | `daily` | Most recent 7 complete days |
-| `redis_hll` | `7d` | `weekly` | Most recent complete week |
-| `redis_hll` | `28d` | `daily` | Most recent 28 complete days |
-| `redis_hll` | `28d` | `weekly` | Most recent 4 complete weeks |
-
-### Data category
-
-We use the following categories to classify a metric:
-
-- `operational`: Required data for operational purposes.
-- `optional`: Default value for a metric. Data that is optional to collect. This can be [enabled or disabled](../../user/admin_area/settings/usage_statistics.md#enable-or-disable-usage-statistics) in the Admin Area.
-- `subscription`: Data related to licensing.
-- `standard`: Standard set of identifiers that are included when collecting data.
-
-An aggregate metric is a metric that is the sum of two or more child metrics. Service Ping uses the data category of
-the aggregate metric to determine whether or not the data is included in the reported Service Ping payload.
-
-### Metric name suggestion examples (deprecated)
-
-WARNING:
-This feature was deprecated in GitLab 16.1
-and is planned for [removal](https://gitlab.com/gitlab-org/gitlab/-/issues/411602) in 16.2.
-
-#### Metric with `data_source: database`
-
-For a metric instrumented with SQL:
-
-```sql
-SELECT COUNT(DISTINCT user_id) FROM clusters WHERE clusters.management_project_id IS NOT NULL
-```
-
-- **Suggested name**: `count_distinct_user_id_from_<adjective describing: '(clusters.management_project_id IS NOT NULL)'>_clusters`
-- **Prompt**: `<adjective describing: '(clusters.management_project_id IS NOT NULL)'>`
- should be replaced with an adjective that best represents filter conditions, such as `project_management`
-- **Final metric name**: For example, `count_distinct_user_id_from_project_management_clusters`
-
-For metric instrumented with SQL:
-
-```sql
-SELECT COUNT(DISTINCT clusters.user_id)
-FROM clusters_applications_helm
-INNER JOIN clusters ON clusters.id = clusters_applications_helm.cluster_id
-WHERE clusters_applications_helm.status IN (3, 5)
-```
-
-- **Suggested name**: `count_distinct_user_id_from_<adjective describing: '(clusters_applications_helm.status IN (3, 5))'>_clusters_<with>_<adjective describing: '(clusters_applications_helm.status IN (3, 5))'>_clusters_applications_helm`
-- **Prompt**: `<adjective describing: '(clusters_applications_helm.status IN (3, 5))'>`
- should be replaced with an adjective that best represents filter conditions
-- **Final metric name**: `count_distinct_user_id_from_clusters_with_available_clusters_applications_helm`
-
-In the previous example, the first occurrence of the prompt is irrelevant, and the user can remove it. The second
-occurrence corresponds to the `available` scope defined in `Clusters::Concerns::ApplicationStatus`,
-which can be used as the right adjective to replace the prompt.
-
-The `<with>` represents a suggested conjunction for the suggested name of the joined relation.
-The person documenting the metric can use it by either:
-
-- Removing the surrounding `<>`.
-- Using a different conjunction, such as `having` or `including`.
-
-#### Metric with `data_source: redis` or `redis_hll`
-
-For metrics instrumented with a Redis-based counter, the suggested name includes
-only a single prompt to be replaced by the person working with the metrics YAML.
-
-- **Prompt**: `<please fill metric name, suggested format is: {subject}_{verb}{ing|ed}_{object} eg: users_creating_epics or merge_requests_viewed_in_single_file_mode>`
-- **Final metric name**: We suggest the metric name should follow the format of
- `{subject}_{verb}{ing|ed}_{object}`, such as `user_creating_epics`, `users_triggering_security_scans`,
- or `merge_requests_viewed_in_single_file_mode`
-
-#### Metric with `data_source: prometheus` or `system`
-
-For metrics instrumented with Prometheus or coming from the operating system,
-the suggested name includes only the single prompt to be replaced by the person working with the metrics YAML.
-
-- **Prompt**: `<please fill metric name>`
-- **Final metric name**: Due to the variety of cases that can apply to this kind of metric,
- no naming convention exists. Each person instrumenting a metric should use their
- best judgment to come up with a descriptive name.
-
-### Example YAML metric definition
-
-The linked [`uuid`](https://gitlab.com/gitlab-org/gitlab/-/blob/master/config/metrics/license/uuid.yml)
-YAML file includes an example metric definition, where the `uuid` metric is the GitLab
-instance unique identifier.
-
-```yaml
-key_path: uuid
-description: GitLab instance unique identifier
-product_section: analytics
-product_stage: analytics
-product_group: analytics_instrumentation
-value_type: string
-status: active
-milestone: 9.1
-instrumentation_class: UuidMetric
-introduced_by_url: https://gitlab.com/gitlab-org/gitlab/-/merge_requests/1521
-time_frame: none
-data_source: database
-distribution:
-- ce
-- ee
-tier:
-- free
-- premium
-- ultimate
-```
-
-### Create a new metric definition
-
-The GitLab codebase provides a dedicated [generator](https://gitlab.com/gitlab-org/gitlab/-/blob/master/lib/generators/gitlab/usage_metric_definition_generator.rb) to create new metric definitions.
-
-For uniqueness, the generated files include a timestamp prefix in ISO 8601 format.
-
-The generator takes a list of key paths and 3 options as arguments. It creates metric YAML definitions in the corresponding location:
-
-- `--ee`, `--no-ee` Indicates if metric is for EE.
-- `--dir=DIR` Indicates the metric directory. It must be one of: `counts_7d`, `7d`, `counts_28d`, `28d`, `counts_all`, `all`, `settings`, `license`.
-- `--class_name=CLASS_NAME` Indicates the instrumentation class. For example `UsersCreatingIssuesMetric`, `UuidMetric`
-
-**Single metric example**
-
-```shell
-bundle exec rails generate gitlab:usage_metric_definition counts.issues --dir=7d --class_name=CountIssues
-# Creates 1 file
-# create config/metrics/counts_7d/issues.yml
-```
-
-**Multiple metrics example**
-
-```shell
-bundle exec rails generate gitlab:usage_metric_definition counts.issues counts.users --dir=7d --class_name=CountUsersCreatingIssues
-# Creates 2 files
-# create config/metrics/counts_7d/issues.yml
-# create config/metrics/counts_7d/users.yml
-```
-
-NOTE:
-To create a metric definition used in EE, add the `--ee` flag.
-
-```shell
-bundle exec rails generate gitlab:usage_metric_definition counts.issues --ee --dir=7d --class_name=CountUsersCreatingIssues
-# Creates 1 file
-# create ee/config/metrics/counts_7d/issues.yml
-```
-
-### Metrics added dynamically to the Service Ping payload
-
-The [Redis HLL metrics](implement.md#known-events-are-added-automatically-in-service-data-payload) are added automatically to the Service Ping payload.
-
-A YAML metric definition is required for each metric. A dedicated generator is provided to create metric definitions for Redis HLL events.
-
-The generator takes `category` and `events` arguments (the root key is always `redis_hll_counters`) and creates two metric definitions for each event, one for the weekly and one for the monthly time frame:
-
-**Single metric example**
-
-```shell
-bundle exec rails generate gitlab:usage_metric_definition:redis_hll issues count_users_closing_issues
-# Creates 2 files
-# create config/metrics/counts_7d/count_users_closing_issues_weekly.yml
-# create config/metrics/counts_28d/count_users_closing_issues_monthly.yml
-```
-
-**Multiple metrics example**
-
-```shell
-bundle exec rails generate gitlab:usage_metric_definition:redis_hll issues count_users_closing_issues count_users_reopening_issues
-# Creates 4 files
-# create config/metrics/counts_7d/count_users_closing_issues_weekly.yml
-# create config/metrics/counts_28d/count_users_closing_issues_monthly.yml
-# create config/metrics/counts_7d/count_users_reopening_issues_weekly.yml
-# create config/metrics/counts_28d/count_users_reopening_issues_monthly.yml
-```
-
-To create a metric definition used in EE, add the `--ee` flag.
-
-```shell
-bundle exec rails generate gitlab:usage_metric_definition:redis_hll issues users_closing_issues --ee
-# Creates 2 files
-# create ee/config/metrics/counts_7d/users_closing_issues_weekly.yml
-# create ee/config/metrics/counts_28d/users_closing_issues_monthly.yml
-```
-
-## Metrics Dictionary
-
-[Metrics Dictionary is a separate application](https://gitlab.com/gitlab-org/analytics-section/analytics-instrumentation/metric-dictionary).
-
-All metrics available in Service Ping are in the [Metrics Dictionary](https://metrics.gitlab.com/).
-
-### Copy query to clipboard
-
-To check if a metric has data in Sisense, use the copy query to clipboard feature. This copies a query that's ready to use in Sisense. The query retrieves the five most recent Service Ping records reported for GitLab.com for a given metric. For information about how to check if a Service Ping metric has data in Sisense, see this [demo](https://www.youtube.com/watch?v=n4o65ivta48).
+<!-- This redirect file can be deleted after <2023-08-20>. -->
+<!-- Redirects that point to other docs in the same project expire in three months. -->
+<!-- Redirects that point to docs in a different project or site (for example, link is not relative and starts with `https:`) expire in one year. -->
+<!-- Before deletion, see: https://docs.gitlab.com/ee/development/documentation/redirects.html -->
diff --git a/doc/development/service_ping/metrics_instrumentation.md b/doc/development/service_ping/metrics_instrumentation.md
index b6ca773a572..5a4dfc325e2 100644
--- a/doc/development/service_ping/metrics_instrumentation.md
+++ b/doc/development/service_ping/metrics_instrumentation.md
@@ -1,478 +1,11 @@
---
-stage: Analytics
-group: Analytics Instrumentation
-info: To determine the technical writer assigned to the Stage/Group associated with this page, see https://about.gitlab.com/handbook/product/ux/technical-writing/#assignments
+redirect_to: '../internal_analytics/service_ping/metrics_instrumentation.md'
+remove_date: '2023-08-20'
---
-# Metrics instrumentation guide
+This document was moved to [another location](../internal_analytics/service_ping/metrics_instrumentation.md).
-This guide describes how to develop Service Ping metrics using metrics instrumentation.
-
-<i class="fa fa-youtube-play youtube" aria-hidden="true"></i>
-For a video tutorial, see the [Adding Service Ping metric via instrumentation class](https://youtu.be/p2ivXhNxUoY).
-
-## Nomenclature
-
-- **Instrumentation class**:
- - Inherits one of the metric classes: `DatabaseMetric`, `RedisMetric`, `RedisHLLMetric`, `NumbersMetric` or `GenericMetric`.
- - Implements the logic that calculates the value for a Service Ping metric.
-
-- **Metric definition**
- The Service Data metric YAML definition.
-
-- **Hardening**:
-  Hardening a method is the process that ensures the method fails safe, returning a fallback value like -1 (see the sketch after this list).
-
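-As an illustration of the hardening idea above, here is a minimal sketch (not the actual
-implementation; `hardened_metric_value` and `expensive_metric_calculation` are hypothetical names)
-of a metric calculation that falls back to `-1` instead of raising:
-
-```ruby
-# Illustrative sketch only: wrap the metric calculation so a failure returns a
-# fallback value instead of breaking the whole Service Ping generation process.
-FALLBACK = -1
-
-def hardened_metric_value
-  expensive_metric_calculation
-rescue StandardError
-  FALLBACK
-end
-```
-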
-## How it works
-
-A metric definition has the [`instrumentation_class`](metrics_dictionary.md) field, which can be set to a class.
-
-The defined instrumentation class should inherit one of the existing metric classes: `DatabaseMetric`, `RedisMetric`, `RedisHLLMetric`, `NumbersMetric` or `GenericMetric`.
-
-The current convention is that a single instrumentation class corresponds to a single metric. On rare occasions, there are exceptions to that convention like [Redis metrics](#redis-metrics). To use a single instrumentation class for more than one metric, please reach out to one of the `@gitlab-org/analytics-section/analytics-instrumentation/engineers` members to consult about your case.
-
-Using the instrumentation classes ensures that metrics can fail safe individually, without breaking
-the entire process of Service Ping generation.
-
-We have built a domain-specific language (DSL) to define the metrics instrumentation.
-
-## Database metrics
-
-You can use database metrics to track data kept in the database, for example, a count of issues that exist on a given instance.
-
-- `operation`: Operation to perform on the given `relation`, one of `count`, `distinct_count`, `sum`, and `average`.
-- `relation`: Assigns a lambda that returns the `ActiveRecord::Relation` for the objects on which we want to perform the `operation`. The assigned lambda can accept up to one parameter, which is a hash stored under the `options` key in the metric definition.
-- `start`: Specifies the start value of the batch counting. The default is `relation.minimum(:id)`.
-- `finish`: Specifies the end value of the batch counting. The default is `relation.maximum(:id)`.
-- `cache_start_and_finish_as`: Specifies the cache key for the `start` and `finish` values and sets up caching for them. Use this call when `start` and `finish` are expensive queries that should be reused between different metric calculations.
-- `available?`: Specifies whether the metric should be reported. The default is `true`.
-- `timestamp_column`: Optionally specifies the timestamp column used to filter records for time-constrained metrics. The default is `created_at`.
-
-[Example of a merge request that adds a database metric](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/60022).
-
-```ruby
-module Gitlab
- module Usage
- module Metrics
- module Instrumentations
- class CountIssuesMetric < DatabaseMetric
- operation :count
-
- relation ->(options) { Issue.where(confidential: options[:confidential]) }
- end
- end
- end
- end
-end
-```
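-
-For the parameterized `relation` lambda shown above, the single parameter comes from the `options`
-key of the metric's YAML definition. A minimal sketch of the corresponding definition fragment, with
-illustrative values:
-
-```yaml
-time_frame: all
-data_source: database
-instrumentation_class: CountIssuesMetric
-options:
-  confidential: true
-```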
-
-### Ordinary batch counters example
-
-```ruby
-module Gitlab
- module Usage
- module Metrics
- module Instrumentations
- class CountIssuesMetric < DatabaseMetric
- operation :count
-
- start { Issue.minimum(:id) }
- finish { Issue.maximum(:id) }
-
- relation { Issue }
- end
- end
- end
- end
-end
-```
-
-### Distinct batch counters example
-
-```ruby
-# frozen_string_literal: true
-
-module Gitlab
- module Usage
- module Metrics
- module Instrumentations
- class CountUsersAssociatingMilestonesToReleasesMetric < DatabaseMetric
- operation :distinct_count, column: :author_id
-
- relation { Release.with_milestones }
-
- start { Release.minimum(:author_id) }
- finish { Release.maximum(:author_id) }
- end
- end
- end
- end
-end
-```
-
-### Sum example
-
-```ruby
-# frozen_string_literal: true
-
-module Gitlab
- module Usage
- module Metrics
- module Instrumentations
- class JiraImportsTotalImportedIssuesCountMetric < DatabaseMetric
- operation :sum, column: :imported_issues_count
-
- relation { JiraImportState.finished }
- end
- end
- end
- end
-end
-```
-
-### Average example
-
-```ruby
-# frozen_string_literal: true
-
-module Gitlab
- module Usage
- module Metrics
- module Instrumentations
- class CountIssuesWeightAverageMetric < DatabaseMetric
- operation :average, column: :weight
-
- relation { Issue }
- end
- end
- end
- end
-end
-```
-
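-The optional DSL calls listed at the top of this section (`start`, `finish`, `available?`, and
-`timestamp_column`) can be combined in a single class. The following is a minimal sketch with an
-illustrative class name, column, and feature flag, not an example taken from the codebase:
-
-```ruby
-# Illustrative sketch only: combines several of the optional DSL calls described above.
-module Gitlab
-  module Usage
-    module Metrics
-      module Instrumentations
-        class CountRecentIssuesMetric < DatabaseMetric
-          operation :count
-
-          relation { Issue }
-
-          # Column used to filter records for time-constrained metrics (default: created_at).
-          timestamp_column :created_at
-
-          # Boundaries for batch counting.
-          start { Issue.minimum(:id) }
-          finish { Issue.maximum(:id) }
-
-          # Only report the metric when the condition holds.
-          available? { Feature.enabled?(:some_feature_flag) }
-        end
-      end
-    end
-  end
-end
-```
-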
-## Redis metrics
-
-You can use Redis metrics to track events not kept in the database, for example, a count of how many times the search bar has been used.
-
-[Example of a merge request that adds `Redis` metrics](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/103455).
-
-The `RedisMetric` class can only be used as the `instrumentation_class` for Redis metrics with simple counter classes (classes that only inherit `BaseCounter` and set the `PREFIX` and `KNOWN_EVENTS` constants). If the counter class contains additional logic, create a new `instrumentation_class` that inherits from `RedisMetric` and includes that additional logic.
-
-Required options:
-
-- `event`: the event name.
-- `prefix`: the value of the `PREFIX` constant used in the counter classes from the `Gitlab::UsageDataCounters` namespace.
-
-Count unique values for `source_code_pushes` event.
-
-```yaml
-time_frame: all
-data_source: redis
-instrumentation_class: RedisMetric
-options:
- event: pushes
- prefix: source_code
-```
-
-### Availability-restrained Redis metrics
-
-If the Redis metric should only be available in the report under some conditions, then you must specify these conditions in a new class that is a child of the `RedisMetric` class.
-
-```ruby
-# frozen_string_literal: true
-
-module Gitlab
- module Usage
- module Metrics
- module Instrumentations
- class MergeUsageCountRedisMetric < RedisMetric
- available? { Feature.enabled?(:merge_usage_data_missing_key_paths) }
- end
- end
- end
- end
-end
-```
-
-You must also use the class's name in the YAML setup.
-
-```yaml
-time_frame: all
-data_source: redis
-instrumentation_class: MergeUsageCountRedisMetric
-options:
- event: pushes
- prefix: source_code
-```
-
-## Redis HyperLogLog metrics
-
-You can use Redis HyperLogLog metrics to track events not kept in the database and incremented for unique values such as unique users,
-for example, a count of how many different users used the search bar.
-
-[Example of a merge request that adds a `RedisHLL` metric](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/61685).
-
-Count unique values for `i_quickactions_approve` event.
-
-```yaml
-time_frame: 28d
-data_source: redis_hll
-instrumentation_class: RedisHLLMetric
-options:
- events:
- - i_quickactions_approve
-```
-
-### Availability-restrained Redis HyperLogLog metrics
-
-If the Redis HyperLogLog metric should only be available in the report under some conditions, then you must specify these conditions in a new class that is a child of the `RedisHLLMetric` class.
-
-```ruby
-# frozen_string_literal: true
-
-module Gitlab
- module Usage
- module Metrics
- module Instrumentations
- class MergeUsageCountRedisHLLMetric < RedisHLLMetric
- available? { Feature.enabled?(:merge_usage_data_missing_key_paths) }
- end
- end
- end
- end
-end
-```
-
-You must also use the class's name in the YAML setup.
-
-```yaml
-time_frame: 28d
-data_source: redis_hll
-instrumentation_class: MergeUsageCountRedisHLLMetric
-options:
- events:
- - i_quickactions_approve
-```
-
-## Aggregated metrics
-
-<div class="video-fallback">
- See the video from: <a href="https://www.youtube.com/watch?v=22LbYqHwtUQ">Product Intelligence Office Hours Oct 6th</a> for an aggregated metrics walk-through.
-</div>
-<figure class="video-container">
- <iframe src="https://www.youtube-nocookie.com/embed/22LbYqHwtUQ" frameborder="0" allowfullscreen> </iframe>
-</figure>
-
-The aggregated metrics feature provides insight into the number of data attributes, for example `pseudonymized_user_ids`, that occurred in a collection of events. For example, you can aggregate the number of users who perform multiple actions such as creating a new issue and opening
-a new merge request.
-
-You can use a YAML file to define your aggregated metrics. The following arguments are required:
-
-- `options.events`: List of event names to aggregate into metric data. All events in this list must
- use the same data source. Additional data source requirements are described in
- [Database sourced aggregated metrics](implement.md#database-sourced-aggregated-metrics) and
- [Redis sourced aggregated metrics](implement.md#redis-sourced-aggregated-metrics).
-- `options.aggregate.operator`: Operator that defines how the aggregated metric data is counted. Available operators are:
- - `OR`: Removes duplicates and counts all entries that triggered any of the listed events.
-  - `AND`: Removes duplicates and counts all elements that were observed triggering all of the listed events.
-- `options.aggregate.attribute`: Information pointing to the attribute that is being aggregated across events.
-- `time_frame`: One or more valid time frames. Use these to limit the data included in aggregated metrics to events within a specific date-range. Valid time frames are:
- - `7d`: The last 7 days of data.
- - `28d`: The last 28 days of data.
- - `all`: All historical data, only available for `database` sourced aggregated metrics.
-- `data_source`: Data source used to collect all events data included in the aggregated metrics. Valid data sources are:
- - [`database`](implement.md#database-sourced-aggregated-metrics)
- - [`redis_hll`](implement.md#redis-sourced-aggregated-metrics)
-
-Refer to merge request [98206](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/98206) for an example of a merge request that adds an `AggregatedMetric` metric.
-
-Count unique `user_ids` that occurred in at least one of the events: `incident_management_alert_status_changed`,
-`incident_management_alert_assigned`, `incident_management_alert_todo`, `incident_management_alert_create_incident`.
-
-```yaml
-time_frame: 28d
-instrumentation_class: AggregatedMetric
-data_source: redis_hll
-options:
- aggregate:
- operator: OR
- attribute: user_id
- events:
-  - incident_management_alert_status_changed
-  - incident_management_alert_assigned
-  - incident_management_alert_todo
-  - incident_management_alert_create_incident
-```
-
-### Availability-restrained Aggregated metrics
-
-If the Aggregated metric should only be available in the report under specific conditions, then you must specify these conditions in a new class that is a child of the `AggregatedMetric` class.
-
-```ruby
-# frozen_string_literal: true
-
-module Gitlab
- module Usage
- module Metrics
- module Instrumentations
- class MergeUsageCountAggregatedMetric < AggregatedMetric
- available? { Feature.enabled?(:merge_usage_data_missing_key_paths) }
- end
- end
- end
- end
-end
-```
-
-You must also use the class's name in the YAML setup.
-
-```yaml
-time_frame: 28d
-instrumentation_class: MergeUsageCountAggregatedMetric
-data_source: redis_hll
-options:
- aggregate:
- operator: OR
- attribute: user_id
- events:
-  - incident_management_alert_status_changed
-  - incident_management_alert_assigned
-  - incident_management_alert_todo
-  - incident_management_alert_create_incident
-```
-
-## Numbers metrics
-
-- `operation`: Operation for the given `data` block. Currently, only the `add` operation is supported.
-- `data`: a `block` which contains an array of numbers.
-- `available?`: Specifies whether the metric should be reported. The default is `true`.
-
-```ruby
-# frozen_string_literal: true
-
-module Gitlab
- module Usage
- module Metrics
- module Instrumentations
- class IssuesBoardsCountMetric < NumbersMetric
- operation :add
-
- data do |time_frame|
- [
- CountIssuesMetric.new(time_frame: time_frame).value,
- CountBoardsMetric.new(time_frame: time_frame).value
- ]
- end
-        end
-      end
-    end
-  end
-end
-```
-
-You must also include the instrumentation class name in the YAML setup.
-
-```yaml
-time_frame: 28d
-instrumentation_class: IssuesBoardsCountMetric
-```
-
-## Generic metrics
-
-You can use generic metrics for other metrics, for example, an instance's database version. Observation-type data always uses a generic metric counter type.
-
-- `value`: Specifies the value of the metric.
-- `available?`: Specifies whether the metric should be reported. The default is `true`.
-
-[Example of a merge request that adds a generic metric](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/60256).
-
-```ruby
-module Gitlab
- module Usage
- module Metrics
- module Instrumentations
- class UuidMetric < GenericMetric
- value do
- Gitlab::CurrentSettings.uuid
- end
- end
- end
- end
- end
-end
-```
-
-## Support for instrumentation classes
-
-There is support for:
-
-- `count`, `distinct_count`, `estimate_batch_distinct_count`, `sum`, and `average` for [database metrics](#database-metrics).
-- [Redis metrics](#redis-metrics).
-- [Redis HLL metrics](#redis-hyperloglog-metrics).
-- `add` for [numbers metrics](#numbers-metrics).
-- [Generic metrics](#generic-metrics), which are metrics based on settings or configurations.
-
-There is no support for:
-
-- `add`, `histogram` for database metrics.
-
-You can [track the progress to support these](https://gitlab.com/groups/gitlab-org/-/epics/6118).
-
-## Create a new metric instrumentation class
-
-To create a stub instrumentation for a Service Ping metric, you can use a dedicated [generator](https://gitlab.com/gitlab-org/gitlab/-/blob/master/lib/generators/gitlab/usage_metric_generator.rb):
-
-The generator takes the class name as an argument and the following options:
-
-- `--type=TYPE` Required. Indicates the metric type. It must be one of: `database`, `generic`, `redis`, `numbers`.
-- `--operation` Required for the `database` and `numbers` types.
- - For `database` it must be one of: `count`, `distinct_count`, `estimate_batch_distinct_count`, `sum`, `average`.
- - For `numbers` it must be: `add`.
-- `--ee` Indicates if the metric is for EE.
-
-```shell
-rails generate gitlab:usage_metric CountIssues --type database --operation distinct_count
- create lib/gitlab/usage/metrics/instrumentations/count_issues_metric.rb
- create spec/lib/gitlab/usage/metrics/instrumentations/count_issues_metric_spec.rb
-```
-
-## Migrate Service Ping metrics to instrumentation classes
-
-This guide describes how to migrate a Service Ping metric from [`lib/gitlab/usage_data.rb`](https://gitlab.com/gitlab-org/gitlab/-/blob/master/lib/gitlab/usage_data.rb) or [`ee/lib/ee/gitlab/usage_data.rb`](https://gitlab.com/gitlab-org/gitlab/-/blob/master/ee/lib/ee/gitlab/usage_data.rb) to instrumentation classes.
-
-1. Choose the metric type:
-
-- [Database metric](#database-metrics)
-- [Redis HyperLogLog metrics](#redis-hyperloglog-metrics)
-- [Redis metric](#redis-metrics)
-- [Numbers metric](#numbers-metrics)
-- [Generic metric](#generic-metrics)
-
-1. Determine the location of the instrumentation class: either under `ee` or outside `ee`.
-
-1. [Generate the instrumentation class file](#create-a-new-metric-instrumentation-class).
-
-1. Fill the instrumentation class body:
-
- - Add code logic for the metric. This might be similar to the metric implementation in `usage_data.rb`.
-   - Add tests for the individual metric in [`spec/lib/gitlab/usage/metrics/instrumentations/`](https://gitlab.com/gitlab-org/gitlab/-/tree/master/spec/lib/gitlab/usage/metrics/instrumentations).
- - Add tests for Service Ping.
-
-1. [Generate the metric definition file](metrics_dictionary.md#create-a-new-metric-definition).
-
-1. Remove the code from [`lib/gitlab/usage_data.rb`](https://gitlab.com/gitlab-org/gitlab/-/blob/master/lib/gitlab/usage_data.rb) or [`ee/lib/ee/gitlab/usage_data.rb`](https://gitlab.com/gitlab-org/gitlab/-/blob/master/ee/lib/ee/gitlab/usage_data.rb).
-
-1. Remove the tests from [`spec/lib/gitlab/usage_data_spec.rb`](https://gitlab.com/gitlab-org/gitlab/-/blob/master/spec/lib/gitlab/usage_data_spec.rb) or [`ee/spec/lib/ee/gitlab/usage_data_spec.rb`](https://gitlab.com/gitlab-org/gitlab/-/blob/master/ee/spec/lib/ee/gitlab/usage_data_spec.rb).
-
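-As an illustration of the steps above, a hypothetical `usage_data.rb` entry such as
-`issues: count(Issue)` would become a dedicated instrumentation class (mirroring the
-`CountIssuesMetric` example earlier on this page) plus a metric definition that references it:
-
-```ruby
-# Illustrative sketch only: the batch-counted usage_data.rb entry is replaced by a class.
-module Gitlab
-  module Usage
-    module Metrics
-      module Instrumentations
-        class CountIssuesMetric < DatabaseMetric
-          operation :count
-
-          relation { Issue }
-        end
-      end
-    end
-  end
-end
-```
-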
-## Troubleshoot metrics
-
-Sometimes metrics fail for reasons that are not immediately clear. The failures can be related to performance issues or other problems.
-The following pairing session video gives you an example of an investigation into a real-world failing metric.
-
-<div class="video-fallback">
- See the video from: <a href="https://www.youtube.com/watch?v=y_6m2POx2ug">Product Intelligence Office Hours Oct 27th</a> to learn more about the metrics troubleshooting process.
-</div>
-<figure class="video-container">
- <iframe src="https://www.youtube-nocookie.com/embed/y_6m2POx2ug" frameborder="0" allowfullscreen> </iframe>
-</figure>
+<!-- This redirect file can be deleted after <2023-08-20>. -->
+<!-- Redirects that point to other docs in the same project expire in three months. -->
+<!-- Redirects that point to docs in a different project or site (for example, link is not relative and starts with `https:`) expire in one year. -->
+<!-- Before deletion, see: https://docs.gitlab.com/ee/development/documentation/redirects.html -->
diff --git a/doc/development/service_ping/metrics_lifecycle.md b/doc/development/service_ping/metrics_lifecycle.md
index cc56863690c..520b18139ff 100644
--- a/doc/development/service_ping/metrics_lifecycle.md
+++ b/doc/development/service_ping/metrics_lifecycle.md
@@ -1,106 +1,11 @@
---
-stage: Analytics
-group: Analytics Instrumentation
-info: To determine the technical writer assigned to the Stage/Group associated with this page, see https://about.gitlab.com/handbook/product/ux/technical-writing/#assignments
+redirect_to: '../internal_analytics/service_ping/metrics_lifecycle.md'
+remove_date: '2023-08-20'
---
-# Service Ping metric lifecycle
+This document was moved to [another location](../internal_analytics/service_ping/metrics_lifecycle.md).
-The following guidelines explain the steps to follow at each stage of a metric's lifecycle.
-
-## Add a new metric
-
-Follow the [Implement Service Ping](implement.md) guide.
-
-## Change an existing metric
-
-WARNING:
-We want to **PREVENT** changes to the calculation logic or important attributes on any metric as this invalidates comparisons of the same metric across different versions of GitLab.
-
-If you change a metric, you have to consider that not all instances of GitLab are running on the newest version. Old instances will still report the old version of the metric.
-Additionally, a metric's reported numbers are primarily interesting compared to previously reported numbers.
-As a result, if you need to change one of the following parts of a metric, you need to add a new metric instead. It's your choice whether to keep the old metric alongside the new one or [remove it](#remove-a-metric).
-
-- **calculation logic**: This means any changes that can produce a different value than the previous implementation
-- **YAML attributes**: The following attributes are directly used for analysis or calculation: `key_path`, `time_frame`, `value_type`, `data_source`.
-
-If you change the `performance_indicator_type` attribute of a metric or think your case needs an exception from the outlined rules then please notify the Customer Success Ops team (`@csops-team`), Analytics Engineers (`@gitlab-data/analytics-engineers`), and Product Analysts (`@gitlab-data/product-analysts`) teams by `@` mentioning those groups in a comment on the merge request or issue.
-
-You can change any other attributes without impact to the calculation or analysis. See [this video tutorial](https://youtu.be/bYf3c01KCls) for help updating metric attributes.
-
-Currently, the [Metrics Dictionary](https://metrics.gitlab.com/) is built automatically once a day. You can see the change in the dictionary within 24 hours when you change the metric's YAML file.
-
-## Remove a metric
-
-WARNING:
-If a metric is not used in Sisense or any other system after 6 months, the
-Analytics Instrumentation team marks it as inactive and assigns it to the group owner for review.
-
-We are working on automating this process. See [this epic](https://gitlab.com/groups/gitlab-org/-/epics/8988) for details.
-
-Analytics Instrumentation removes metrics from Service Ping if they are not used in any Sisense dashboard.
-
-For an example of the metric removal process, see this [example issue](https://gitlab.com/gitlab-org/gitlab/-/issues/388236).
-
-To remove a metric:
-
-1. Create an issue for removing the metric if none exists yet. The issue needs to outline why the metric should be deleted. You can use this issue to document the removal process.
-
-1. Verify the metric is not used to calculate the conversational index. The
- conversational index is a measure that reports back to self-managed instances
- to inform administrators of the progress of DevOps adoption for the instance.
-
- You can check
- [`CalculateConvIndexService`](https://gitlab.com/gitlab-services/version-gitlab-com/-/blob/master/app/services/calculate_conv_index_service.rb)
- to view the metrics that are used. The metrics are represented
- as the keys that are passed as a field argument into the `get_value` method.
-
-1. Verify that removing the metric from the Service Ping payload does not cause
- errors in [Version App](https://gitlab.com/gitlab-services/version-gitlab-com)
- when the updated payload is collected and processed. Version App collects
- and persists all Service Ping reports. To verify Service Ping processing in your local development environment, follow this [guide](https://www.youtube.com/watch?v=FS5emplabRU).
- Alternatively, you can modify [fixtures](https://gitlab.com/gitlab-services/version-gitlab-com/-/blob/master/spec/support/usage_data_helpers.rb#L540)
- used to test the [`UsageDataController#create`](https://gitlab.com/gitlab-services/version-gitlab-com/-/blob/3760ef28/spec/controllers/usage_data_controller_spec.rb#L75)
-   endpoint, and ensure that the test suite does not fail when the metric that you wish to remove is not included in the test payload.
-
-1. Remove data from Redis
-
-   For [Ordinary Redis](implement.md#ordinary-redis-counters) counters, remove the data stored in Redis.
-
- - Add a migration to remove the data from Redis for the related Redis keys. For more details, see [this MR example](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/82604/diffs).
-
-1. Create an issue in the
- [GitLab Data Team project](https://gitlab.com/gitlab-data/analytics/-/issues).
-   Ask for confirmation that the metric is not referred to in any Sisense dashboards and
- can be safely removed from Service Ping. Use this
- [example issue](https://gitlab.com/gitlab-data/analytics/-/issues/15266) for guidance.
-
-1. Notify the Customer Success Ops team (`@csops-team`), Analytics Engineers (`@gitlab-data/analytics-engineers`), and Product Analysts (`@gitlab-data/product-analysts`) by `@` mentioning those groups in a comment in the issue from step 1 regarding the deletion of the metric.
- Many Service Ping metrics are relied upon for health score and XMAU reporting and unexpected changes to those metrics could break reporting.
-
-1. After you verify the metric can be safely removed,
- update the attributes of the metric's YAML definition:
-
- - Set the `status:` to `removed`.
-   - Set `removed_by_url:` to the URL of the MR removing the metric.
- - Set `milestone_removed:` to the number of the
- milestone in which the metric was removed.
-
- Do not remove the metric's YAML definition altogether. Some self-managed
- instances might not immediately update to the latest version of GitLab, and
- therefore continue to report the removed metric. The Analytics Instrumentation team
- requires a record of all removed metrics to identify and filter them.
-
-   For example, see this [merge request](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/60149/diffs#b01f429a54843feb22265100c0e4fec1b7da1240_10_10). A minimal YAML sketch of these attribute changes appears at the end of this list.
-
-1. After you verify the metric can be safely removed,
- remove the metric's instrumentation from
- [`lib/gitlab/usage_data.rb`](https://gitlab.com/gitlab-org/gitlab/-/blob/master/lib/gitlab/usage_data.rb)
- or
- [`ee/lib/ee/gitlab/usage_data.rb`](https://gitlab.com/gitlab-org/gitlab/-/blob/master/ee/lib/ee/gitlab/usage_data.rb).
-
-   For example, see this [merge request](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/60149/diffs#6335dc533bd21df26db9de90a02dd66278c2390d_167_167).
-
-1. Remove any other records related to the metric:
- - The feature flag YAML file at [`config/feature_flags/*/*.yaml`](https://gitlab.com/gitlab-org/gitlab/-/tree/master/config/feature_flags).
- - The entry in the known events YAML file at [`lib/gitlab/usage_data_counters/known_events/*.yaml`](https://gitlab.com/gitlab-org/gitlab/-/tree/master/lib/gitlab/usage_data_counters/known_events).
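-
-A minimal sketch of the YAML attribute update described earlier in this list; the URL and milestone
-values are illustrative placeholders:
-
-```yaml
-status: removed
-removed_by_url: https://gitlab.com/gitlab-org/gitlab/-/merge_requests/<MR_ID>
-milestone_removed: '16.1'
-```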
+<!-- This redirect file can be deleted after <2023-08-20>. -->
+<!-- Redirects that point to other docs in the same project expire in three months. -->
+<!-- Redirects that point to docs in a different project or site (for example, link is not relative and starts with `https:`) expire in one year. -->
+<!-- Before deletion, see: https://docs.gitlab.com/ee/development/documentation/redirects.html -->
diff --git a/doc/development/service_ping/performance_indicator_metrics.md b/doc/development/service_ping/performance_indicator_metrics.md
index d7811c52bb1..eda7224732d 100644
--- a/doc/development/service_ping/performance_indicator_metrics.md
+++ b/doc/development/service_ping/performance_indicator_metrics.md
@@ -1,16 +1,11 @@
---
-stage: Analytics
-group: Analytics Instrumentation
-info: To determine the technical writer assigned to the Stage/Group associated with this page, see https://about.gitlab.com/handbook/product/ux/technical-writing/#assignments
+redirect_to: '../internal_analytics/service_ping/performance_indicator_metrics.md'
+remove_date: '2023-08-20'
---
-# Performance Indicator Metrics guide
+This document was moved to [another location](../internal_analytics/service_ping/performance_indicator_metrics.md).
-This guide describes how to use metrics definitions to define [performance indicator](https://about.gitlab.com/handbook/product/analytics-instrumentation-guide/#implementing-product-performance-indicators) metrics.
-
-To use a metric definition to manage a performance indicator:
-
-1. Create a merge request that includes related changes.
-1. Use the labels `~"analytics instrumentation"` and `~"Data Warehouse::Impact Check"`.
-1. Update the metric definition `performance_indicator_type` [field](metrics_dictionary.md#metrics-definition-and-validation).
-1. Create an issue in GitLab Product Data Insights project with the [PI Chart Help template](https://gitlab.com/gitlab-data/product-analytics/-/issues/new?issuable_template=PI%20Chart%20Help) to have the new metric visualized.
+<!-- This redirect file can be deleted after <2023-08-20>. -->
+<!-- Redirects that point to other docs in the same project expire in three months. -->
+<!-- Redirects that point to docs in a different project or site (for example, link is not relative and starts with `https:`) expire in one year. -->
+<!-- Before deletion, see: https://docs.gitlab.com/ee/development/documentation/redirects.html -->
diff --git a/doc/development/service_ping/review_guidelines.md b/doc/development/service_ping/review_guidelines.md
index 71c16820e23..d5805f615e2 100644
--- a/doc/development/service_ping/review_guidelines.md
+++ b/doc/development/service_ping/review_guidelines.md
@@ -1,80 +1,11 @@
---
-stage: Analytics
-group: Analytics Instrumentation
-info: To determine the technical writer assigned to the Stage/Group associated with this page, see https://about.gitlab.com/handbook/product/ux/technical-writing/#assignments
+redirect_to: '../internal_analytics/service_ping/review_guidelines.md'
+remove_date: '2023-08-20'
---
-# Service Ping review guidelines
+This document was moved to [another location](../internal_analytics/service_ping/review_guidelines.md).
-This page includes introductory material for a
-[Analytics Instrumentation](https://about.gitlab.com/handbook/engineering/development/analytics/analytics-instrumentation/)
-review, and is specific to Service Ping related reviews. For broader advice and
-general best practices for code reviews, refer to our [code review guide](../code_review.md).
-
-## Resources for reviewers
-
-- [Service Ping Guide](index.md)
-- [Metrics Dictionary](https://metrics.gitlab.com/)
-
-## Review process
-
-We recommend an Analytics Instrumentation review when a merge request (MR) touches
-any of the following Service Ping files:
-
-- `usage_data*` files.
-- The Metrics Dictionary, including files in:
- - [`config/metrics`](https://gitlab.com/gitlab-org/gitlab/-/tree/master/config/metrics).
- - [`ee/config/metrics`](https://gitlab.com/gitlab-org/gitlab/-/tree/master/ee/config/metrics).
- - [`schema.json`](https://gitlab.com/gitlab-org/gitlab/-/blob/master/config/metrics/schema.json).
-- Analytics Instrumentation tooling. For example,
- [`Gitlab::UsageMetricDefinitionGenerator`](https://gitlab.com/gitlab-org/gitlab/-/blob/master/lib/generators/gitlab/usage_metric_definition_generator.rb)
-
-### Roles and process
-
-#### The merge request **author** should
-
-- Decide whether an Analytics Instrumentation review is needed. You can skip the Analytics Instrumentation
-review and remove the labels if the changes are not related to the Analytics Instrumentation domain and
-are regular backend changes.
-- If an Analytics Instrumentation review is needed, add the labels
- `~analytics instrumentation` and `~analytics instrumentation::review pending`.
-- For merge requests authored by Analytics Instrumentation team members:
- - Assign both the `~backend` and `~analytics instrumentation` reviews to another Analytics Instrumentation team member.
- - Assign the maintainer review to someone outside of the Analytics Instrumentation group.
-- Assign an
- [engineer](https://gitlab.com/groups/gitlab-org/analytics-section/analytics-instrumentation/engineers/-/group_members?with_inherited_permissions=exclude) from the Analytics Instrumentation team for a review.
-- Set the correct attributes in the metric's YAML definition:
- - `product_section`, `product_stage`, `product_group`
- - Provide a clear description of the metric.
-- Add a changelog [according to guidelines](../changelog.md).
-
-#### The Analytics Instrumentation **reviewer** should
-
-- Perform a first-pass review on the merge request and suggest improvements to the author.
-- Check the [metrics location](metrics_dictionary.md#metric-key_path) in
- the Service Ping JSON payload.
-- Add the `~database` label and ask for a [database review](../database_review.md) for
-  metrics that are based on the database.
-- Add `~Data Warehouse::Impact Check` for any database metric that has a query change. Changes in queries can affect [data operations](https://about.gitlab.com/handbook/business-technology/data-team/how-we-work/triage/#gitlabcom-db-structure-changes).
-- For tracking using Redis HLL (HyperLogLog):
- - Check if a [feature flag is needed](implement.md#recommendations).
-- For a metric's YAML definition:
- - Check the metric's `description`.
- - Check the metric's `key_path`.
- - Check the `product_section`, `product_stage`, and `product_group` fields.
- Read the [stages file](https://gitlab.com/gitlab-com/www-gitlab-com/blob/master/data/stages.yml).
- - Check the file location. Consider the time frame, and if the file should be under `ee`.
- - Check the tiers.
-- If a metric was changed or removed: Make sure the MR author notified the Customer Success Ops team (`@csops-team`), Analytics Engineers (`@gitlab-data/analytics-engineers`), and Product Analysts (`@gitlab-data/product-analysts`) by `@` mentioning those groups in a comment on the issue for the MR and all of these groups have acknowledged the removal.
-- Metrics instrumentations
- - Recommend using metrics instrumentation for new metrics, [if possible](metrics_instrumentation.md#support-for-instrumentation-classes).
-- Approve the MR, and relabel the MR with `~"analytics instrumentation::approved"`.
-
-## Review workload distribution
-
-[Danger bot](../dangerbot.md) adds the list of changed Analytics Instrumentation files
-and pings the
-[`@gitlab-org/analytics-section/analytics-instrumentation/engineers`](https://gitlab.com/groups/gitlab-org/analytics-section/analytics-instrumentation/engineers/-/group_members?with_inherited_permissions=exclude) group for merge requests
-that are not drafts.
-
-Any of the Analytics Instrumentation engineers can be assigned for the Analytics Instrumentation review.
+<!-- This redirect file can be deleted after <2023-08-20>. -->
+<!-- Redirects that point to other docs in the same project expire in three months. -->
+<!-- Redirects that point to docs in a different project or site (for example, link is not relative and starts with `https:`) expire in one year. -->
+<!-- Before deletion, see: https://docs.gitlab.com/ee/development/documentation/redirects.html -->
diff --git a/doc/development/service_ping/troubleshooting.md b/doc/development/service_ping/troubleshooting.md
index 2706f570869..31b04c1a6bc 100644
--- a/doc/development/service_ping/troubleshooting.md
+++ b/doc/development/service_ping/troubleshooting.md
@@ -1,164 +1,11 @@
---
-stage: Analytics
-group: Analytics Instrumentation
-info: To determine the technical writer assigned to the Stage/Group associated with this page, see https://about.gitlab.com/handbook/product/ux/technical-writing/#assignments
+redirect_to: '../internal_analytics/service_ping/troubleshooting.md'
+remove_date: '2023-08-20'
---
-# Troubleshooting Service Ping
+This document was moved to [another location](../internal_analytics/service_ping/troubleshooting.md).
-## Service Ping Payload drop
-
-### Symptoms
-
-You will be alerted by the [Data team](https://about.gitlab.com/handbook/business-technology/data-team/) and their [Monte Carlo alerting](https://about.gitlab.com/handbook/business-technology/data-team/platform/monte-carlo/).
-
-### Locating the problem
-
-First you need to identify at which stage in Service Ping data pipeline the drop is occurring.
-
-Start at [Service Ping Health Dashboard](https://app.periscopedata.com/app/gitlab/968489) on Sisense.
-
-You can use [this query](https://gitlab.com/gitlab-org/gitlab/-/issues/347298#note_836685350) as an example to start detecting when the drop started.
-
-### Troubleshoot the GitLab application layer
-
-For results of an investigation into an unexpected drop in the volume of Service Ping payload events, see [this issue](https://gitlab.com/gitlab-data/analytics/-/issues/11071).
-
-### Troubleshoot VersionApp layer
-
-Check if the [export jobs](https://gitlab.com/gitlab-services/version-gitlab-com#data-export-using-pipeline-schedules) are successful.
-
-Check [Service Ping errors](https://app.periscopedata.com/app/gitlab/968489?widget=14609989&udv=0) in the [Service Ping Health Dashboard](https://app.periscopedata.com/app/gitlab/968489).
-
-### Troubleshoot Google Storage layer
-
-Check if the files are present in [Google Storage](https://console.cloud.google.com/storage/browser/cloudsql-gs-production-efd5e8-cloudsql-exports;tab=objects?project=gs-production-efd5e8&prefix=&forceOnObjectsSortingFiltering=false).
-
-### Troubleshoot the data warehouse layer
-
-Reach out to the [Data team](https://about.gitlab.com/handbook/business-technology/data-team/) to ask about the current state of the data warehouse. Their handbook page has a [section with contact details](https://about.gitlab.com/handbook/business-technology/data-team/#how-to-connect-with-us).
-
-### Cannot disable Service Ping with the configuration file
-
-The method to disable Service Ping with the GitLab configuration file does not work in
-GitLab versions 9.3.0 to 13.12.3. To disable it, you must use the Admin Area in
-the GitLab UI instead. For more information, see
-[this issue](https://gitlab.com/gitlab-org/gitlab/-/issues/333269).
-
-GitLab functionality and application settings cannot override or circumvent
-restrictions at the network layer. If Service Ping is blocked by your firewall,
-you are not impacted by this bug.
-
-#### Check if you are affected
-
-You can check if you were affected by this bug by using the Admin Area or by
-checking the configuration file of your GitLab instance:
-
-- Using the Admin Area:
-
- 1. On the left sidebar, expand the top-most chevron (**{chevron-down}**).
- 1. Select **Admin Area**.
- 1. On the left sidebar, select **Settings > Metrics and profiling**.
- 1. Expand **Usage statistics**.
- 1. Are you able to check or uncheck the checkbox to disable Service Ping?
-
- - If _yes_, your GitLab instance is not affected by this bug.
- - If you can't check or uncheck the checkbox, you are affected by this bug.
- See the steps on [how to fix this](#how-to-fix-the-cannot-disable-service-ping-bug).
-
-- Checking your GitLab instance configuration file:
-
- To check whether you're impacted by this bug, check your instance configuration
- settings. The configuration file in which Service Ping can be disabled depends
- on your installation and deployment method, but is typically one of the following:
-
- - `/etc/gitlab/gitlab.rb` for Linux package installations and Docker.
- - `charts.yaml` for GitLab Helm and cloud-native Kubernetes deployments.
- - `gitlab.yml` for GitLab installations from source.
-
- To check the relevant configuration file for strings that indicate whether
- Service Ping is disabled, you can use `grep`:
-
- ```shell
- # Linux package
- grep "usage_ping_enabled'\] = false" /etc/gitlab/gitlab.rb
-
- # Kubernetes charts
- grep "enableUsagePing: false" values.yaml
-
- # From source
- grep "usage_ping_enabled'\] = false" gitlab/config.yml
- ```
-
- If you see any output after running the relevant command, your GitLab instance
- may be affected by the bug. Otherwise, your instance is not affected.
-
-#### How to fix the "Cannot disable Service Ping" bug
-
-To work around this bug, you have two options:
-
-- [Update](../../update/index.md) to GitLab 13.12.4 or newer to fix this bug.
-- If you can't update to GitLab 13.12.4 or newer, enable Service Ping in the
- configuration file, then disable Service Ping in the UI. For example, if you're
- using the Linux package:
-
- 1. Edit `/etc/gitlab/gitlab.rb`:
-
- ```ruby
- gitlab_rails['usage_ping_enabled'] = true
- ```
-
- 1. Reconfigure GitLab:
-
- ```shell
- sudo gitlab-ctl reconfigure
- ```
-
- 1. On the left sidebar, expand the top-most chevron (**{chevron-down}**).
- 1. Select **Admin Area**.
- 1. On the left sidebar, select **Settings > Metrics and profiling**.
- 1. Expand **Usage statistics**.
- 1. Clear the **Enable Service Ping** checkbox.
- 1. Select **Save Changes**.
-
-## Generate Service Ping
-
-### Generate or get the cached Service Ping in rails console
-
-Use the following method in the [rails console](../../administration/operations/rails_console.md#starting-a-rails-console-session).
-
-```ruby
-Gitlab::Usage::ServicePingReport.for(output: :all_metrics_values, cached: true)
-```
-
-### Generate a fresh new Service Ping
-
-Use the following method in the [rails console](../../administration/operations/rails_console.md#starting-a-rails-console-session).
-
-This also refreshes the cached Service Ping displayed in the Admin Area.
-
-```ruby
-Gitlab::Usage::ServicePingReport.for(output: :all_metrics_values)
-```
-
-### Generate and print
-
-Generates Service Ping data in JSON format.
-
-```shell
-gitlab-rake gitlab:usage_data:generate
-```
-
-Generates Service Ping data in YAML format:
-
-```shell
-gitlab-rake gitlab:usage_data:dump_sql_in_yaml
-```
-
-### Generate and send Service Ping
-
-Prints the metrics saved in `conversational_development_index_metrics`.
-
-```shell
-gitlab-rake gitlab:usage_data:generate_and_send
-```
+<!-- This redirect file can be deleted after <2023-08-20>. -->
+<!-- Redirects that point to other docs in the same project expire in three months. -->
+<!-- Redirects that point to docs in a different project or site (for example, link is not relative and starts with `https:`) expire in one year. -->
+<!-- Before deletion, see: https://docs.gitlab.com/ee/development/documentation/redirects.html -->
diff --git a/doc/development/service_ping/usage_data.md b/doc/development/service_ping/usage_data.md
index b3bdaedd60a..94ae90273d0 100644
--- a/doc/development/service_ping/usage_data.md
+++ b/doc/development/service_ping/usage_data.md
@@ -1,69 +1,11 @@
---
-stage: Analytics
-group: Analytics Instrumentation
-info: To determine the technical writer assigned to the Stage/Group associated with this page, see https://about.gitlab.com/handbook/product/ux/technical-writing/#assignments
+redirect_to: '../internal_analytics/service_ping/usage_data.md'
+remove_date: '2023-08-20'
---
-# Usage Data Metrics guide
+This document was moved to [another location](../internal_analytics/service_ping/usage_data.md).
-This guide describes deprecated usage for metrics in `usage_data.rb`.
-
-NOTE:
-Implementing metrics directly in `usage_data.rb` is deprecated. We recommend you use [instrumentation classes](metrics_instrumentation.md) instead.
-
-## Ordinary batch counters
-
-Simple count of a given `ActiveRecord_Relation`. Performs a non-distinct batch count, smartly reduces `batch_size`, and handles errors,
-including the `ActiveRecord::StatementInvalid` error.
-
-Method:
-
-```ruby
-count(relation, column = nil, batch: true, start: nil, finish: nil)
-```
-
-Arguments:
-
-- `relation`: the ActiveRecord_Relation to perform the count on
-- `column`: the column to perform the count on; defaults to the primary key
-- `batch`: default `true` to use batch counting
-- `start`: custom start of the batch counting to avoid complex min calculations
-- `finish`: custom end of the batch counting to avoid complex max calculations
-
-Examples:
-
-```ruby
-count(User.active)
-count(::Clusters::Cluster.aws_installed.enabled, :cluster_id)
-count(::Clusters::Cluster.aws_installed.enabled, :cluster_id, start: ::Clusters::Cluster.minimum(:id), finish: ::Clusters::Cluster.maximum(:id))
-```
-
-## Distinct batch counters
-
-Distinct count of a given `ActiveRecord_Relation` on a given column. Performs a distinct batch count, smartly reduces `batch_size`, and handles errors,
-including the `ActiveRecord::StatementInvalid` error.
-
-Method:
-
-```ruby
-distinct_count(relation, column = nil, batch: true, batch_size: nil, start: nil, finish: nil)
-```
-
-Arguments:
-
-- `relation`: the ActiveRecord_Relation to perform the count
-- `column`: the column to perform the distinct count, by default is the primary key
-- `batch`: default `true` to use batch counting
-- `batch_size`: if none set it uses default value 10000 from `Gitlab::Database::BatchCounter`
-- `start`: custom start of the batch counting to avoid complex min calculations
-- `finish`: custom end of the batch counting to avoid complex max calculations
-
-WARNING:
-Counting over non-unique columns can lead to performance issues. For more information, see the [iterating tables in batches](../database/iterating_tables_in_batches.md) guide.
-
-Examples:
-
-```ruby
-distinct_count(::Project, :creator_id)
-distinct_count(::Note.with_suggestions.where(time_period), :author_id, start: ::User.minimum(:id), finish: ::User.maximum(:id))
-```
+<!-- This redirect file can be deleted after <2023-08-20>. -->
+<!-- Redirects that point to other docs in the same project expire in three months. -->
+<!-- Redirects that point to docs in a different project or site (for example, link is not relative and starts with `https:`) expire in one year. -->
+<!-- Before deletion, see: https://docs.gitlab.com/ee/development/documentation/redirects.html -->
diff --git a/doc/development/snowplow/event_dictionary_guide.md b/doc/development/snowplow/event_dictionary_guide.md
index 6e8947e0210..2bea681bf59 100644
--- a/doc/development/snowplow/event_dictionary_guide.md
+++ b/doc/development/snowplow/event_dictionary_guide.md
@@ -1,91 +1,11 @@
---
-stage: Analytics
-group: Analytics Instrumentation
-info: To determine the technical writer assigned to the Stage/Group associated with this page, see https://about.gitlab.com/handbook/product/ux/technical-writing/#assignments
+redirect_to: '../internal_analytics/snowplow/event_dictionary_guide.md'
+remove_date: '2023-08-20'
---
-# Event dictionary guide
+This document was moved to [another location](../internal_analytics/snowplow/event_dictionary_guide.md).
-NOTE:
-The event dictionary is a work in progress, and this process is subject to change.
-
-This guide describes the event dictionary and how it's implemented.
-
-## Event definition and validation
-
-This process is meant to document all Snowplow events and ensure consistency. Every Snowplow event needs to have such a definition. Event definitions must comply with the [JSON Schema](https://gitlab.com/gitlab-org/gitlab/-/blob/master/config/events/schema.json).
-
-All event definitions are stored in the following directories:
-
-- [`config/events`](https://gitlab.com/gitlab-org/gitlab/-/tree/master/config/events)
-- [`ee/config/events`](https://gitlab.com/gitlab-org/gitlab/-/tree/master/ee/config/events)
-
-Each event is defined in a separate YAML file consisting of the following fields:
-
-| Field | Required | Additional information |
-|------------------------|----------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
-| `description` | yes | A description of the event. |
-| `category` | yes | The event category (see [Event schema](index.md#event-schema)). |
-| `action` | yes | The event action (see [Event schema](index.md#event-schema)). |
-| `label_description` | no | A description of the event label (see [Event schema](index.md#event-schema)). |
-| `property_description` | no | A description of the event property (see [Event schema](index.md#event-schema)). |
-| `value_description` | no | A description of the event value (see [Event schema](index.md#event-schema)). |
-| `extra_properties` | no | The type and description of each extra property sent with the event. |
-| `identifiers` | no | A list of identifiers sent with the event. Can be set to one or more of `project`, `user`, or `namespace`. |
-| `iglu_schema_url` | no | The URL to the custom schema sent with the event, for example, `iglu:com.gitlab/gitlab_experiment/jsonschema/1-0-0`. |
-| `product_section` | yes | The [section](https://gitlab.com/gitlab-com/www-gitlab-com/-/blob/master/data/sections.yml). |
-| `product_stage` | no | The [stage](https://gitlab.com/gitlab-com/www-gitlab-com/blob/master/data/stages.yml) for the event. |
-| `product_group` | yes | The [group](https://gitlab.com/gitlab-com/www-gitlab-com/blob/master/data/stages.yml) that owns the event. |
-| `milestone` | no | The milestone when the event is introduced. |
-| `introduced_by_url` | no | The URL to the merge request that introduced the event. |
-| `distributions` | yes | The [distributions](https://about.gitlab.com/handbook/marketing/brand-and-product-marketing/product-and-solution-marketing/tiers/#definitions) where the tracked feature is available. Can be set to one or more of `ce` or `ee`. |
-| `tiers` | yes | The [tiers](https://about.gitlab.com/handbook/marketing/brand-and-product-marketing/product-and-solution-marketing/tiers/) where the tracked feature is available. Can be set to one or more of `free`, `premium`, or `ultimate`. |
-
-### Example event definition
-
-The linked [`epics_promote`](https://gitlab.com/gitlab-org/gitlab/-/blob/master/config/events/epics_promote.yml)
-YAML file includes an example event definition.
-
-```yaml
-description: Issue promoted to epic
-category: epics
-action: promote
-property_description: The string "issue_id"
-value_description: ID of the issue
-extra_properties:
- weight:
- type: integer
- description: Weight of the issue
-identifiers:
-- project
-- user
-- namespace
-product_section: dev
-product_stage: plan
-product_group: group::product planning
-milestone: "11.10"
-introduced_by_url: https://gitlab.com/gitlab-org/gitlab/-/merge_requests/10537
-distributions:
-- ee
-tiers:
-- premium
-- ultimate
-```
-
-## Create a new event definition
-
-Use the dedicated [event definition generator](https://gitlab.com/gitlab-org/gitlab/-/blob/master/lib/generators/gitlab/snowplow_event_definition_generator.rb)
-to create new event definitions.
-
-The `category` and `action` of each event are included in the filename to standardize file naming.
-
-The generator takes three options:
-
-- `--ee`: Indicates if the event is for EE.
-- `--category=CATEGORY`: Indicates the `category` of the event.
-- `--action=ACTION`: Indicates the `action` of the event.
-
-```shell
-bundle exec rails generate gitlab:snowplow_event_definition --category Groups::EmailCampaignsController --action click
-create config/events/groups__email_campaigns_controller_click.yml
-```
+<!-- This redirect file can be deleted after <2023-08-20>. -->
+<!-- Redirects that point to other docs in the same project expire in three months. -->
+<!-- Redirects that point to docs in a different project or site (for example, link is not relative and starts with `https:`) expire in one year. -->
+<!-- Before deletion, see: https://docs.gitlab.com/ee/development/documentation/redirects.html -->
diff --git a/doc/development/snowplow/implementation.md b/doc/development/snowplow/implementation.md
index 2661df8afd8..a9e4e252a53 100644
--- a/doc/development/snowplow/implementation.md
+++ b/doc/development/snowplow/implementation.md
@@ -1,523 +1,11 @@
---
-stage: Analytics
-group: Analytics Instrumentation
-info: To determine the technical writer assigned to the Stage/Group associated with this page, see https://about.gitlab.com/handbook/product/ux/technical-writing/#assignments
+redirect_to: '../internal_analytics/snowplow/implementation.md'
+remove_date: '2023-08-20'
---
-# Implement Snowplow tracking
+This document was moved to [another location](../internal_analytics/snowplow/implementation.md).
-This page describes how to:
-
-- Implement Snowplow frontend and backend tracking
-- Test Snowplow events
-
-## Event definitions
-
-Every Snowplow event, regardless of frontend or backend, requires a corresponding event definition. These definitions document the event and its properties to make it easier to maintain and analyze.
-These definitions can be browsed in the [event dictionary](https://metrics.gitlab.com/snowplow/). The [event dictionary guide](event_dictionary_guide.md) provides instructions for setting up an event definition.
-
-## Snowplow JavaScript frontend tracking
-
-GitLab provides a `Tracking` interface that wraps the [Snowplow JavaScript tracker](https://docs.snowplow.io/docs/collecting-data/collecting-from-own-applications/javascript-trackers/)
-to track custom events.
-
-For the recommended frontend tracking implementation, see [Usage recommendations](#usage-recommendations).
-
-Structured events and page views include the [`gitlab_standard`](schemas.md#gitlab_standard)
-context, using the `window.gl.snowplowStandardContext` object which includes
-[default data](https://gitlab.com/gitlab-org/gitlab/-/blob/master/app/views/layouts/_snowplow.html.haml)
-as base:
-
-| Property | Example |
-| -------- | ------- |
-| `context_generated_at` | `"2022-01-01T01:00:00.000Z"` |
-| `environment` | `"production"` |
-| `extra` | `{}` |
-| `namespace_id` | `123` |
-| `plan` | `"gold"` |
-| `project_id` | `456` |
-| `source` | `"gitlab-rails"` |
-| `user_id` | `789`* |
-| `is_gitlab_team_member` | `true`|
-
-_\* Undergoes a pseudonymization process at the collector level._
-
-These properties [are overridden](https://gitlab.com/gitlab-org/gitlab/-/blob/master/app/assets/javascripts/tracking/get_standard_context.js)
-with frontend-specific values, like `source` (`gitlab-javascript`), `google_analytics_id`
-and the custom `extra` object. You can modify this object for any subsequent
-structured event that fires, although this is not recommended.
-
-Tracking implementations must have an `action` and a `category`. You can provide additional
-properties from the [event schema](index.md#event-schema), in
-addition to an `extra` object that accepts key-value pairs.
-
-| Property | Type | Default value | Description |
-|:-----------|:-------|:---------------------------|:--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
-| `category` | string | `document.body.dataset.page` | Page or subsection of a page in which events are captured. |
-| `action` | string | `'generic'` | Action the user is taking. Clicks must be `click` and activations must be `activate`. For example, focusing a form field is `activate_form_input`, and clicking a button is `click_button`. |
-| `data` | object | `{}` | Additional data such as `label`, `property`, `value` as described in [Event schema](index.md#event-schema), `context` for custom contexts, and `extra` (key-value pairs object). |
-
-### Usage recommendations
-
-- Use [data attributes](#implement-data-attribute-tracking) on HTML elements that emit `click`, `show.bs.dropdown`, or `hide.bs.dropdown` events.
-- Use the [Vue mixin](#implement-vue-component-tracking) for tracking custom events, or if the supported events for data attributes are not propagating. For example, clickable components that don't emit `click`.
-- Use the [tracking class](#implement-raw-javascript-tracking) when tracking in vanilla JavaScript files.
-
-### Implement data attribute tracking
-
-To implement tracking for HAML or Vue templates, add a [`data-track` attribute](#data-track-attributes) to the element.
-
-The following example shows `data-track-*` attributes assigned to a button:
-
-```haml
-%button.btn{ data: { track_action: "click_button", track_label: "template_preview", track_property: "my-template" } }
-```
-
-```html
-<button class="btn"
- data-track-action="click_button"
- data-track-label="template_preview"
- data-track-property="my-template"
- data-track-extra='{ "template_variant": "primary" }'
-/>
-```
-
-#### `data-track` attributes
-
-| Attribute | Required | Description |
-|:----------------------|:---------|:------------|
-| `data-track-action` | true | Action the user is taking. Clicks must be prepended with `click` and activations must be prepended with `activate`. For example, focusing a form field is `activate_form_input` and clicking a button is `click_button`. Replaces `data-track-event`, which was [deprecated](https://gitlab.com/gitlab-org/gitlab/-/issues/290962) in GitLab 13.11. |
-| `data-track-label` | false | The specific element or object to act on. This can be: the label of the element, for example, a tab labeled 'Create from template' for `create_from_template`; a unique identifier if no text is available, for example, `groups_dropdown_close` for closing the Groups dropdown list; or the name or title attribute of a record being created. |
-| `data-track-property` | false | Any additional property of the element, or object being acted on. |
-| `data-track-value` | false | Describes a numeric value (decimal) directly related to the event. This could be the value of an input. For example, `10` when clicking `internal` visibility. If omitted, this is the element's `value` property or `undefined`. For checkboxes, the default value is the element's checked attribute or `0` when unchecked. The value is parsed as numeric before sending the event. |
-| `data-track-extra` | false | A key-value pair object passed as a valid JSON string. This attribute is added to the `extra` property in our [`gitlab_standard`](schemas.md#gitlab_standard) schema. |
-| `data-track-context` | false | To append a custom context object, passed as a valid JSON string. |
-
-#### Event listeners
-
-Event listeners bind at the document level to handle click events on elements with data attributes.
-This means tracked clicks are still handled when the DOM re-renders or changes. Because of this binding,
-click events must propagate up the DOM tree to the document to be tracked.
-
-If click events stop propagating, you must implement listeners and [Vue component tracking](#implement-vue-component-tracking) or [raw JavaScript tracking](#implement-raw-javascript-tracking).
-
-#### Helper methods
-
-You can use the following Ruby helpers:
-
-```ruby
-tracking_attrs(label, action, property) # { data: { track_label... } }
-
-tracking_attrs_data(label, action, property) # { track_label... }
-```
-
-You can also use these helpers in HAML templates:
-
-```haml
-%button{ **tracking_attrs('main_navigation', 'click_button', 'navigation') }
-
-// When merging with additional data
-// %button{ data: { platform: "...", **tracking_attrs_data('main_navigation', 'click_button', 'navigation') } }
-```
-
-If you use the GitLab helper method [`nav_link`](https://gitlab.com/gitlab-org/gitlab/-/blob/master/app/helpers/tab_helper.rb#L76), you must wrap the data attributes in the `html_options` keyword argument. If you
-use the `ActionView` helper method [`link_to`](https://api.rubyonrails.org/classes/ActionView/Helpers/UrlHelper.html#method-i-link_to), you can pass the data attributes directly.
-
-```ruby
-# Bad
-= nav_link(controller: ['dashboard/groups', 'explore/groups'], data: { track_label: "explore_groups",
-track_action: "click_button" })
-
-# Good
-= nav_link(controller: ['dashboard/groups', 'explore/groups'], html_options: { data: { track_label:
-"explore_groups", track_action: "click_button" } })
-
-# Good (other helpers)
-= link_to explore_groups_path, title: _("Explore"), data: { track_label: "explore_groups", track_action:
-"click_button" }
-```
-
-### Implement Vue component tracking
-
-For custom event tracking, use the [Vue mixin](https://gitlab.com/gitlab-org/gitlab/-/blob/master/app/assets/javascripts/tracking/tracking.js#L207). It exposes `Tracking.event` as the `track` method.
-You can specify tracking options by creating a `tracking` data object or
-computed property, or by passing them as the second parameter: `this.track('click_button', opts)`.
-These options override any defaults and allow the values to be dynamic, based on props or state:
-
-| Property | Type | Default | Example |
-| -- | -- | -- | -- |
-| `category` | string | `document.body.dataset.page` | `'code_quality_walkthrough'` |
-| `label` | string | `''` | `'process_start_button'` |
-| `property` | string | `''` | `'asc'` or `'desc'` |
-| `value` | integer | `undefined` | `0`, `1`, `500` |
-| `extra` | object | `{}` | `{ selectedVariant: this.variant }` |
-
-To implement Vue component tracking:
-
-1. Import the `Tracking` library and call the `mixin` method:
-
- ```javascript
- import Tracking from '~/tracking';
-
- const trackingMixin = Tracking.mixin();
-
- // Optionally provide default properties
- // const trackingMixin = Tracking.mixin({ label: 'right_sidebar' });
- ```
-
-1. Use the mixin in the component:
-
- ```javascript
- export default {
- mixins: [trackingMixin],
- // Or
- // mixins: [Tracking.mixin()],
- // mixins: [Tracking.mixin({ label: 'right_sidebar' })],
-
- data() {
- return {
- expanded: false,
- };
- },
- };
- ```
-
-1. You can specify tracking options by creating a `tracking` data object
-or computed property:
-
- ```javascript
- export default {
- name: 'RightSidebar',
-
- mixins: [Tracking.mixin()],
-
- data() {
- return {
- expanded: false,
- variant: '',
- tracking: {
- label: 'right_sidebar',
- // property: '',
- // value: '',
- // experiment: '',
- // extra: {},
- },
- };
- },
-
- // Or
- // computed: {
- // tracking() {
- // return {
- // property: this.variant,
- // extra: { expanded: this.expanded },
- // };
- // },
- // },
- };
- ```
-
-1. Call the `track` method. Tracking options can be passed as the second parameter:
-
- ```javascript
- this.track('click_button', {
- label: 'right_sidebar',
- });
- ```
-
- Or use the `track` method in the template:
-
- ```html
- <template>
- <div>
- <button data-testid="toggle" @click="toggle">Toggle</button>
-
- <div v-if="expanded">
- <p>Hello world!</p>
- <button @click="track('click_button')">Track another event</button>
- </div>
- </div>
- </template>
- ```
-
-#### Testing example
-
-```javascript
-export default {
- name: 'CountDropdown',
-
- mixins: [Tracking.mixin({ label: 'count_dropdown' })],
-
- data() {
- return {
- variant: 'counter',
- count: 0,
- };
- },
-
- methods: {
- handleChange({ target }) {
- const { variant } = this;
-
- this.count = Number(target.value);
-
- this.track('change_value', {
- value: this.count,
- extra: { variant }
- });
- },
- },
-};
-```
-
-```javascript
-import { mockTracking } from 'helpers/tracking_helper';
-// mockTracking(category, documentOverride, spyMethod)
-
-describe('CountDropdown.vue', () => {
- let trackingSpy;
- let wrapper;
-
- ...
-
- beforeEach(() => {
- trackingSpy = mockTracking(undefined, wrapper.element, jest.spyOn);
- });
-
- const findDropdown = () => wrapper.find('[data-testid="dropdown"]');
-
- it('tracks change event', () => {
- const dropdown = findDropdown();
- dropdown.element.value = 30;
- dropdown.trigger('change');
-
- expect(trackingSpy).toHaveBeenCalledWith(undefined, 'change_value', {
- value: 30,
- label: 'count_dropdown',
- extra: { variant: 'counter' },
- });
- });
-});
-```
-
-### Implement raw JavaScript tracking
-
-To track from a vanilla JavaScript file, use the `Tracking.event` static function
-(calls [`dispatchSnowplowEvent`](https://gitlab.com/gitlab-org/gitlab/-/blob/master/app/assets/javascripts/tracking/dispatch_snowplow_event.js)).
-
-The following example demonstrates tracking a click on a button by manually calling `Tracking.event`.
-
-```javascript
-import Tracking from '~/tracking';
-
-const button = document.getElementById('create_from_template_button');
-
-button.addEventListener('click', () => {
- Tracking.event(undefined, 'click_button', {
- label: 'create_from_template',
- property: 'template_preview',
- extra: {
- templateVariant: 'primary',
- valid: 1,
- },
- });
-});
-```
-
-#### Testing example
-
-```javascript
-import Tracking from '~/tracking';
-
-describe('MyTracking', () => {
- let wrapper;
-
- beforeEach(() => {
- jest.spyOn(Tracking, 'event');
- });
-
- const findButton = () => wrapper.find('[data-testid="create_from_template"]');
-
- it('tracks event', () => {
- findButton().trigger('click');
-
- expect(Tracking.event).toHaveBeenCalledWith(undefined, 'click_button', {
- label: 'create_from_template',
- property: 'template_preview',
- extra: {
- templateVariant: 'primary',
- valid: true,
- },
- });
- });
-});
-```
-
-### Form tracking
-
-To enable Snowplow automatic [form tracking](https://docs.snowplow.io/docs/collecting-data/collecting-from-own-applications/javascript-trackers/javascript-tracker/javascript-tracker-v2/tracking-specific-events/#form-tracking):
-
-1. Call `Tracking.enableFormTracking` when the DOM is ready.
-1. Provide a `config` object that includes at least one of the following elements:
- - `forms` determines the forms to track. Identified by the CSS class name.
- - `fields` determines the fields inside the tracked forms to track. Identified by the field `name`.
-1. Optional. Provide a list of contexts as the second argument. The [`gitlab_standard`](schemas.md#gitlab_standard) schema is excluded from these events.
-
-```javascript
-Tracking.enableFormTracking({
- forms: { allow: ['sign-in-form', 'password-recovery-form'] },
- fields: { allow: ['terms_and_conditions', 'newsletter_agreement'] },
-});
-```
-
-#### Testing example
-
-```javascript
-import Tracking from '~/tracking';
-
-describe('MyFormTracking', () => {
- let formTrackingSpy;
-
- beforeEach(() => {
- formTrackingSpy = jest
- .spyOn(Tracking, 'enableFormTracking')
- .mockImplementation(() => null);
- });
-
- it('initialized with the correct configuration', () => {
- expect(formTrackingSpy).toHaveBeenCalledWith({
- forms: { allow: ['sign-in-form', 'password-recovery-form'] },
- fields: { allow: ['terms_and_conditions', 'newsletter_agreement'] },
- });
- });
-});
-```
-
-## Implement Ruby backend tracking
-
-`Gitlab::Tracking` is an interface that wraps the [Snowplow Ruby Tracker](https://docs.snowplow.io/docs/collecting-data/collecting-from-own-applications/ruby-tracker/) for tracking custom events.
-Backend tracking provides:
-
-- User behavior tracking
-- Instrumentation to monitor and visualize performance over time in a section or aspect of code.
-
-To add custom event tracking and instrumentation, call the `Gitlab::Tracking.event` class method.
-For example:
-
-```ruby
-class Projects::CreateService < BaseService
- def execute
- project = Project.create(params)
-
- Gitlab::Tracking.event('Projects::CreateService', 'create_project', label: project.errors.full_messages.to_sentence,
- property: project.valid?.to_s, project: project, user: current_user, namespace: namespace)
- end
-end
-```
-
-Use the following arguments:
-
-| Argument | Type | Default value | Description |
-|------------|---------------------------|---------------|-----------------------------------------------------------------------------------------------------------------------------------|
-| `category` | String | | Area or aspect of the application. For example, `HealthCheckController` or `Lfs::FileTransformer`. |
-| `action` | String | | The action being taken. For example, a controller action such as `create`, or an Active Record callback. |
-| `label` | String | `nil` | The specific element or object to act on. This can be one of the following: the label of the element, for example, a tab labeled 'Create from template' for `create_from_template`; a unique identifier if no text is available, for example, `groups_dropdown_close` for closing the Groups dropdown list; or the name or title attribute of a record being created. |
-| `property` | String | `nil` | Any additional property of the element, or object being acted on. |
-| `value` | Numeric | `nil` | Describes a numeric value (decimal) directly related to the event. This could be the value of an input. For example, `10` when clicking `internal` visibility. |
-| `context` | Array\[SelfDescribingJSON\] | `nil` | An array of custom contexts to send with this event. Most events should not have any custom contexts. |
-| `project` | Project | `nil` | The project associated with the event. |
-| `user` | User | `nil` | The user associated with the event. This value undergoes a pseudonymization process at the collector level. |
-| `namespace` | Namespace | `nil` | The namespace associated with the event. |
-| `extra` | Hash | `{}` | Additional keyword arguments are collected into a hash and sent with the event. |
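-
-For example, a hedged sketch of the `extra` behavior: `deploy_target` below is an illustrative keyword, not a real argument in the codebase, and is shown only to demonstrate how unnamed keywords are collected.
-
-```ruby
-# `deploy_target` is not one of the named arguments above, so it is
-# collected into the `extra` hash and sent with the event (illustrative only).
-Gitlab::Tracking.event(
-  'ProjectsController',
-  'deploy',
-  label: 'manual',
-  deploy_target: 'kubernetes'
-)
-```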
-
-### Unit testing
-
-To test backend Snowplow events, use the `expect_snowplow_event` helper. For more information, see
-[testing best practices](../testing_guide/best_practices.md#test-snowplow-events).
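-
-The following is a minimal, hypothetical sketch only: it assumes a spec tagged with the `:snowplow` metadata described in the testing best practices, and that `service`, `user`, and `namespace` are `let` definitions. The category and action mirror the backend example above.
-
-```ruby
-# Hypothetical spec; the category, action, and arguments are illustrative.
-it 'tracks the project creation event', :snowplow do
-  service.execute
-
-  expect_snowplow_event(
-    category: 'Projects::CreateService',
-    action: 'create_project',
-    user: user,
-    namespace: namespace
-  )
-end
-```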
-
-### Performance
-
-We use the [AsyncEmitter](https://snowplow.github.io/snowplow-ruby-tracker/SnowplowTracker/AsyncEmitter.html) when tracking events, which allows for instrumentation calls to be run in a background thread. This is still an active area of development.
-
-## Develop and test Snowplow
-
-To develop and test a Snowplow event, you can use the following tools to test frontend and backend tracking:
-
-| Testing Tool                                 | Frontend Tracking | Backend Tracking | Local Development Environment | Production Environment |
-|----------------------------------------------|-------------------|------------------|-------------------------------|------------------------|
-| Snowplow Analytics Debugger Chrome Extension | Yes               | No               | Yes                           | Yes                    |
-| Snowplow Inspector Chrome Extension          | Yes               | No               | Yes                           | Yes                    |
-| Snowplow Micro                               | Yes               | Yes              | Yes                           | No                     |
-
-### Test frontend events
-
-Before you test frontend events in development, you must:
-
-1. [Enable Snowplow tracking in the Admin Area](index.md#enable-snowplow-tracking).
-1. Turn off ad blockers that could prevent Snowplow JavaScript from loading in your environment.
-1. Turn off "Do Not Track" (DNT) in your browser.
-
-All URLs are pseudonymized. The entity identifier [replaces](https://docs.snowplow.io/docs/collecting-data/collecting-from-own-applications/javascript-trackers/javascript-tracker/javascript-tracker-v2/tracker-setup/other-parameters-2/#setting-a-custom-page-url-and-referrer-url) personally identifiable
-information (PII). PII includes usernames, group, and project names.
-Page titles are hardcoded as `GitLab` for the same reason.
-
-#### Snowplow Analytics Debugger Chrome Extension
-
-[Snowplow Analytics Debugger](https://www.iglooanalytics.com/blog/snowplow-analytics-debugger-chrome-extension.html) is a browser extension for testing frontend events. It works in production, staging, and local development environments.
-
-1. Install the [Snowplow Analytics Debugger](https://chrome.google.com/webstore/detail/snowplow-analytics-debugg/jbnlcgeengmijcghameodeaenefieedm) Chrome browser extension.
-1. Open Chrome DevTools to the Snowplow Analytics Debugger tab.
-
-#### Snowplow Inspector Chrome Extension
-
-Snowplow Inspector Chrome Extension is a browser extension for testing frontend events. This works in production, staging, and local development environments.
-
-<i class="fa fa-youtube-play youtube" aria-hidden="true"></i>
-For a video tutorial, see the [Snowplow plugin walk through](https://www.youtube.com/watch?v=g4rqnIZ1Mb4).
-
-1. Install [Snowplow Inspector](https://chrome.google.com/webstore/detail/snowplow-inspector/maplkdomeamdlngconidoefjpogkmljm?hl=en).
-1. To open the extension, select the Snowplow Inspector icon beside the address bar.
-1. Click around on a webpage with Snowplow to see JavaScript events firing in the inspector window.
-
-### Test backend events with Snowplow Micro
-
-[Snowplow Micro](https://snowplow.io/blog/introducing-snowplow-micro/) is a
-Docker-based solution for testing backend and frontend events in a local development environment. Snowplow Micro
-records the same events as the full Snowplow pipeline. To query events, use the Snowplow Micro API.
-
-It can be set up automatically using [GitLab Development Kit (GDK)](https://gitlab.com/gitlab-org/gitlab-development-kit).
-See the [how-to docs](https://gitlab.com/gitlab-org/gitlab-development-kit/-/blob/main/doc/howto/snowplow_micro.md) for more details.
-
-1. Set the environment variable to tell the GDK to use Snowplow Micro in development. This overrides two `application_settings` options:
-   - The `snowplow_enabled` setting instead returns `true` from `Gitlab::Tracking.enabled?`.
-   - The `snowplow_collector_hostname` setting instead always returns `localhost:9090` (or the port set in the `snowplow_micro.port` GDK setting) from `Gitlab::Tracking.collector_hostname`.
-
-With Snowplow Micro set up, you can manually test backend Snowplow events:
-
-1. Send a test Snowplow event from the Rails console:
-
- ```ruby
- Gitlab::Tracking.event('category', 'action')
- ```
-
-1. Navigate to `localhost:9090/micro/good` to see the event.
-
-#### Useful links
-
-- [Snowplow Micro repository](https://github.com/snowplow-incubator/snowplow-micro)
-- [Installation guide recording](https://www.youtube.com/watch?v=OX46fo_A0Ag)
-
-### Troubleshoot
-
-To control content security policy warnings when using an external host, modify `config/gitlab.yml`
-to allow or prevent them. To allow them, add the relevant host to `connect_src`. For example, for
-`https://snowplow.trx.gitlab.net`:
-
-```yaml
-development:
- <<: *base
- gitlab:
- content_security_policy:
- enabled: true
- directives:
- connect_src: "'self' http://localhost:* http://127.0.0.1:* ws://localhost:* wss://localhost:* ws://127.0.0.1:* https://snowplow.trx.gitlab.net/"
-```
+<!-- This redirect file can be deleted after <2023-08-20>. -->
+<!-- Redirects that point to other docs in the same project expire in three months. -->
+<!-- Redirects that point to docs in a different project or site (for example, link is not relative and starts with `https:`) expire in one year. -->
+<!-- Before deletion, see: https://docs.gitlab.com/ee/development/documentation/redirects.html -->
diff --git a/doc/development/snowplow/index.md b/doc/development/snowplow/index.md
index 97a60b6f1f9..c0e53fe3b1b 100644
--- a/doc/development/snowplow/index.md
+++ b/doc/development/snowplow/index.md
@@ -1,201 +1,11 @@
---
-stage: Analytics
-group: Analytics Instrumentation
-info: To determine the technical writer assigned to the Stage/Group associated with this page, see https://about.gitlab.com/handbook/product/ux/technical-writing/#assignments
+redirect_to: '../internal_analytics/snowplow/index.md'
+remove_date: '2023-08-20'
---
-# Snowplow development guidelines
+This document was moved to [another location](../internal_analytics/snowplow/index.md).
-Snowplow is an enterprise-grade marketing and Analytics Instrumentation platform that tracks how users engage with our website and application.
-
-[Snowplow](https://snowplow.io/) consists of several loosely-coupled sub-systems:
-
-- **Trackers** fire Snowplow events. Snowplow has twelve trackers that cover web, mobile, desktop, server, and IoT.
-- **Collectors** receive Snowplow events from trackers. We use different event collectors that synchronize events to Amazon S3, Apache Kafka, or Amazon Kinesis.
-- **Enrich** cleans raw Snowplow events, enriches them, and puts them into storage. There is a Hadoop-based enrichment process, and a Kinesis-based or Kafka-based process.
-- **Storage** stores Snowplow events. We store the Snowplow events in a flat file structure on S3, and in the Redshift and PostgreSQL databases.
-- **Data modeling** joins event-level data with other data sets, aggregates them into smaller data sets, and applies business logic. This produces a clean set of tables for data analysis. We use data models for Redshift and Looker.
-- **Analytics** are performed on Snowplow events or on aggregate tables.
-
-![Snowplow flow](../img/snowplow_flow.png)
-
-## Enable Snowplow tracking
-
-Tracking can be enabled at:
-
-- The instance level, which enables tracking on both the frontend and backend layers.
-- The user level. User tracking can be disabled on a per user basis.
- GitLab respects the [Do Not Track](https://www.eff.org/issues/do-not-track) standard, so any user who has enabled the Do Not Track option in their browser is not tracked at a user level.
-
-Snowplow tracking is configured to send data for GitLab.com to a collector configured by GitLab. By default, self-managed
-instances do not have a collector configured and do not collect data via Snowplow.
-
-You can configure your self-managed GitLab instance to use a custom Snowplow collector.
-
-1. On the left sidebar, expand the top-most chevron (**{chevron-down}**).
-1. Select **Admin Area**.
-1. On the left sidebar, select **Settings > General**.
-1. Expand **Snowplow**.
-1. Select **Enable Snowplow tracking** and enter your Snowplow configuration information. For example:
-
- | Name | Value |
- |--------------------|-------------------------------|
- | Collector hostname | `your-snowplow-collector.net` |
- | App ID | `gitlab` |
- | Cookie domain | `.your-gitlab-instance.com` |
-
-1. Select **Save changes**.
-
-## Snowplow request flow
-
-The following example shows a basic request/response flow between these components:
-
-- Snowplow JS / Ruby Trackers on GitLab.com
-- [GitLab.com Snowplow Collector](https://gitlab.com/gitlab-com/gl-infra/readiness/-/blob/master/library/snowplow/index.md)
-- The GitLab S3 Bucket
-- The GitLab Snowflake Data Warehouse
-- Sisense
-
-```mermaid
-sequenceDiagram
- participant Snowplow JS (Frontend)
- participant Snowplow Ruby (Backend)
- participant GitLab.com Snowplow Collector
- participant S3 Bucket
- participant Snowflake DW
- participant Sisense Dashboards
- Snowplow JS (Frontend) ->> GitLab.com Snowplow Collector: FE Tracking event
- Snowplow Ruby (Backend) ->> GitLab.com Snowplow Collector: BE Tracking event
- loop Process using Kinesis Stream
- GitLab.com Snowplow Collector ->> GitLab.com Snowplow Collector: Log raw events
- GitLab.com Snowplow Collector ->> GitLab.com Snowplow Collector: Enrich events
- GitLab.com Snowplow Collector ->> GitLab.com Snowplow Collector: Write to disk
- end
- GitLab.com Snowplow Collector ->> S3 Bucket: Kinesis Firehose
- Note over GitLab.com Snowplow Collector, S3 Bucket: Pseudonymization
- S3 Bucket->>Snowflake DW: Import data
- Snowflake DW->>Snowflake DW: Transform data using dbt
- Snowflake DW->>Sisense Dashboards: Data available for querying
-```
-
-For more details about the architecture, see [Snowplow infrastructure](infrastructure.md).
-
-## Event schema
-
-All events must be consistent. If each feature captures events differently, analysis
-becomes difficult.
-
-Each event provides attributes that describe the event.
-
-| Attribute | Type | Required | Description |
-| --------- | ------- | -------- | ----------- |
-| category | text | true | The page or backend section of the application. Unless infeasible, use the Rails page attribute by default in the frontend, and namespace + class name on the backend, for example, `Notes::CreateService`. |
-| action | text | true | The action the user takes, or aspect that's being instrumented. The first word must describe the action or aspect. For example, clicks must be `click`, activations must be `activate`, creations must be `create`. Use underscores to describe what was acted on. For example, activating a form field is `activate_form_input`, an interface action like clicking on a dropdown list is `click_dropdown`, a behavior like creating a project record from the backend is `create_project`. |
-| label | text | false | The specific element or object to act on. This can be one of the following: the label of the element, for example, a tab labeled 'Create from template' for `create_from_template`; a unique identifier if no text is available, for example, `groups_dropdown_close` for closing the Groups dropdown list; or the name or title attribute of a record being created. For Service Ping metrics adapted to Snowplow events, this should be the full metric [key path](../service_ping/metrics_dictionary.md#metric-key_path) taken from its definition file. |
-| property | text | false | Any additional property of the element, or object being acted on. For Service Ping metrics adapted to Snowplow events, this should be additional information or context that can help analyze the event. For example, in the case of `usage_activity_by_stage_monthly.create.merge_requests_users`, there are four different possible merge request actions: "create", "merge", "comment", and "close". Each of these would be a possible property value. |
-| value | decimal | false | Describes a numeric value (decimal) directly related to the event. This could be the value of an input. For example, `10` when clicking `internal` visibility. |
-| context | vector | false | Additional data in the form of a [self-describing JSON](https://docs.snowplow.io/docs/pipeline-components-and-applications/iglu/common-architecture/self-describing-json-schemas/) to describe the event if the attributes are not sufficient. Each context must have its schema defined to assure data integrity. Refer to the list of GitLab-defined contexts for more details. |
-
-### Examples
-
-| Category* | Label | Action | Property** | Value |
-|-------------|------------------|-----------------------|----------|:-----:|
-| `[root:index]` | `main_navigation` | `click_navigation_link` | `[link_label]` | - |
-| `[groups:boards:show]` | `toggle_swimlanes` | `click_toggle_button` | - | `[is_active]` |
-| `[projects:registry:index]` | `registry_delete` | `click_button` | - | - |
-| `[projects:registry:index]` | `registry_delete` | `confirm_deletion` | - | - |
-| `[projects:blob:show]` | `congratulate_first_pipeline` | `click_button` | `[human_access]` | - |
-| `[projects:clusters:new]` | `chart_options` | `generate_link` | `[chart_link]` | - |
-| `[projects:clusters:new]` | `chart_options` | `click_add_label_button` | `[label_id]` | - |
-| `API::NpmPackages` | `counts.package_events_i_package_push_package_by_deploy_token` | `push_package` | `npm` | - |
-
-_* If you omit the category, the default is used._<br>
-_** Use property for variable strings._
-
-### Reference SQL
-
-#### Last 20 `reply_comment_button` events
-
-```sql
-SELECT
- session_id,
- event_id,
- event_label,
- event_action,
- event_property,
- event_value,
- event_category,
- contexts
-FROM legacy.snowplow_structured_events_all
-WHERE
- event_label = 'reply_comment_button'
- AND event_action = 'click_button'
- -- AND event_category = 'projects:issues:show'
- -- AND event_value = 1
-ORDER BY collector_tstamp DESC
-LIMIT 20
-```
-
-#### Last 100 page view events
-
-```sql
-SELECT
- -- page_url,
- -- page_title,
- -- referer_url,
- -- marketing_medium,
- -- marketing_source,
- -- marketing_campaign,
- -- browser_window_width,
- -- device_is_mobile
- *
-FROM legacy.snowplow_page_views_30
-ORDER BY page_view_start DESC
-LIMIT 100
-```
-
-#### Top 20 users who fired `reply_comment_button` in the last 30 days
-
-```sql
-SELECT
- count(*) as hits,
- se_action,
- se_category,
- gsc_pseudonymized_user_id
-FROM legacy.snowplow_gitlab_events_30
-WHERE
- se_label = 'reply_comment_button'
- AND gsc_pseudonymized_user_id IS NOT NULL
-GROUP BY gsc_pseudonymized_user_id, se_category, se_action
-ORDER BY count(*) DESC
-LIMIT 20
-```
-
-#### Query JSON formatted data
-
-```sql
-SELECT
- derived_tstamp,
- contexts:data[0]:data:extra:old_format as CURRENT_FORMAT,
- contexts:data[0]:data:extra:value as UPDATED_FORMAT
-FROM legacy.snowplow_structured_events_all
-WHERE event_action in ('wiki_format_updated')
-ORDER BY derived_tstamp DESC
-LIMIT 100
-```
-
-### Web-specific parameters
-
-Snowplow JavaScript adds [web-specific parameters](https://docs.snowplow.io/docs/collecting-data/collecting-from-own-applications/snowplow-tracker-protocol/#Web-specific_parameters) to all web events by default.
-
-## Related topics
-
-- [Snowplow data structure](https://docs.snowplow.io/docs/understanding-your-pipeline/canonical-event/)
-- [Our Iglu schema registry](https://gitlab.com/gitlab-org/iglu)
-- [List of events used in our codebase (Event Dictionary)](https://metrics.gitlab.com/snowplow/)
-- [Analytics Instrumentation Guide](https://about.gitlab.com/handbook/product/analytics-instrumentation-guide/)
-- [Service Ping Guide](../service_ping/index.md)
-- [Analytics Instrumentation Direction](https://about.gitlab.com/direction/analytics/analytics-instrumentation/)
-- [Data Analysis Process](https://about.gitlab.com/handbook/business-technology/data-team/#data-analysis-process/)
-- [Data for Product Managers](https://about.gitlab.com/handbook/business-technology/data-team/programs/data-for-product-managers/)
-- [Data Infrastructure](https://about.gitlab.com/handbook/business-technology/data-team/platform/infrastructure/)
+<!-- This redirect file can be deleted after <2023-08-20>. -->
+<!-- Redirects that point to other docs in the same project expire in three months. -->
+<!-- Redirects that point to docs in a different project or site (for example, link is not relative and starts with `https:`) expire in one year. -->
+<!-- Before deletion, see: https://docs.gitlab.com/ee/development/documentation/redirects.html -->
diff --git a/doc/development/snowplow/infrastructure.md b/doc/development/snowplow/infrastructure.md
index 9679abac6b7..6374af40ffe 100644
--- a/doc/development/snowplow/infrastructure.md
+++ b/doc/development/snowplow/infrastructure.md
@@ -1,101 +1,11 @@
---
-stage: Analytics
-group: Analytics Instrumentation
-info: To determine the technical writer assigned to the Stage/Group associated with this page, see https://about.gitlab.com/handbook/product/ux/technical-writing/#assignments
+redirect_to: '../internal_analytics/snowplow/infrastructure.md'
+remove_date: '2023-08-20'
---
-# Snowplow infrastructure
+This document was moved to [another location](../internal_analytics/snowplow/infrastructure.md).
-Snowplow events on GitLab SaaS fired by a [tracker](implementation.md) go through an AWS pipeline, managed by GitLab.
-
-## Event flow in the AWS pipeline
-
-Every event goes through a collector, enricher, and pseudonymization lambda. The event is then written to S3 storage, where it is picked up by the Snowflake data warehouse.
-
-Deploying and managing the infrastructure is automated using Terraform in the current [Terraform repository](https://gitlab.com/gitlab-com/gl-infra/config-mgmt/-/tree/master/environments/aws-snowplow).
-
-```mermaid
-graph LR
- GL[GitLab.com]-->COL
-
- subgraph aws-cloud[AWS]
- COL[Collector]-->|snowplow-raw-good|ENR
- COL[Collector]-->|snowplow-raw-bad|FRBE
- subgraph firehoserbe[Firehose]
- FRBE[AWS Lambda]
- end
- FRBE-->S3RBE
-
- ENR[Enricher]-->|snowplow-enriched-bad|FEBE
- subgraph firehoseebe[Firehose]
- FEBE[AWS Lambda]
- end
- FEBE-->S3EBE
-
- ENR[Enricher]-->|snowplow-enriched-good|FRGE
- subgraph firehosege[Firehose]
- FRGE[AWS Lambda]
- end
- FRGE-->S3GE
- end
-
- subgraph snowflake[Data warehouse]
- S3RBE[S3 raw-bad]-->BE[gitlab_bad_events]
- S3EBE[S3 enriched-bad]-->BE[gitlab_bad_events]
- S3GE[S3 output]-->GE[gitlab_events]
- end
-```
-
-See [Snowplow technology 101](https://github.com/snowplow/snowplow/#snowplow-technology-101) for Snowplow's own documentation and an overview how collectors and enrichers work.
-
-### Pseudonymization
-
-In contrast to a typical Snowplow pipeline, after enrichment, GitLab Snowplow events go through a [pseudonymization service](https://gitlab.com/gitlab-org/analytics-section/analytics-instrumentation/snowplow-pseudonymization) in the form of an AWS Lambda service before they are stored in S3 storage.
-
-#### Why events need to be pseudonymized
-
-GitLab is bound by its [obligations to community](https://about.gitlab.com/handbook/product/analytics-instrumentation-guide/service-usage-data-commitment/)
-and by [legal regulations](https://about.gitlab.com/handbook/legal/privacy/services-usage-data/) to protect the privacy of its users.
-
-At the same time, GitLab needs event data to provide valuable insights for business decisions
-and to better understand users' behavior patterns. The
-pseudonymization process balances these two requirements.
-
-Pseudonymization irreversibly transforms personally identifiable information inside a Snowplow event:
-the output is deterministic for a given input, but reveals no relation to that input.
-
-#### How events are pseudonymized
-
-Pseudonymization uses an allowlist that provides privacy by default. Therefore, each
-attribute received as part of a Snowplow event is pseudonymized unless the attribute
-is an allowed exception.
-
-Pseudonymization uses the HMAC-SHA256 keyed hash algorithm.
-Each identifiable attribute is combined with a secret salt, and the resulting digest replaces the original value as its pseudonym.
-
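-The following Ruby sketch is illustrative only; the real transformation happens in the AWS Lambda service linked above, and the salt shown here is a placeholder:
-
-```ruby
-require 'openssl'
-
-# Placeholder salt; in production the salt is a secret managed by the pipeline.
-SALT = 'example-secret-salt'
-
-# Deterministic: the same input and salt always produce the same pseudonym,
-# but the original value cannot be recovered from the digest.
-def pseudonymize(value)
-  OpenSSL::HMAC.hexdigest('SHA256', SALT, value.to_s)
-end
-
-pseudonymize('user@example.com') # => 64-character hex digest
-```
-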
-### S3 bucket data lake to Snowflake
-
-See [Data team's Snowplow Overview](https://about.gitlab.com/handbook/business-technology/data-team/platform/snowplow/) for further details on how data is ingested into our Snowflake data warehouse.
-
-## Monitoring
-
-There are several tools that monitor Snowplow events tracking in different stages of the processing pipeline:
-
-- [Analytics Instrumentation Grafana dashboard](https://dashboards.gitlab.net/d/product-intelligence-main/product-intelligence-product-intelligence?orgId=1) monitors backend events sent from a GitLab.com instance to the collector fleet. This dashboard provides information about:
- - The number of events that successfully reach Snowplow collectors.
- - The number of events that failed to reach Snowplow collectors.
- - The number of backend events that were sent.
-- [AWS CloudWatch dashboard](https://console.aws.amazon.com/cloudwatch/home?region=us-east-1#dashboards:name=SnowPlow;start=P3D) monitors the state of the events in a processing pipeline. The pipeline starts at the Snowplow collectors, goes through enrichment and pseudonymization, and ends with persistence in an S3 bucket. From S3, the events are imported into the Snowflake Data Warehouse. You must have AWS access rights to view this dashboard. For more information, see [monitoring](https://gitlab.com/gitlab-org/analytics-section/analytics-instrumentation/snowplow-pseudonymization#monitoring) in the Snowplow Events pseudonymization service documentation.
-- [Sisense dashboard](https://app.periscopedata.com/app/gitlab/417669/Snowplow-Summary-Dashboard) provides information about the number of good and bad events imported into the Data Warehouse, in addition to the total number of imported Snowplow events.
-
-For more information, see this [video walk-through](https://www.youtube.com/watch?v=NxPS0aKa_oU).
-
-## Related topics
-
-- [Snowplow technology 101](https://github.com/snowplow/snowplow/#snowplow-technology-101)
-- [Snowplow pseudonymization AWS Lambda project](https://gitlab.com/gitlab-org/analytics-section/analytics-instrumentation/snowplow-pseudonymization)
-- [Analytics Instrumentation Guide](https://about.gitlab.com/handbook/product/analytics-instrumentation-guide/)
-- [Data Infrastructure](https://about.gitlab.com/handbook/business-technology/data-team/platform/infrastructure/)
-- [Snowplow architecture overview (internal)](https://www.youtube.com/watch?v=eVYJjzspsLU)
-- [Snowplow architecture overview slide deck (internal)](https://docs.google.com/presentation/d/16gQEO5CAg8Tx4NBtfnZj-GF4juFI6HfEPWcZgH4Rn14/edit?usp=sharing)
-- [AWS Lambda implementation (internal)](https://youtu.be/cQd0mdMhkQA)
+<!-- This redirect file can be deleted after <2023-08-20>. -->
+<!-- Redirects that point to other docs in the same project expire in three months. -->
+<!-- Redirects that point to docs in a different project or site (for example, link is not relative and starts with `https:`) expire in one year. -->
+<!-- Before deletion, see: https://docs.gitlab.com/ee/development/documentation/redirects.html -->
diff --git a/doc/development/snowplow/review_guidelines.md b/doc/development/snowplow/review_guidelines.md
index 07b25f95e13..f4752e08dde 100644
--- a/doc/development/snowplow/review_guidelines.md
+++ b/doc/development/snowplow/review_guidelines.md
@@ -1,44 +1,11 @@
---
-stage: Analytics
-group: Analytics Instrumentation
-info: To determine the technical writer assigned to the Stage/Group associated with this page, see https://about.gitlab.com/handbook/product/ux/technical-writing/#assignments
+redirect_to: '../internal_analytics/snowplow/review_guidelines.md'
+remove_date: '2023-08-20'
---
-# Snowplow review guidelines
+This document was moved to [another location](../internal_analytics/snowplow/review_guidelines.md).
-This page includes introductory material for an
-[Analytics Instrumentation](https://about.gitlab.com/handbook/engineering/development/analytics/analytics-instrumentation/)
-review, and is specific to Snowplow related reviews. For broader advice and
-general best practices for code reviews, refer to our [code review guide](../code_review.md).
-
-## Resources for reviewers
-
-- [Snowplow Guide](index.md)
-- [Event Dictionary](https://metrics.gitlab.com/snowplow/)
-
-## Review process
-
-We recommend an Analytics Instrumentation review when a merge request (MR) involves changes in
-events or touches Snowplow related files.
-
-### Roles and process
-
-#### The merge request **author** should
-
-- For frontend events, when relevant, add a screenshot of the event in
- the [testing tool](implementation.md#develop-and-test-snowplow) used.
-- For backend events, when relevant, add the output of the
- [Snowplow Micro](implementation.md#test-backend-events-with-snowplow-micro) good events
- `GET http://localhost:9090/micro/good` (it might be a good idea
- to reset with `GET http://localhost:9090/micro/reset` first).
-- Add or update the event definition file according to the [Event Dictionary Guide](event_dictionary_guide.md).
-
-#### The Analytics Instrumentation **reviewer** should
-
-- Check that the [event schema](index.md#event-schema) is correct.
-- Check the [usage recommendations](implementation.md#usage-recommendations).
-- Check that an event definition file was created or updated in accordance with the [Event Dictionary Guide](event_dictionary_guide.md).
-- If needed, check that the events are firing locally using one of the
-[testing tools](implementation.md#develop-and-test-snowplow) available.
-- Approve the MR, and relabel the MR with `~"analytics instrumentation::approved"`.
-- If the Snowplow event mirrors a RedisHLL event, tag @mdrussell to review whether the payload is usable for this purpose.
+<!-- This redirect file can be deleted after <2023-08-20>. -->
+<!-- Redirects that point to other docs in the same project expire in three months. -->
+<!-- Redirects that point to docs in a different project or site (for example, link is not relative and starts with `https:`) expire in one year. -->
+<!-- Before deletion, see: https://docs.gitlab.com/ee/development/documentation/redirects.html -->
diff --git a/doc/development/snowplow/schemas.md b/doc/development/snowplow/schemas.md
index 21142f68d39..7e00ddd976d 100644
--- a/doc/development/snowplow/schemas.md
+++ b/doc/development/snowplow/schemas.md
@@ -1,190 +1,11 @@
---
-stage: Analytics
-group: Analytics Instrumentation
-info: To determine the technical writer assigned to the Stage/Group associated with this page, see https://about.gitlab.com/handbook/product/ux/technical-writing/#assignments
+redirect_to: '../internal_analytics/snowplow/schemas.md'
+remove_date: '2023-08-20'
---
-# Snowplow schemas
+This document was moved to [another location](../internal_analytics/snowplow/schemas.md).
-This page provides Snowplow schema reference for GitLab events.
-
-## `gitlab_standard`
-
-We include the [`gitlab_standard` schema](https://gitlab.com/gitlab-org/iglu/-/blob/master/public/schemas/com.gitlab/gitlab_standard/jsonschema/) with structured events and page views.
-
-The [`StandardContext`](https://gitlab.com/gitlab-org/gitlab/-/blob/master/lib/gitlab/tracking/standard_context.rb)
-class represents this schema in the application. Some properties are
-[automatically populated for frontend events](implementation.md#snowplow-javascript-frontend-tracking),
-and can be [provided manually for backend events](implementation.md#implement-ruby-backend-tracking).
-
-| Field Name | Required | Default value | Type | Description |
-|-------------------------|:-------------------:|------------------------------|---------------------------|-------------------------------------------------------------------------------------------------------------------------|
-| `project_id` | **{dotted-circle}** | Current project ID * | integer | |
-| `namespace_id` | **{dotted-circle}** | Current group/namespace ID * | integer | |
-| `user_id` | **{dotted-circle}** | Current user ID * | integer | User database record ID attribute. This value undergoes a pseudonymization process at the collector level. |
-| `context_generated_at` | **{dotted-circle}** | Current timestamp | string (date time format) | Timestamp indicating when context was generated. |
-| `environment` | **{check-circle}** | Current environment | string (max 32 chars) | Name of the source environment, such as `production` or `staging` |
-| `source` | **{check-circle}** | Event source | string (max 32 chars) | Name of the source application, such as `gitlab-rails` or `gitlab-javascript` |
-| `plan` | **{dotted-circle}** | Current namespace plan * | string (max 32 chars) | Name of the plan for the namespace, such as `free`, `premium`, or `ultimate`. Automatically picked from the `namespace`. |
-| `google_analytics_id` | **{dotted-circle}** | GA ID value * | string (max 32 chars) | Google Analytics ID, present when set from our marketing sites. |
-| `is_gitlab_team_member` | **{dotted-circle}** | | boolean | Indicates whether the event is triggered by a GitLab team member |
-| `extra` | **{dotted-circle}** | | JSON | Any additional data associated with the event, in the form of key-value pairs |
-
-_\* Default value present for frontend events only_
-
-## Default Schema
-
-Frontend events include a [web-specific schema](https://docs.snowplow.io/docs/understanding-your-pipeline/canonical-event/#web-specific-fields) provided by Snowplow.
-All URLs are pseudonymized. The entity identifier [replaces](https://docs.snowplow.io/docs/collecting-data/collecting-from-own-applications/javascript-trackers/javascript-tracker/javascript-tracker-v2/tracker-setup/other-parameters-2/#setting-a-custom-page-url-and-referrer-url) personally identifiable
-information (PII). PII includes usernames, group, and project names.
-Page titles are hardcoded as `GitLab` for the same reason.
-
-| Field Name | Required | Type | Description |
-|--------------------------|---------------------|-----------|----------------------------------------------------------------------------------------------------------------------------------|
-| `app_id` | **{check-circle}** | string | Unique identifier for website / application |
-| `base_currency` | **{dotted-circle}** | string | Reporting currency |
-| `br_colordepth` | **{dotted-circle}** | integer | Browser color depth |
-| `br_cookies` | **{dotted-circle}** | boolean | Does the browser permit cookies? |
-| `br_family` | **{dotted-circle}** | string | Browser family |
-| `br_features_director` | **{dotted-circle}** | boolean | Director plugin installed? |
-| `br_features_flash` | **{dotted-circle}** | boolean | Flash plugin installed? |
-| `br_features_gears` | **{dotted-circle}** | boolean | Google gears installed? |
-| `br_features_java` | **{dotted-circle}** | boolean | Java plugin installed? |
-| `br_features_pdf` | **{dotted-circle}** | boolean | Adobe PDF plugin installed? |
-| `br_features_quicktime` | **{dotted-circle}** | boolean | Quicktime plugin installed? |
-| `br_features_realplayer` | **{dotted-circle}** | boolean | RealPlayer plugin installed? |
-| `br_features_silverlight` | **{dotted-circle}** | boolean | Silverlight plugin installed? |
-| `br_features_windowsmedia` | **{dotted-circle}** | boolean | Windows media plugin installed? |
-| `br_lang` | **{dotted-circle}** | string | Language the browser is set to |
-| `br_name` | **{dotted-circle}** | string | Browser name |
-| `br_renderengine` | **{dotted-circle}** | string | Browser rendering engine |
-| `br_type` | **{dotted-circle}** | string | Browser type |
-| `br_version` | **{dotted-circle}** | string | Browser version |
-| `br_viewheight` | **{dotted-circle}** | string | Browser viewport height |
-| `br_viewwidth` | **{dotted-circle}** | string | Browser viewport width |
-| `collector_tstamp` | **{dotted-circle}** | timestamp | Time stamp for the event recorded by the collector |
-| `contexts` | **{dotted-circle}** | | |
-| `derived_contexts` | **{dotted-circle}** | | Contexts derived in the Enrich process |
-| `derived_tstamp` | **{dotted-circle}** | timestamp | Timestamp making allowance for inaccurate device clock |
-| `doc_charset` | **{dotted-circle}** | string | Web page's character encoding |
-| `doc_height` | **{dotted-circle}** | string | Web page height |
-| `doc_width` | **{dotted-circle}** | string | Web page width |
-| `domain_sessionid` | **{dotted-circle}** | string | Unique identifier (UUID) for this visit of this `user_id` to this domain |
-| `domain_sessionidx` | **{dotted-circle}** | integer | Index of number of visits that this `user_id` has made to this domain (The first visit is `1`) |
-| `domain_userid` | **{dotted-circle}** | string | Unique identifier for a user, based on a first party cookie (so domain specific) |
-| `dvce_created_tstamp` | **{dotted-circle}** | timestamp | Timestamp when event occurred, as recorded by client device |
-| `dvce_ismobile` | **{dotted-circle}** | boolean | Indicates whether device is mobile |
-| `dvce_screenheight` | **{dotted-circle}** | string | Screen / monitor resolution |
-| `dvce_screenwidth` | **{dotted-circle}** | string | Screen / monitor resolution |
-| `dvce_sent_tstamp` | **{dotted-circle}** | timestamp | Timestamp when event was sent by client device to collector |
-| `dvce_type` | **{dotted-circle}** | string | Type of device |
-| `etl_tags` | **{dotted-circle}** | string | JSON of tags for this ETL run |
-| `etl_tstamp` | **{dotted-circle}** | timestamp | Timestamp event began ETL |
-| `event` | **{dotted-circle}** | string | Event type |
-| `event_fingerprint` | **{dotted-circle}** | string | Hash client-set event fields |
-| `event_format` | **{dotted-circle}** | string | Format for event |
-| `event_id` | **{dotted-circle}** | string | Event UUID |
-| `event_name` | **{dotted-circle}** | string | Event name |
-| `event_vendor` | **{dotted-circle}** | string | The company who developed the event model |
-| `event_version` | **{dotted-circle}** | string | Version of event schema |
-| `geo_city` | **{dotted-circle}** | string | City of IP origin |
-| `geo_country` | **{dotted-circle}** | string | Country of IP origin |
-| `geo_latitude` | **{dotted-circle}** | string | An approximate latitude |
-| `geo_longitude` | **{dotted-circle}** | string | An approximate longitude |
-| `geo_region` | **{dotted-circle}** | string | Region of IP origin |
-| `geo_region_name` | **{dotted-circle}** | string | Region of IP origin |
-| `geo_timezone` | **{dotted-circle}** | string | Time zone of IP origin |
-| `geo_zipcode` | **{dotted-circle}** | string | Zip (postal) code of IP origin |
-| `ip_domain` | **{dotted-circle}** | string | Second level domain name associated with the visitor's IP address |
-| `ip_isp` | **{dotted-circle}** | string | Visitor's ISP |
-| `ip_netspeed` | **{dotted-circle}** | string | Visitor's connection type |
-| `ip_organization` | **{dotted-circle}** | string | Organization associated with the visitor's IP address – defaults to ISP name if none is found |
-| `mkt_campaign` | **{dotted-circle}** | string | The campaign ID |
-| `mkt_clickid` | **{dotted-circle}** | string | The click ID |
-| `mkt_content` | **{dotted-circle}** | string | The content or ID of the ad. |
-| `mkt_medium` | **{dotted-circle}** | string | Type of traffic source |
-| `mkt_network` | **{dotted-circle}** | string | The ad network to which the click ID belongs |
-| `mkt_source` | **{dotted-circle}** | string | The company / website where the traffic came from |
-| `mkt_term` | **{dotted-circle}** | string | Keywords associated with the referrer |
-| `name_tracker` | **{dotted-circle}** | string | The tracker namespace |
-| `network_userid` | **{dotted-circle}** | string | Unique identifier for a user, based on a cookie from the collector (so set at a network level and shouldn't be set by a tracker) |
-| `os_family` | **{dotted-circle}** | string | Operating system family |
-| `os_manufacturer` | **{dotted-circle}** | string | Manufacturers of operating system |
-| `os_name` | **{dotted-circle}** | string | Name of operating system |
-| `os_timezone` | **{dotted-circle}** | string | Client operating system time zone |
-| `page_referrer` | **{dotted-circle}** | string | Referrer URL |
-| `page_title` | **{dotted-circle}** | string | To not expose personal identifying information, the page title is hardcoded as `GitLab` |
-| `page_url` | **{dotted-circle}** | string | Page URL |
-| `page_urlfragment` | **{dotted-circle}** | string | Fragment aka anchor |
-| `page_urlhost` | **{dotted-circle}** | string | Host aka domain |
-| `page_urlpath` | **{dotted-circle}** | string | Path to page |
-| `page_urlport` | **{dotted-circle}** | integer | Port if specified, 80 if not |
-| `page_urlquery` | **{dotted-circle}** | string | Query string |
-| `page_urlscheme` | **{dotted-circle}** | string | Scheme (protocol name) |
-| `platform` | **{dotted-circle}** | string | The platform the app runs on |
-| `pp_xoffset_max` | **{dotted-circle}** | integer | Maximum page x offset seen in the last ping period |
-| `pp_xoffset_min` | **{dotted-circle}** | integer | Minimum page x offset seen in the last ping period |
-| `pp_yoffset_max` | **{dotted-circle}** | integer | Maximum page y offset seen in the last ping period |
-| `pp_yoffset_min` | **{dotted-circle}** | integer | Minimum page y offset seen in the last ping period |
-| `refr_domain_userid` | **{dotted-circle}** | string | The Snowplow `domain_userid` of the referring website |
-| `refr_dvce_tstamp` | **{dotted-circle}** | timestamp | The time of attaching the `domain_userid` to the inbound link |
-| `refr_medium` | **{dotted-circle}** | string | Type of referer |
-| `refr_source` | **{dotted-circle}** | string | Name of referer if recognised |
-| `refr_term` | **{dotted-circle}** | string | Keywords if source is a search engine |
-| `refr_urlfragment` | **{dotted-circle}** | string | Referer URL fragment |
-| `refr_urlhost` | **{dotted-circle}** | string | Referer host |
-| `refr_urlpath` | **{dotted-circle}** | string | Referer page path |
-| `refr_urlport` | **{dotted-circle}** | integer | Referer port |
-| `refr_urlquery` | **{dotted-circle}** | string | Referer URL query string |
-| `refr_urlscheme` | **{dotted-circle}** | string | Referer scheme |
-| `se_action` | **{dotted-circle}** | string | The action / event itself |
-| `se_category` | **{dotted-circle}** | string | The category of event |
-| `se_label` | **{dotted-circle}** | string | A label often used to refer to the 'object' the action is performed on |
-| `se_property` | **{dotted-circle}** | string | A property associated with either the action or the object |
-| `se_value` | **{dotted-circle}** | decimal | A value associated with the user action |
-| `ti_category` | **{dotted-circle}** | string | Item category |
-| `ti_currency` | **{dotted-circle}** | string | Currency |
-| `ti_name` | **{dotted-circle}** | string | Item name |
-| `ti_orderid` | **{dotted-circle}** | string | Order ID |
-| `ti_price` | **{dotted-circle}** | decimal | Item price |
-| `ti_price_base` | **{dotted-circle}** | decimal | Item price in base currency |
-| `ti_quantity` | **{dotted-circle}** | integer | Item quantity |
-| `ti_sku` | **{dotted-circle}** | string | Item SKU |
-| `tr_affiliation` | **{dotted-circle}** | string | Transaction affiliation (such as channel) |
-| `tr_city` | **{dotted-circle}** | string | Delivery address: city |
-| `tr_country` | **{dotted-circle}** | string | Delivery address: country |
-| `tr_currency` | **{dotted-circle}** | string | Transaction Currency |
-| `tr_orderid` | **{dotted-circle}** | string | Order ID |
-| `tr_shipping` | **{dotted-circle}** | decimal | Delivery cost charged |
-| `tr_shipping_base` | **{dotted-circle}** | decimal | Shipping cost in base currency |
-| `tr_state` | **{dotted-circle}** | string | Delivery address: state |
-| `tr_tax` | **{dotted-circle}** | decimal | Transaction tax value (such as amount of VAT included) |
-| `tr_tax_base` | **{dotted-circle}** | decimal | Tax applied in base currency |
-| `tr_total` | **{dotted-circle}** | decimal | Transaction total value |
-| `tr_total_base` | **{dotted-circle}** | decimal | Total amount of transaction in base currency |
-| `true_tstamp` | **{dotted-circle}** | timestamp | User-set exact timestamp |
-| `txn_id` | **{dotted-circle}** | string | Transaction ID |
-| `unstruct_event` | **{dotted-circle}** | JSON | The properties of the event |
-| `uploaded_at` | **{dotted-circle}** | | |
-| `user_fingerprint` | **{dotted-circle}** | integer | User identifier based on (hopefully unique) browser features |
-| `user_id` | **{dotted-circle}** | string | Unique identifier for user, set by the business using setUserId |
-| `user_ipaddress` | **{dotted-circle}** | string | IP address |
-| `useragent` | **{dotted-circle}** | string | User agent (expressed as a browser string) |
-| `v_collector` | **{dotted-circle}** | string | Collector version |
-| `v_etl` | **{dotted-circle}** | string | ETL version |
-| `v_tracker` | **{dotted-circle}** | string | Identifier for Snowplow tracker |
-
-## `gitlab_service_ping`
-
-Backend events converted from ServicePing (`redis` and `redis_hll`) must include [ServicePing context](https://gitlab.com/gitlab-org/iglu/-/tree/master/public/schemas/com.gitlab/gitlab_service_ping/jsonschema)
-using the [helper class](https://gitlab.com/gitlab-org/gitlab/blob/master/lib/gitlab/tracking/service_ping_context.rb).
-
-For an example of a converted `redis_hll` event with context, see [this code](https://gitlab.com/gitlab-org/gitlab/-/edit/master/app/controllers/concerns/product_analytics_tracking.rb#L58).
-
-| Field Name | Required | Type | Description |
-|---------------|:-------------------:|------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------|
-| `data_source` | **{check-circle}** | string (max 64 chars) | The `data_source` attribute from the metrics YAML definition. |
-| `event_name`* | **{dotted-circle}** | string (max 128 chars) | When there is a many-to-many relationship between events and metrics, this field contains the name of a Redis event that can be used for aggregations in downstream systems |
-| `key_path`* | **{dotted-circle}** | string (max 256 chars) | The `key_path` attribute from the metrics YAML definition |
-
-_\* Either `event_name` or `key_path` is required_
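
A minimal sketch of how a backend event might attach this context when it is emitted, assuming the `Gitlab::Tracking::ServicePingContext` helper linked above accepts `data_source:` and `event:` keyword arguments and exposes a `to_context` method; the event and category names below are placeholders, not real metric definitions.

```ruby
# Illustration only: attach a ServicePing context to a Snowplow backend event.
# Assumes ServicePingContext.new(data_source:, event:) and #to_context, mirroring
# the redis_hll example linked above; the names below are placeholders.
context = Gitlab::Tracking::ServicePingContext.new(
  data_source: :redis_hll,           # populates the `data_source` field above
  event: 'i_example_redis_hll_event' # populates the `event_name` field above
).to_context

Gitlab::Tracking.event(
  'ExampleController', # becomes `se_category`
  'visit',             # becomes `se_action`
  context: [context]
)
```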
+<!-- This redirect file can be deleted after <2023-08-20>. -->
+<!-- Redirects that point to other docs in the same project expire in three months. -->
+<!-- Redirects that point to docs in a different project or site (for example, link is not relative and starts with `https:`) expire in one year. -->
+<!-- Before deletion, see: https://docs.gitlab.com/ee/development/documentation/redirects.html -->
diff --git a/doc/development/snowplow/troubleshooting.md b/doc/development/snowplow/troubleshooting.md
index 885f4e0c16f..ed1f5033239 100644
--- a/doc/development/snowplow/troubleshooting.md
+++ b/doc/development/snowplow/troubleshooting.md
@@ -1,80 +1,11 @@
---
-stage: Analytics
-group: Analytics Instrumentation
-info: To determine the technical writer assigned to the Stage/Group associated with this page, see https://about.gitlab.com/handbook/product/ux/technical-writing/#assignments
+redirect_to: '../internal_analytics/snowplow/troubleshooting.md'
+remove_date: '2023-08-20'
---
-# Troubleshooting Snowplow
+This document was moved to [another location](../internal_analytics/snowplow/troubleshooting.md).
-## Monitoring
-
-This page covers dashboards and alerts coming from a number of internal tools.
-
-For a brief video overview of the tools used to monitor Snowplow usage, please check out [this internal video](https://www.youtube.com/watch?v=NxPS0aKa_oU) (you must be logged into GitLab Unfiltered to view).
-
-## Good events drop
-
-### Symptoms
-
-You are alerted via a [Sisense alert](https://app.periscopedata.com/app/gitlab/alert/Volume-of-Snowplow-Good-events/5a5f80ef34fe450da5ebb84eaa84067f/edit) sent to the `#g_product_intelligence` Slack channel.
-
-### Locating the problem
-
-First, identify the stage of the Snowplow data pipeline at which the drop is occurring.
-Start at the [Snowplow dashboard](https://console.aws.amazon.com/systems-manager/resource-groups/cloudwatch?dashboard=SnowPlow&region=us-east-1#) on CloudWatch.
-If you do not have access to CloudWatch, create an [access request issue](https://gitlab.com/gitlab-com/team-member-epics/access-requests/-/issues/9730) first.
-On the CloudWatch dashboard, set the time range to the last 4 weeks to get a better picture of system characteristics over time. Then visit the following charts:
-
-1. `ELB New Flow Count` and `Collector Auto Scaling Group Network In/Out` - these show, respectively, the number of connections to collectors via load balancers and the data volume (in bytes) processed by collectors. If a drop is visible there, fewer events were fired from the GitLab application. See the [application layer guide](#troubleshooting-gitlab-application-layer) for more details.
-1. `Firehose Records to S3` - this shows how many event records were saved to the S3 bucket. If there was a drop on this chart but not on the charts from step 1, the problem is located at the AWS infrastructure layer. Refer to the [AWS layer guide](#troubleshooting-aws-layer).
-1. If the drop wasn't visible on any of the previous charts, the problem is at the data warehouse layer. Refer to the [data warehouse layer guide](#troubleshooting-data-warehouse-layer).
-
-### Troubleshooting GitLab application layer
-
-A drop occurring at the application layer can be a symptom of an issue, but it might also be a result of the normal application lifecycle, intended changes to analytics instrumentation or experiment tracking,
-or even a public holiday in regions of the world with a larger user base. To verify whether there is an underlying problem to solve, check the following:
-
-1. Check `about.gitlab.com` website traffic on [Google Analytics](https://analytics.google.com/analytics/web/) to verify whether a public holiday might have affected overall use of the GitLab system.
- 1. You may need to open an access request for Google Analytics access first, for example: [access request internal issue](https://gitlab.com/gitlab-com/team-member-epics/access-requests/-/issues/1772).
-1. Plot `select date(dvce_created_tstamp) , event , count(*) from legacy.snowplow_unnested_events_90 where dvce_created_tstamp > '2021-06-15' and dvce_created_tstamp < '2021-07-10' group by 1 , 2 order by 1 , 2` in Sisense to see which type of events was responsible for the drop.
-1. Plot `select date(dvce_created_tstamp) ,se_category , count(*) from legacy.snowplow_unnested_events_90 where dvce_created_tstamp > '2021-06-15' and dvce_created_tstamp < '2021-07-31' and event = 'struct' group by 1 , 2 order by 1, 2` to see which events recorded the biggest drops in the suspected category.
-1. Check whether any merged MR might have caused a reduction in reported events; pay attention to MRs labeled ~"analytics instrumentation" and ~"growth experiment".
-1. Check (via the [Grafana explore tab](https://dashboards.gitlab.net/explore)) the following Prometheus counters: `gitlab_snowplow_events_total`, `gitlab_snowplow_failed_events_total`, and `gitlab_snowplow_successful_events_total` to see how many events were fired correctly from GitLab.com. For example, the query `sum(rate(gitlab_snowplow_successful_events_total{env="gprd"}[5m])) / sum(rate(gitlab_snowplow_events_total{env="gprd"}[5m]))` charts the ratio of successful events to total events sent. If the ratio drops below 1, the problem might be in communication between GitLab and the AWS collector fleet.
-1. Check the [logs in Kibana](https://log.gprd.gitlab.net/app/discover#) and filter with `{ "query": { "match_phrase": { "json.message": "failed to be reported to collector at" } } }` to see whether any failed events were logged.
-
-For the results of an investigation into an unexpected drop in Snowplow event volume, see [this issue](https://gitlab.com/gitlab-org/gitlab/-/issues/335206).
-
-### Troubleshooting AWS layer
-
-Previously conducted investigations:
-
-- [Steep decrease of Snowplow page views](https://gitlab.com/gitlab-org/gitlab/-/issues/268009)
-- [`snowplow.trx.gitlab.net` unreachable](https://gitlab.com/gitlab-com/gl-infra/production/-/issues/5073)
-
-### Troubleshooting data warehouse layer
-
-Reach out to the [Data team](https://about.gitlab.com/handbook/business-technology/data-team/) to ask about the current state of the data warehouse. Their handbook page has a [section with contact details](https://about.gitlab.com/handbook/business-technology/data-team/#how-to-connect-with-us).
-
-## Delay in Snowplow Enrichers
-
-If there is an alert for **Snowplow Raw Good Stream Backing Up**, we receive an email notification. This sometimes happens because Snowplow Enrichers don't scale well enough for the volume of Snowplow events.
-
-If the delay goes over 48 hours, we lose data.
-
-### Contact SRE on-call
-
-Send a message in the [#infrastructure_lounge](https://gitlab.slack.com/archives/CB3LSMEJV) Slack channel using the following template:
-
-```markdown
-Hello team!
-
-We received an alert for [Snowplow Raw Good Stream Backing Up](https://us-east-1.console.aws.amazon.com/cloudwatch/home?region=us-east-1#alarmsV2:alarm/SnowPlow+Raw+Good+Stream+Backing+Up?).
-
-Enrichers are not scaling well for the volume of events we receive.
-
-See the [dashboard](https://us-east-1.console.aws.amazon.com/cloudwatch/home?region=us-east-1#dashboards:name=SnowPlow).
-
-Could we get assistance to fix the delay?
-
-Thank you!
-```
+<!-- This redirect file can be deleted after <2023-08-20>. -->
+<!-- Redirects that point to other docs in the same project expire in three months. -->
+<!-- Redirects that point to docs in a different project or site (for example, link is not relative and starts with `https:`) expire in one year. -->
+<!-- Before deletion, see: https://docs.gitlab.com/ee/development/documentation/redirects.html -->
diff --git a/doc/raketasks/index.md b/doc/raketasks/index.md
index a44f053bc7b..47fa7e855a1 100644
--- a/doc/raketasks/index.md
+++ b/doc/raketasks/index.md
@@ -44,7 +44,7 @@ The following Rake tasks are available for use with GitLab:
| [Reset user passwords](../security/reset_user_password.md#use-a-rake-task) | Reset user passwords using Rake. |
| [Uploads migrate](../administration/raketasks/uploads/migrate.md) | Migrate uploads between local storage and object storage. |
| [Uploads sanitize](../administration/raketasks/uploads/sanitize.md) | Remove EXIF data from images uploaded to earlier versions of GitLab. |
-| [Service Data](../development/service_ping/troubleshooting.md#generate-service-ping) | Generate and troubleshoot [Service Ping](../development/service_ping/index.md). |
+| [Service Data](../development/internal_analytics/service_ping/troubleshooting.md#generate-service-ping) | Generate and troubleshoot [Service Ping](../development/internal_analytics/service_ping/index.md). |
| [User management](user_management.md) | Perform user management tasks. |
| [Webhooks administration](web_hooks.md) | Maintain project webhooks. |
| [X.509 signatures](x509_signatures.md) | Update X.509 commit signatures, which can be useful if the certificate store changed. |
diff --git a/doc/update/deprecations.md b/doc/update/deprecations.md
index 287c81f6801..155b32e4a2a 100644
--- a/doc/update/deprecations.md
+++ b/doc/update/deprecations.md
@@ -273,6 +273,22 @@ This change is a breaking change. You should use an [authentication token](../ci
<div class="deprecation breaking-change" data-milestone="17.0">
+### GraphQL deprecation of `dependencyProxyTotalSizeInBytes` field
+
+<div class="deprecation-notes">
+- Announced in: GitLab <span class="milestone">16.1</span>
+- This is a [breaking change](https://docs.gitlab.com/ee/development/deprecation_guidelines/).
+- To discuss this change or learn more, see the [deprecation issue](https://gitlab.com/gitlab-org/gitlab/-/issues/414236).
+</div>
+
+You can use GraphQL to query the amount of storage used by the GitLab Dependency Proxy. However, the `dependencyProxyTotalSizeInBytes` field is limited to values of roughly 2 GB (the value is expressed in bytes), which is not always large enough for the Dependency Proxy. As a result, `dependencyProxyTotalSizeInBytes` is deprecated and will be removed in GitLab 17.0.
+
+Use `dependencyProxyTotalSizeBytes` instead, introduced in GitLab 16.1.
+
+</div>
+
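+
A sketch of querying the replacement field through the GraphQL API with plain Ruby, assuming `dependencyProxyTotalSizeBytes` is exposed on `Group` like the field it replaces; the group path and token are placeholders.

```ruby
# Illustration only: read the replacement field over the GraphQL API.
# Assumes the field lives on Group, like the deprecated field; the group
# path and GITLAB_TOKEN are placeholders.
require 'net/http'
require 'json'
require 'uri'

query = <<~GRAPHQL
  {
    group(fullPath: "my-group") {
      dependencyProxyTotalSizeBytes
    }
  }
GRAPHQL

uri = URI('https://gitlab.com/api/graphql')
request = Net::HTTP::Post.new(
  uri,
  'Content-Type' => 'application/json',
  'Authorization' => "Bearer #{ENV['GITLAB_TOKEN']}"
)
request.body = { query: query }.to_json

response = Net::HTTP.start(uri.host, uri.port, use_ssl: true) { |http| http.request(request) }
puts JSON.parse(response.body).dig('data', 'group', 'dependencyProxyTotalSizeBytes')
```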
+<div class="deprecation breaking-change" data-milestone="17.0">
+
### GraphQL type, `RunnerMembershipFilter` renamed to `CiRunnerMembershipFilter`
<div class="deprecation-notes">
diff --git a/doc/user/admin_area/settings/index.md b/doc/user/admin_area/settings/index.md
index c74d39feb33..923181bfc5d 100644
--- a/doc/user/admin_area/settings/index.md
+++ b/doc/user/admin_area/settings/index.md
@@ -97,7 +97,7 @@ The **Integrations** settings contain:
[available for self-managed instances in the future](https://gitlab.com/gitlab-org/gitlab/-/issues/28164).
- [Customer experience improvement and third-party offers](third_party_offers.md) -
Control the display of customer experience improvement content and third-party offers.
-- [Snowplow](../../../development/snowplow/index.md) - Configure the Snowplow integration.
+- [Snowplow](../../../development/internal_analytics/snowplow/index.md) - Configure the Snowplow integration.
- [Google GKE](../../project/clusters/add_gke_clusters.md) - Google GKE integration enables
you to provision GKE clusters from GitLab.
- [Amazon EKS](../../project/clusters/add_eks_clusters.md) - Amazon EKS integration enables
diff --git a/doc/user/admin_area/settings/usage_statistics.md b/doc/user/admin_area/settings/usage_statistics.md
index 4cd71a31c76..ed0c8d21931 100644
--- a/doc/user/admin_area/settings/usage_statistics.md
+++ b/doc/user/admin_area/settings/usage_statistics.md
@@ -14,7 +14,7 @@ All usage statistics are [opt-out](#enable-or-disable-usage-statistics).
## Service Ping
Service Ping is a process that collects and sends a weekly payload to GitLab Inc.
-For more information, see the [Service Ping guide](../../../development/service_ping/index.md). When Service Ping is enabled, GitLab gathers data from other instances and enables certain [instance-level analytics features](../analytics/index.md)
+For more information, see the [Service Ping guide](../../../development/internal_analytics/service_ping/index.md). When Service Ping is enabled, GitLab gathers data from other instances and enables certain [instance-level analytics features](../analytics/index.md)
that are dependent on Service Ping.
### Why enable Service Ping?
@@ -138,7 +138,7 @@ The payload is available in the [Service Usage data](#manually-upload-service-pi
NOTE:
The method to disable Service Ping in the GitLab configuration file does not work in
-GitLab versions 9.3 to 13.12.3. For more information about how to disable it, see [troubleshooting](../../../development/service_ping/troubleshooting.md#cannot-disable-service-ping-with-the-configuration-file).
+GitLab versions 9.3 to 13.12.3. For more information about how to disable it, see [troubleshooting](../../../development/internal_analytics/service_ping/troubleshooting.md#cannot-disable-service-ping-with-the-configuration-file).
To disable Service Ping and prevent it from being configured in the future through
the Admin Area:
@@ -186,7 +186,7 @@ You can view the exact JSON payload sent to GitLab Inc. in the Admin Area. To vi
1. Expand the **Usage statistics** section.
1. Select **Preview payload**.
-For an example payload, see [Example Service Ping payload](../../../development/service_ping/index.md#example-service-ping-payload).
+For an example payload, see [Example Service Ping payload](../../../development/internal_analytics/service_ping/index.md#example-service-ping-payload).
## Manually upload Service Ping payload
@@ -194,7 +194,7 @@ For an example payload, see [Example Service Ping payload](../../../development/
> - [Feature flag removed](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/83265) in GitLab 14.10.
You can upload the Service Ping payload to GitLab even if your instance doesn't have internet access,
-or if the Service Ping [cron job](../../../development/service_ping/index.md#how-service-ping-works) is not enabled.
+or if the Service Ping [cron job](../../../development/internal_analytics/service_ping/index.md#how-service-ping-works) is not enabled.
To upload the payload manually:
diff --git a/doc/user/packages/container_registry/delete_container_registry_images.md b/doc/user/packages/container_registry/delete_container_registry_images.md
index 18c7f092929..b645dc3a3e6 100644
--- a/doc/user/packages/container_registry/delete_container_registry_images.md
+++ b/doc/user/packages/container_registry/delete_container_registry_images.md
@@ -32,10 +32,10 @@ The online garbage collector is an instance-wide feature, and applies to all nam
To delete container images using the GitLab UI:
-1. On the top bar, select **Main menu**, and:
- - For a project, select **Projects** and find your project.
- - For a group, select **Groups** and find your group.
-1. On the left sidebar, select **Packages and registries > Container Registry**.
+1. On the left sidebar, at the top, select **Search GitLab** (**{search}**) to find your project or group.
+1. For:
+ - A group, select **Operate > Container Registry**.
+ - A project, select **Deploy > Container Registry**.
1. From the **Container Registry** page, you can select what you want to delete,
by either:
diff --git a/doc/user/packages/container_registry/index.md b/doc/user/packages/container_registry/index.md
index c27265ccc3f..f9b1138ed84 100644
--- a/doc/user/packages/container_registry/index.md
+++ b/doc/user/packages/container_registry/index.md
@@ -21,10 +21,10 @@ rate limits and speed up your pipelines. For more information about the Docker R
You can view the Container Registry for a project or group.
-1. On the top bar, select **Main menu**, and:
- - For a project, select **Projects** and find your project.
- - For a group, select **Groups** and find your group.
-1. On the left sidebar, select **Packages and registries > Container Registry**.
+1. On the left sidebar, at the top, select **Search GitLab** (**{search}**) to find your project or group.
+1. For:
+ - A group, select **Operate > Container Registry**.
+ - A project, select **Deploy > Container Registry**.
You can search, sort, filter, and [delete](delete_container_registry_images.md#use-the-gitlab-ui)
your container images. You can share a filtered view by copying the URL from your browser.
@@ -38,10 +38,10 @@ If a project is public, the Container Registry is also public.
You can use the Container Registry **Tag Details** page to view a list of tags associated with a given container image:
-1. On the top bar, select **Main menu**, and:
- - For a project, select **Projects** and find your project.
- - For a group, select **Groups** and find your group.
-1. On the left sidebar, select **Packages and registries > Container Registry**.
+1. On the left sidebar, at the top, select **Search GitLab** (**{search}**) to find your project or group.
+1. For:
+ - A group, select **Operate > Container Registry**.
+ - A project, select **Deploy > Container Registry**.
1. Select your container image.
You can view details about each tag, such as when it was published, how much storage it consumes,
@@ -54,10 +54,10 @@ tags on this page. You can share a filtered view by copying the URL from your br
To download and run a container image hosted in the Container Registry:
-1. On the top bar, select **Main menu**, and:
- - For a project, select **Projects** and find your project.
- - For a group, select **Groups** and find your group.
-1. On the left sidebar, select **Packages and registries > Container Registry**.
+1. On the left sidebar, at the top, select **Search GitLab** (**{search}**) to find your project or group.
+1. For:
+ - A group, select **Operate > Container Registry**.
+ - A project, select **Deploy > Container Registry**.
1. Find the container image you want to work with and select **Copy**.
![Container Registry image URL](img/container_registry_hover_path_13_4.png)
@@ -115,13 +115,13 @@ The Container Registry is enabled by default.
You can, however, remove the Container Registry for a project:
-1. On the top bar, select **Main menu > Projects**.
-1. On the left sidebar, select **Settings > General**.
+1. On the left sidebar, at the top, select **Search GitLab** (**{search}**) to find your project.
+1. Select **Settings > General**.
1. Expand the **Visibility, project features, permissions** section
and disable **Container Registry**.
1. Select **Save changes**.
-The **Packages and registries > Container Registry** entry is removed from the project's sidebar.
+The **Deploy > Container Registry** entry is removed from the project's sidebar.
## Change visibility of the Container Registry
@@ -133,8 +133,8 @@ You can, however, change the visibility of the Container Registry for a project.
For more information about the permissions that this setting grants to users,
see [Container Registry visibility permissions](#container-registry-visibility-permissions).
-1. On the top bar, select **Main menu > Projects**.
-1. On the left sidebar, select **Settings > General**.
+1. On the left sidebar, at the top, select **Search GitLab** (**{search}**) to find your project.
+1. Select **Settings > General**.
1. Expand the section **Visibility, project features, permissions**.
1. Under **Container Registry**, select an option from the dropdown list:
diff --git a/doc/user/packages/container_registry/reduce_container_registry_storage.md b/doc/user/packages/container_registry/reduce_container_registry_storage.md
index db9e2e84264..e3ca78becf1 100644
--- a/doc/user/packages/container_registry/reduce_container_registry_storage.md
+++ b/doc/user/packages/container_registry/reduce_container_registry_storage.md
@@ -229,8 +229,10 @@ For self-managed instances, those settings can be updated in the [Rails console]
They are also available in the [administrator area](../../admin_area/index.md):
-1. On the top bar, select **Main menu > Admin**.
-1. Go to **Settings > CI/CD > Container Registry**.
+1. On the left sidebar, expand the top-most chevron (**{chevron-down}**).
+1. Select **Admin Area**.
+1. On the left sidebar, select **Settings > CI/CD**.
+1. Expand **Container Registry**.
### Use the cleanup policy API
diff --git a/doc/user/packages/container_registry/troubleshoot_container_registry.md b/doc/user/packages/container_registry/troubleshoot_container_registry.md
index 68fe430e531..729f4919188 100644
--- a/doc/user/packages/container_registry/troubleshoot_container_registry.md
+++ b/doc/user/packages/container_registry/troubleshoot_container_registry.md
@@ -91,8 +91,8 @@ The following procedure uses these sample project names:
There may be a delay while the images are queued and deleted.
1. Change the path or transfer the project:
- 1. On the top bar, select **Main menu > Projects** and find your project.
- 1. On the left sidebar, select **Settings > General**.
+ 1. On the left sidebar, at the top, select **Search GitLab** (**{search}**) to find your project.
+ 1. Select **Settings > General**.
1. Expand the **Advanced** section.
1. In the **Change path** text box, edit the path.
1. Select **Change path**.
diff --git a/doc/user/packages/dependency_proxy/index.md b/doc/user/packages/dependency_proxy/index.md
index e70ad0fd585..ebe87332948 100644
--- a/doc/user/packages/dependency_proxy/index.md
+++ b/doc/user/packages/dependency_proxy/index.md
@@ -39,8 +39,8 @@ For a list of planned additions, view the
To enable or turn off the Dependency Proxy for a group:
-1. On the top bar, select **Main menu > Groups** and find your group.
-1. On the left sidebar, select **Settings > Packages and registries**.
+1. On the left sidebar, at the top, select **Search GitLab** (**{search}**) to find your group.
+1. Select **Settings > Packages and registries**.
1. Expand the **Dependency Proxy** section.
1. To enable the proxy, turn on **Enable Proxy**. To turn it off, turn the toggle off.
@@ -52,8 +52,8 @@ for the entire GitLab instance.
To view the Dependency Proxy:
-1. On the top bar, select **Main menu > Groups** and find your group.
-1. On the left sidebar, select **Packages and registries > Dependency Proxy**.
+1. On the left sidebar, at the top, select **Search GitLab** (**{search}**) to find your group.
+1. Select **Operate > Dependency Proxy**.
The Dependency Proxy is not available for projects.
@@ -177,8 +177,8 @@ You can also use [custom CI/CD variables](../../../ci/variables/index.md#for-a-p
To store a Docker image in Dependency Proxy storage:
-1. On the top bar, select **Main menu > Groups** and find your group.
-1. On the left sidebar, select **Packages and registries > Dependency Proxy**.
+1. On the left sidebar, at the top, select **Search GitLab** (**{search}**) to find your group.
+1. Select **Operate > Dependency Proxy**.
1. Copy the **Dependency Proxy image prefix**.
1. Use one of these commands. In these examples, the image is `alpine:latest`.
1. You can also pull images by digest to specify exactly which version of an image to pull.
diff --git a/doc/user/packages/harbor_container_registry/index.md b/doc/user/packages/harbor_container_registry/index.md
index 6cea541a55d..2bff6f79a27 100644
--- a/doc/user/packages/harbor_container_registry/index.md
+++ b/doc/user/packages/harbor_container_registry/index.md
@@ -12,9 +12,8 @@ You can integrate the [Harbor container registry](../../../user/project/integrat
You can view the Harbor Registry for a project or group.
-1. On the top bar, select **Main menu > Projects/Groups**.
-1. Go to the project or group that you are interested in.
-1. On the left sidebar, select **Packages and registries > Harbor Registry**.
+1. On the left sidebar, at the top, select **Search GitLab** (**{search}**) to find your project or group.
+1. Select **Operate > Harbor Registry**.
You can search, sort, and filter images on this page. You can share a filtered view by copying the URL from your browser.
@@ -30,7 +29,8 @@ Default settings for the Harbor integration at the project level are inherited f
To download and run a Harbor image hosted in the GitLab Harbor Registry:
1. Copy the link to your container image:
- 1. Go to your project or group's **Packages and registries > Harbor Registry** and find the image you want.
+ 1. On the left sidebar, at the top, select **Search GitLab** (**{search}**) to find your project or group.
+ 1. Select **Operate > Harbor Registry** and find the image you want.
1. Select the **Copy** icon next to the image name.
1. Use the command to run the container image you want.
@@ -39,8 +39,8 @@ To download and run a Harbor image hosted in the GitLab Harbor Registry:
To view the list of tags associated with a specific artifact:
-1. Go to your project or group.
-1. Go to **Packages and registries > Harbor Registry**.
+1. On the left sidebar, at the top, select **Search GitLab** (**{search}**) to find your project or group.
+1. Go to **Operate > Harbor Registry**.
1. Select the image name to view its artifacts.
1. Select the artifact you want.
@@ -55,13 +55,18 @@ To build and push to the Harbor Registry:
1. Authenticate with the Harbor Registry.
1. Run the command to build or push.
-To view these commands, go to your project's **Packages and registries > Harbor Registry > CLI Commands**.
+To view these commands:
+
+1. On the left sidebar, at the top, select **Search GitLab** (**{search}**) to find your project or group.
+1. Select **Operate > Harbor Registry**.
+1. Select **CLI Commands**.
## Disable the Harbor Registry for a project
To remove the Harbor Registry for a project:
-1. Go to your project/group's **Settings > Integrations** page.
+1. On the left sidebar, at the top, select **Search GitLab** (**{search}**) to find your project or group.
+1. Select **Settings > Integrations**.
1. Select **Harbor** under **Active integrations**.
1. Clear the **Active** checkbox under **Enable integration**.
1. Select **Save changes**.
diff --git a/doc/user/project/settings/index.md b/doc/user/project/settings/index.md
index 0a4be5d7053..f0b4ca1dc58 100644
--- a/doc/user/project/settings/index.md
+++ b/doc/user/project/settings/index.md
@@ -13,8 +13,8 @@ Use the **Settings** page to manage the configuration options in your [project](
You must have at least the Maintainer role to view project settings.
-1. On the top bar, select **Main menu > Projects** and find your project.
-1. On the left sidebar, select **Settings > General**.
+1. On the left sidebar, at the top, select **Search GitLab** (**{search}**) to find your project.
+1. Select **Settings > General**.
1. To display all settings in a section, select **Expand**.
1. Optional. Use the search box to find a setting.
@@ -23,8 +23,8 @@ You must have at least the Maintainer role to view project settings.
Use the project general settings to edit your project details.
1. Sign in to GitLab with at least the Maintainer role.
-1. On the top bar, select **Main menu > Projects** and find your project.
-1. On the left sidebar, select **Settings > General**.
+1. On the left sidebar, at the top, select **Search GitLab** (**{search}**) to find your project.
+1. Select **Settings > General**.
1. In the **Project name** text box, enter your project name.
1. In the **Project description** text box, enter your project description.
1. Under **Project avatar**, to change your project avatar, select **Choose file**.
@@ -35,8 +35,8 @@ Use topics to categorize projects and find similar new projects.
To assign topics to a project:
-1. On the top bar, select **Main menu > Projects** and find your project.
-1. On the left sidebar, select **Settings** > **General**.
+1. On the left sidebar, at the top, select **Search GitLab** (**{search}**) to find your project.
+1. Select **Settings** > **General**.
1. In the **Topics** text box, enter the project topics. Popular topics are suggested as you type.
1. Select **Save changes**.
@@ -49,8 +49,8 @@ If you're an instance administrator, you can administer all project topics from
compliance framework using either:
- The GitLab UI:
- 1. On the top bar, select **Main menu > Projects > View all projects** and find your project.
- 1. On the left sidebar, select **Settings** > **General**.
+ 1. On the left sidebar, at the top, select **Search GitLab** (**{search}**) to find your project.
+ 1. Select **Settings** > **General**.
1. Expand the **Compliance frameworks** section.
1. Select a compliance framework.
1. Select **Save changes**.
@@ -66,8 +66,8 @@ Frameworks can not be added to projects in personal namespaces.
To configure visibility, features, and permissions for a project:
-1. On the top bar, select **Main menu > Projects** and find your project.
-1. On the left sidebar, select **Settings > General**.
+1. On the left sidebar, at the top, select **Search GitLab** (**{search}**) to find your project.
+1. Select **Settings > General**.
1. Expand the **Visibility, project features, permissions** section.
1. To change the project visibility, select the dropdown list. If you select **Public**, you limit access to some features to **Only Project Members**.
1. To allow users to request access to the project, select the **Users can request access** checkbox.
@@ -133,8 +133,8 @@ In some environments, users can submit a [CVE identifier request](../../applicat
To disable the CVE identifier request option in issues in your project:
-1. On the top bar, select **Main menu > Projects** and find your project.
-1. On the left sidebar, select **Settings > General**.
+1. On the left sidebar, at the top, select **Search GitLab** (**{search}**) to find your project.
+1. Select **Settings > General**.
1. Expand the **Visibility, project features, permissions** section.
1. Under **Issues**, turn off the **CVE ID requests in the issue sidebar** toggle.
1. Select **Save changes**.
@@ -145,8 +145,8 @@ Prerequisites:
- You must be an Owner of the project to disable email notifications related to the project.
-1. On the top bar, select **Main menu > Projects** and find your project.
-1. On the left sidebar, select **Settings > General**.
+1. On the left sidebar, at the top, select **Search GitLab** (**{search}**) to find your project.
+1. Select **Settings > General**.
1. Expand the **Visibility, project features, permissions** section.
1. Clear the **Disable email notifications** checkbox.
@@ -187,8 +187,8 @@ other features are read-only. Archived projects are also hidden from project lis
To archive a project:
-1. On the top bar, select **Main menu > Projects** and find your project.
-1. On the left sidebar, select **Settings > General**.
+1. On the left sidebar, at the top, select **Search GitLab** (**{search}**) to find your project.
+1. Select **Settings > General**.
1. Expand **Advanced**.
1. In the **Archive project** section, select **Archive project**.
1. To confirm, select **OK**.
@@ -203,7 +203,8 @@ Prerequisites:
- To unarchive a project, you must be an administrator or a project Owner.
1. Find the archived project.
- 1. On the top bar, select **Main menu > Projects > View all projects**.
+ 1. On the left sidebar, expand the top-most chevron (**{chevron-down}**).
+ 1. Select **View all your projects**.
1. Select **Explore projects**.
1. In the **Sort projects** dropdown list, select **Show archived projects**.
1. In the **Filter by name** field, enter the project name.
@@ -228,8 +229,8 @@ When you change the repository path, users may experience issues if they push to
To rename a repository:
-1. On the top bar, select **Main menu > Projects** and find your project.
-1. On the left sidebar, select **Settings > General**.
+1. On the left sidebar, at the top, select **Search GitLab** (**{search}**) to find your project.
+1. Select **Settings > General**.
1. Expand the **Advanced** section.
1. In the **Change path** text box, edit the path.
1. Select **Change path**.
@@ -241,8 +242,8 @@ In merge requests, you can change the default behavior so that the
To set this default:
-1. On the top bar, select **Main menu > Projects** and find your project.
-1. On the left sidebar, select **Settings > Merge requests**.
+1. On the left sidebar, at the top, select **Search GitLab** (**{search}**) to find your project.
+1. Select **Settings > Merge requests**.
1. Select **Enable "Delete source branch" option by default**.
1. Select **Save changes**.
@@ -261,8 +262,8 @@ Prerequisites:
To transfer a project:
-1. On the top bar, select **Main menu > Projects** and find your project.
-1. On the left sidebar, select **Settings > General**.
+1. On the left sidebar, at the top, select **Search GitLab** (**{search}**) to find your project.
+1. Select **Settings > General**.
1. Expand **Advanced**.
1. Under **Transfer project**, choose the namespace to transfer the project to.
1. Select **Transfer project**.
@@ -297,8 +298,8 @@ Prerequisite:
To delete a project:
-1. On the top bar, select **Main menu > Projects** and find your project.
-1. On the left sidebar, select **Settings > General**.
+1. On the left sidebar, at the top, select **Search GitLab** (**{search}**) to find your project.
+1. Select **Settings > General**.
1. Expand the **Advanced** section.
1. In the **Delete this project** section, select **Delete project**.
1. In the confirmation message text field, enter the name of the project as instructed, and select **Yes, delete project**.
@@ -333,8 +334,8 @@ Prerequisites:
To immediately delete a project marked for deletion:
-1. On the top bar, select **Main menu > Projects** and find your project.
-1. On the left sidebar, select **Settings > General**.
+1. On the left sidebar, at the top, select **Search GitLab** (**{search}**) to find your project.
+1. Select **Settings > General**.
1. Expand the **Advanced** section.
1. In the **Delete this project** section, select **Delete project**.
1. In the confirmation message text field, enter the name of the project as instructed, and select **Yes, delete project**.
@@ -345,7 +346,9 @@ To immediately delete a project marked for deletion:
To restore a project marked for deletion:
-1. Navigate to your project, and select **Settings > General > Advanced**.
+1. On the left sidebar, at the top, select **Search GitLab** (**{search}**) to find your project.
+1. Select **Settings > General**.
+1. Expand **Advanced**.
1. In the Restore project section, select **Restore project**.
## Monitor settings
diff --git a/doc/user/project/wiki/group.md b/doc/user/project/wiki/group.md
index 9327ce53b3f..2271c33b5b4 100644
--- a/doc/user/project/wiki/group.md
+++ b/doc/user/project/wiki/group.md
@@ -27,10 +27,10 @@ can edit group wikis. Group wiki repositories can be moved using the
To access a group wiki:
-1. On the top bar, select **Main menu > Groups** and find your group.
+1. On the left sidebar, at the top, select **Search GitLab** (**{search}**) to find your group.
1. To display the wiki, either:
- - On the left sidebar, select **Wiki**.
- - On any page in the project, use the <kbd>g</kbd> + <kbd>w</kbd>
+ - On the left sidebar, select **Plan > Wiki**.
+ - On any page in the group, use the <kbd>g</kbd> + <kbd>w</kbd>
[wiki keyboard shortcut](../../shortcuts.md).
## Export a group wiki
@@ -67,8 +67,8 @@ can enable or disable a group wiki through the group settings.
To open group settings:
-1. On the top bar, select **Main menu > Groups** and find your group.
-1. On the left sidebar, select **Settings > General**.
+1. On the left sidebar, at the top, select **Search GitLab** (**{search}**) to find your group.
+1. Select **Settings > General**.
1. Expand **Permissions and group features**.
1. Scroll to **Wiki** and select one of these options:
- **Enabled**: For public groups, everyone can access the wiki. For internal groups, only authenticated users can access the wiki.
diff --git a/doc/user/project/wiki/index.md b/doc/user/project/wiki/index.md
index a6784e598da..0d3782cfd09 100644
--- a/doc/user/project/wiki/index.md
+++ b/doc/user/project/wiki/index.md
@@ -31,9 +31,9 @@ with sibling pages listed in alphabetical order. To view a list of all pages, se
To access a project wiki:
-1. On the top bar, select **Main menu > Projects** and find your project.
+1. On the left sidebar, at the top, select **Search GitLab** (**{search}**) to find your project.
1. To display the wiki, either:
- - On the left sidebar, select **Wiki**.
+ - On the left sidebar, select **Plan > Wiki**.
- On any page in the project, use the <kbd>g</kbd> + <kbd>w</kbd>
[wiki keyboard shortcut](../../shortcuts.md).
@@ -61,10 +61,8 @@ When a wiki is created, it is empty. On your first visit, you can create the
home page users see when viewing the wiki. This page requires a specific title
to be used as your wiki's home page. To create it:
-1. On the top bar, select **Main menu**.
- - For project wikis, select **Projects** and find your project.
- - For group wikis, select **Groups** and find your group.
-1. On the left sidebar, select **Wiki**.
+1. On the left sidebar, at the top, select **Search GitLab** (**{search}**) to find your project or group.
+1. Select **Plan > Wiki**.
1. Select **Create your first page**.
1. GitLab requires this first page be titled `home`. The page with this
title serves as the front page for your wiki.
@@ -79,10 +77,8 @@ to be used as your wiki's home page. To create it:
Users with at least the Developer role can create new wiki pages:
-1. On the top bar, select **Main menu**.
- - For project wikis, select **Projects** and find your project.
- - For group wikis, select **Groups** and find your group.
-1. On the left sidebar, select **Wiki**.
+1. On the left sidebar, at the top, select **Search GitLab** (**{search}**) to find your project or group.
+1. Select **Plan > Wiki**.
1. Select **New page** on this page, or any other wiki page.
1. Select a content format.
1. Add a title for your new page. Page titles use
@@ -142,10 +138,8 @@ may not be able to check out the wiki locally afterward.
You need at least the Developer role to edit a wiki page:
-1. On the top bar, select **Main menu**.
- - For project wikis, select **Projects** and find your project.
- - For group wikis, select **Groups** and find your group.
-1. On the left sidebar, select **Wiki**.
+1. On the left sidebar, at the top, select **Search GitLab** (**{search}**) to find your project or group.
+1. Select **Plan > Wiki**.
1. Go to the page you want to edit, and either:
- Use the <kbd>e</kbd> wiki [keyboard shortcut](../../shortcuts.md#wiki-pages).
- Select the edit icon (**{pencil}**).
@@ -163,10 +157,8 @@ For an example, read [Table of contents](../../markdown.md#table-of-contents).
You need at least the Developer role to delete a wiki page:
-1. On the top bar, select **Main menu**.
- - For project wikis, select **Projects** and find your project.
- - For group wikis, select **Groups** and find your group.
-1. On the left sidebar, select **Wiki**.
+1. On the left sidebar, at the top, select **Search GitLab** (**{search}**) to find your project or group.
+1. Select **Plan > Wiki**.
1. Go to the page you want to delete.
1. Select the edit icon (**{pencil}**).
1. Select **Delete page**.
@@ -176,10 +168,8 @@ You need at least the Developer role to delete a wiki page:
You need at least the Developer role to move a wiki page:
-1. On the top bar, select **Main menu**.
- - For project wikis, select **Projects** and find your project.
- - For group wikis, select **Groups** and find your group.
-1. On the left sidebar, select **Wiki**.
+1. On the left sidebar, at the top, select **Search GitLab** (**{search}**) to find your project or group.
+1. Select **Plan > Wiki**.
1. Go to the page you want to move.
1. Select the edit icon (**{pencil}**).
1. Add the new path to the **Title** field. For example, if you have a wiki page
@@ -202,10 +192,8 @@ The history page shows:
To view the changes for a wiki page:
-1. On the top bar, select **Main menu**.
- - For project wikis, select **Projects** and find your project.
- - For group wikis, select **Groups** and find your group.
-1. On the left sidebar, select **Wiki**.
+1. On the left sidebar, at the top, select **Search GitLab** (**{search}**) to find your project or group.
+1. Select **Plan > Wiki**.
1. Go to the page you want to view history for.
1. Select **Page history**.
@@ -215,10 +203,8 @@ To view the changes for a wiki page:
You can see the changes made in a version of a wiki page, similar to versioned diff file views:
-1. On the top bar, select **Main menu**.
- - For project wikis, select **Projects** and find your project.
- - For group wikis, select **Groups** and find your group.
-1. On the left sidebar, select **Wiki**.
+1. On the left sidebar, at the top, select **Search GitLab** (**{search}**) to find your project or group.
+1. Select **Plan > Wiki**.
1. Go to the wiki page you're interested in.
1. Select **Page history** to see all page versions.
1. Select the commit message in the **Changes** column for the version you're interested in.
@@ -248,10 +234,8 @@ You need at least the Developer role to customize the wiki
navigation sidebar. This process creates a wiki page named `_sidebar` which fully
replaces the default sidebar navigation:
-1. On the top bar, select **Main menu**.
- - For project wikis, select **Projects** and find your project.
- - For group wikis, select **Groups** and find your group.
-1. On the left sidebar, select **Wiki**.
+1. On the left sidebar, at the top, select **Search GitLab** (**{search}**) to find your project or group.
+1. Select **Plan > Wiki**.
1. In the upper-right corner of the page, select **Edit sidebar**.
1. When complete, select **Save changes**.
@@ -284,8 +268,8 @@ You can disable group wikis from the [group settings](group.md#configure-group-w
To add a link to an external wiki from a project's left sidebar:
-1. On the top bar, select **Main menu > Projects** and find your project.
-1. On the left sidebar, select **Settings > Integrations**.
+1. On the left sidebar, at the top, select **Search GitLab** (**{search}**) to find your project.
+1. Select **Settings > Integrations**.
1. Select **External wiki**.
1. Add the URL to your external wiki.
1. Optional. Select **Test settings**.
@@ -300,8 +284,8 @@ To hide the internal wiki from the sidebar, [disable the project's wiki](#disabl
To hide the link to an external wiki:
-1. On the top bar, select **Main menu > Projects** and find your project.
-1. On the left sidebar, select **Settings > Integrations**.
+1. On the left sidebar, at the top, select **Search GitLab** (**{search}**) to find your project.
+1. Select **Settings > Integrations**.
1. Select **External wiki**.
1. In the **Enable integration** section, clear the **Active** checkbox.
1. Select **Save changes**.
@@ -310,8 +294,8 @@ To hide the link to an external wiki:
To disable a project's internal wiki:
-1. On the top bar, select **Main menu > Projects** and find your project.
-1. Go to your project and select **Settings > General**.
+1. On the left sidebar, at the top, select **Search GitLab** (**{search}**) to find your project.
+1. Select **Settings > General**.
1. Expand **Visibility, project features, permissions**.
1. Scroll down to find **Wiki** and toggle it off (in gray).
1. Select **Save changes**.
diff --git a/doc/user/usage_quotas.md b/doc/user/usage_quotas.md
index 933d950eb35..5c6c64a3485 100644
--- a/doc/user/usage_quotas.md
+++ b/doc/user/usage_quotas.md
@@ -23,7 +23,7 @@ Prerequisites:
- To view storage usage for a project, you must have at least the Maintainer role for the project or Owner role for the namespace.
- To view storage usage for a namespace, you must have the Owner role for the namespace.
-1. On the left sidebar, at the top, select **Search GitLab** (**{search}**) to find your project.
+1. On the left sidebar, at the top, select **Search GitLab** (**{search}**) to find your project or group.
1. On the left sidebar, select **Settings > Usage Quotas**.
1. Select the **Storage** tab.
diff --git a/lib/gitlab/audit/type/definition.rb b/lib/gitlab/audit/type/definition.rb
index f9a5ec40adf..772023616b8 100644
--- a/lib/gitlab/audit/type/definition.rb
+++ b/lib/gitlab/audit/type/definition.rb
@@ -82,6 +82,12 @@ module Gitlab
definitions.keys.map(&:to_s)
end
+ def names_with_category
+ definitions.map do |event_name, value|
+ { event_name: event_name, feature_category: value.attributes[:feature_category] }
+ end
+ end
+
def defined?(key)
get(key).present?
end
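
A brief sketch of what the new helper is expected to return; the shape comes from the spec added later in this diff, and the concrete entries depend on which audit event YAML definitions are loaded.

```ruby
# Sketch: expected return shape of the new helper, per the spec added below.
Gitlab::Audit::Type::Definition.names_with_category
# => [{ event_name: :group_deploy_token_destroyed, feature_category: 'continuous_delivery' }, ...]
```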
diff --git a/locale/gitlab.pot b/locale/gitlab.pot
index 56d43fef4ec..88fe8fed849 100644
--- a/locale/gitlab.pot
+++ b/locale/gitlab.pot
@@ -6543,9 +6543,6 @@ msgstr ""
msgid "AuditStreams|Custom HTTP headers (optional)"
msgstr ""
-msgid "AuditStreams|Defines which streaming events are captured"
-msgstr ""
-
msgid "AuditStreams|Delete %{link}"
msgstr ""
@@ -6564,13 +6561,13 @@ msgstr ""
msgid "AuditStreams|Event filtering (optional)"
msgstr ""
-msgid "AuditStreams|Header"
+msgid "AuditStreams|Filter by stream event"
msgstr ""
-msgid "AuditStreams|Maximum of %{number} HTTP headers has been reached."
+msgid "AuditStreams|Header"
msgstr ""
-msgid "AuditStreams|No filters available. %{linkStart}How do I add a filter?%{linkEnd}"
+msgid "AuditStreams|Maximum of %{number} HTTP headers has been reached."
msgstr ""
msgid "AuditStreams|Remove custom header"
@@ -6579,6 +6576,9 @@ msgstr ""
msgid "AuditStreams|Save external stream destination"
msgstr ""
+msgid "AuditStreams|Select events"
+msgstr ""
+
msgid "AuditStreams|Setup streaming for audit events"
msgstr ""
@@ -42726,9 +42726,6 @@ msgstr ""
msgid "Show file contents"
msgstr ""
-msgid "Show filters"
-msgstr ""
-
msgid "Show full blame"
msgstr ""
diff --git a/package.json b/package.json
index 52ad5f72f6f..1f716c7d96b 100644
--- a/package.json
+++ b/package.json
@@ -57,7 +57,7 @@
"@gitlab/favicon-overlay": "2.0.0",
"@gitlab/fonts": "^1.2.0",
"@gitlab/svgs": "3.53.0",
- "@gitlab/ui": "64.6.2",
+ "@gitlab/ui": "64.10.1",
"@gitlab/visual-review-tools": "1.7.3",
"@gitlab/web-ide": "0.0.1-dev-20230614124516",
"@mattiasbuelens/web-streams-adapter": "^0.1.0",
diff --git a/spec/features/admin/admin_settings_spec.rb b/spec/features/admin/admin_settings_spec.rb
index 5f0d697b1e0..3e08d2277c1 100644
--- a/spec/features/admin/admin_settings_spec.rb
+++ b/spec/features/admin/admin_settings_spec.rb
@@ -984,7 +984,17 @@ RSpec.describe 'Admin updates settings', feature_category: :shared do
end
context 'when service data cached', :use_clean_rails_memory_store_caching do
+ let(:usage_data) { { uuid: "1111", hostname: "localhost", counts: { issue: 0 } }.deep_stringify_keys }
+
before do
+ # We are mocking Gitlab::Usage::ServicePingReport because this dataset generation
+ # takes a very long time, and is not what we're testing in this context.
+ #
+ # See https://gitlab.com/gitlab-org/gitlab/-/issues/414929
+ allow(Gitlab::UsageData).to receive(:data).and_return(usage_data)
+ allow(Gitlab::Usage::ServicePingReport).to receive(:with_instrumentation_classes)
+ .with(usage_data, :with_value).and_return(usage_data)
+
visit usage_data_admin_application_settings_path
visit service_usage_data_admin_application_settings_path
end
diff --git a/spec/frontend/analytics/shared/components/projects_dropdown_filter_spec.js b/spec/frontend/analytics/shared/components/projects_dropdown_filter_spec.js
index 364f0a2e372..4e0b546b3d2 100644
--- a/spec/frontend/analytics/shared/components/projects_dropdown_filter_spec.js
+++ b/spec/frontend/analytics/shared/components/projects_dropdown_filter_spec.js
@@ -153,6 +153,7 @@ describe('ProjectsDropdownFilter component', () => {
beforeEach(() => {
createComponent({
mountFn: mountExtended,
+ props: blockDefaultProps,
});
});
@@ -168,14 +169,16 @@ describe('ProjectsDropdownFilter component', () => {
expect(findSelectedProjectsLabel().text()).toBe(projects[0].name);
});
- it('renders the clear all button', () => {
+ it('renders the clear all button', async () => {
+ await selectDropdownItemAtIndex([0], false);
+
expect(findClearAllButton().exists()).toBe(true);
});
it('clears all selected items when the clear all button is clicked', async () => {
createComponent({
mountFn: mountExtended,
- props: { multiSelect: true },
+ props: blockDefaultProps,
});
await waitForPromises();
diff --git a/spec/frontend/lib/utils/listbox_helpers_spec.js b/spec/frontend/lib/utils/listbox_helpers_spec.js
new file mode 100644
index 00000000000..189aad41ceb
--- /dev/null
+++ b/spec/frontend/lib/utils/listbox_helpers_spec.js
@@ -0,0 +1,89 @@
+import { getSelectedOptionsText } from '~/lib/utils/listbox_helpers';
+
+describe('getSelectedOptionsText', () => {
+  it('returns an empty string by default when no options are selected', () => {
+ const options = [
+ { id: 1, text: 'first' },
+ { id: 2, text: 'second' },
+ ];
+ const selected = [];
+
+ expect(getSelectedOptionsText({ options, selected })).toBe('');
+ });
+
+ it('returns the provided placeholder when no options are selected', () => {
+ const options = [
+ { id: 1, text: 'first' },
+ { id: 2, text: 'second' },
+ ];
+ const selected = [];
+ const placeholder = 'placeholder';
+
+ expect(getSelectedOptionsText({ options, selected, placeholder })).toBe(placeholder);
+ });
+
+ describe('maxOptionsShown is not provided', () => {
+ it('returns the text of the first selected option when only one option is selected', () => {
+ const options = [{ id: 1, text: 'first' }];
+ const selected = [options[0].id];
+
+ expect(getSelectedOptionsText({ options, selected })).toBe('first');
+ });
+
+ it('should also work with the value property', () => {
+ const options = [{ value: 1, text: 'first' }];
+ const selected = [options[0].value];
+
+ expect(getSelectedOptionsText({ options, selected })).toBe('first');
+ });
+
+ it.each`
+ options | expectedText
+ ${[{ id: 1, text: 'first' }, { id: 2, text: 'second' }]} | ${'first +1 more'}
+ ${[{ id: 1, text: 'first' }, { id: 2, text: 'second' }, { id: 3, text: 'third' }]} | ${'first +2 more'}
+ `(
+ 'returns "$expectedText" when more than one option is selected',
+ ({ options, expectedText }) => {
+ const selected = options.map(({ id }) => id);
+
+ expect(getSelectedOptionsText({ options, selected })).toBe(expectedText);
+ },
+ );
+ });
+
+ describe('maxOptionsShown > 1', () => {
+ const options = [
+ { id: 1, text: 'first' },
+ { id: 2, text: 'second' },
+ { id: 3, text: 'third' },
+ { id: 4, text: 'fourth' },
+ { id: 5, text: 'fifth' },
+ ];
+
+ it.each`
+ selected | maxOptionsShown | expectedText
+ ${[1]} | ${2} | ${'first'}
+ ${[1, 2]} | ${2} | ${'first, second'}
+ ${[1, 2, 3]} | ${2} | ${'first, second +1 more'}
+ ${[1, 2, 3]} | ${3} | ${'first, second, third'}
+ ${[1, 2, 3, 4]} | ${3} | ${'first, second, third +1 more'}
+ ${[1, 2, 3, 4, 5]} | ${3} | ${'first, second, third +2 more'}
+ `(
+ 'returns "$expectedText" when "$selected.length" options are selected and maxOptionsShown is "$maxOptionsShown"',
+ ({ selected, maxOptionsShown, expectedText }) => {
+ expect(getSelectedOptionsText({ options, selected, maxOptionsShown })).toBe(expectedText);
+ },
+ );
+ });
+
+ it('ignores selected options that are not in the options array', () => {
+ const options = [
+ { id: 1, text: 'first' },
+ { id: 2, text: 'second' },
+ ];
+ const invalidOption = { id: 3, text: 'third' };
+ const selected = [options[0].id, options[1].id, invalidOption.id];
+
+ expect(getSelectedOptionsText({ options, selected })).toBe('first +1 more');
+ });
+});
diff --git a/spec/lib/gitlab/audit/type/definition_spec.rb b/spec/lib/gitlab/audit/type/definition_spec.rb
index d1d6b0d7a78..9c311677883 100644
--- a/spec/lib/gitlab/audit/type/definition_spec.rb
+++ b/spec/lib/gitlab/audit/type/definition_spec.rb
@@ -281,6 +281,30 @@ RSpec.describe Gitlab::Audit::Type::Definition do
end
end
+ describe '.names_with_category' do
+ let(:store1) { Dir.mktmpdir('path1') }
+
+ before do
+ allow(described_class).to receive(:paths).and_return(
+ [
+ File.join(store1, '**', '*.yml')
+ ]
+ )
+ end
+
+ subject { described_class.names_with_category }
+
+ after do
+ FileUtils.rm_rf(store1)
+ end
+
+ it "returns an array with just the event name and feature category" do
+ write_audit_event_type(store1, path, yaml_content)
+
+ expect(subject).to eq([{ event_name: :group_deploy_token_destroyed, feature_category: 'continuous_delivery' }])
+ end
+ end
+
def write_audit_event_type(store, path, content)
path = File.join(store, path)
dir = File.dirname(path)
diff --git a/yarn.lock b/yarn.lock
index 2489c68fe94..76f7fa4dfd2 100644
--- a/yarn.lock
+++ b/yarn.lock
@@ -1127,10 +1127,10 @@
resolved "https://registry.yarnpkg.com/@gitlab/svgs/-/svgs-3.53.0.tgz#6f952e59db6a3ebb59f034c240d7001a2dc8a92d"
integrity sha512-WgDZtl3ZmZgP0Fw62/YnNH/VjJbCp0bTU+qaBudic7T/ohu/Ex11RBcwakJyUZ5KWueFChGkulWKxVZz9baaDA==
-"@gitlab/ui@64.6.2":
- version "64.6.2"
- resolved "https://registry.yarnpkg.com/@gitlab/ui/-/ui-64.6.2.tgz#03650ad37a17b40a0a16c279aa3bcf820541e110"
- integrity sha512-0m+T012DSHks/zsBfrwiFt+LcVTkNKP4z0hnQL0zjZM0nyo+YCv525jDmTFx2sYG4oGLMb5LuIObjH9tQT/ZHw==
+"@gitlab/ui@64.10.1":
+ version "64.10.1"
+ resolved "https://registry.yarnpkg.com/@gitlab/ui/-/ui-64.10.1.tgz#4696429f9a0f128537646d01b08eecea7e6c6529"
+ integrity sha512-C1E6iAQ/7aLpGwXFmhyPs1ci6aTZe+bCN1xbGW8OJ2Ozet41tr4osI8cm1Pj4WLQL0IXqhQjg5z930HAlTR0Iw==
dependencies:
"@floating-ui/dom" "1.2.9"
bootstrap-vue "2.23.1"