Welcome to mirror list, hosted at ThFree Co, Russian Federation.

gitlab.com/gitlab-org/gitlab-foss.git - Unnamed repository; edit this file 'description' to name the repository.
summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
Diffstat (limited to 'scripts')
-rw-r--r--scripts/allowed_warnings.txt17
-rw-r--r--scripts/api/create_issue.rb9
-rw-r--r--scripts/api/create_issue_discussion.rb12
-rw-r--r--scripts/api/find_issues.rb9
-rw-r--r--scripts/api/update_issue.rb29
-rwxr-xr-xscripts/build_gdk_image1
-rwxr-xr-xscripts/build_qa_image1
-rwxr-xr-xscripts/changed-feature-flags114
-rwxr-xr-xscripts/feature_flags/used-feature-flags (renamed from scripts/used-feature-flags)59
-rw-r--r--scripts/frontend/postinstall.js8
-rwxr-xr-xscripts/generate_rspec_pipeline.rb21
-rw-r--r--scripts/internal_events/monitor.rb170
-rwxr-xr-xscripts/lint-doc.sh12
-rwxr-xr-xscripts/lint-docs-redirects.rb52
-rwxr-xr-xscripts/pipeline/average_reports.rb66
-rwxr-xr-xscripts/pipeline/create_test_failure_issues.rb263
-rwxr-xr-xscripts/qa/quarantine-types-check4
-rwxr-xr-xscripts/qa/testcases-check6
-rwxr-xr-xscripts/regenerate-schema18
-rw-r--r--scripts/review_apps/base-config.yaml162
-rwxr-xr-xscripts/review_apps/review-apps.sh40
-rw-r--r--scripts/rspec_helpers.sh14
-rwxr-xr-xscripts/undercoverage4
-rw-r--r--scripts/utils.sh3
24 files changed, 463 insertions, 631 deletions
diff --git a/scripts/allowed_warnings.txt b/scripts/allowed_warnings.txt
index cb684166348..de98be345d3 100644
--- a/scripts/allowed_warnings.txt
+++ b/scripts/allowed_warnings.txt
@@ -8,26 +8,9 @@
# warning message so that GitLab backports don't fail.
Browserslist: caniuse-lite is outdated\. Please run next command `yarn upgrade`
-# https://github.com/mime-types/mime-types-data/pull/50#issuecomment-1060908930
-Type application/netcdf is already registered as a variant of application/netcdf\.
-
# This warning is emitted by scripts/static-analysis.
\*\*\*\* .+ had the following warning\(s\):
-# Ruby 3 extracts net-protocol into a separate gem, while Ruby 2 has it built-in.
-# This can be removed when support for Ruby 2 is dropped.
-2\.7\.0\/gems\/net-protocol-0\.1\.3\/lib\/net\/protocol\.rb:208: warning: already initialized constant Net::BufferedIO::BUFSIZE
-ruby\/2\.7\.0\/net\/protocol\.rb:206: warning: previous definition of BUFSIZE was here
-2\.7\.0\/gems\/net-protocol-0\.1\.3\/lib\/net\/protocol\.rb:504: warning: already initialized constant Net::NetPrivate::Socket
-ruby\/2\.7\.0\/net\/protocol\.rb:503: warning: previous definition of Socket was here
-2\.7\.0\/gems\/net-protocol-0\.1\.3\/lib\/net\/protocol\.rb:68: warning: already initialized constant Net::ProtocRetryError
-ruby\/2\.7\.0\/net\/protocol\.rb:66: warning: previous definition of ProtocRetryError was here
-
-# Ruby 3 does not emit warnings for pattern matching, and if it's working
-# fine in both Ruby 2 and Ruby 3, it's unlikely it'll change again.
-# This can be removed when support for Ruby 2 is dropped.
-warning: Pattern matching is experimental, and the behavior may change in future versions of Ruby!
-
# As of Ruby 3.1, one-line typesafe/destructuring pattern matching via "rightward assignment" has
# been included for multiple years with no significant negative feedback or indications of removal.
# In the event that it is removed in a future Ruby release, the changes required to fix it are
diff --git a/scripts/api/create_issue.rb b/scripts/api/create_issue.rb
deleted file mode 100644
index 1c385ce41f2..00000000000
--- a/scripts/api/create_issue.rb
+++ /dev/null
@@ -1,9 +0,0 @@
-# frozen_string_literal: true
-
-require_relative 'base'
-
-class CreateIssue < Base
- def execute(issue_data)
- client.create_issue(project, issue_data.delete(:title), issue_data)
- end
-end
diff --git a/scripts/api/create_issue_discussion.rb b/scripts/api/create_issue_discussion.rb
deleted file mode 100644
index 6471a5c2579..00000000000
--- a/scripts/api/create_issue_discussion.rb
+++ /dev/null
@@ -1,12 +0,0 @@
-# frozen_string_literal: true
-
-require_relative 'base'
-
-class CreateIssueDiscussion < Base
- def execute(discussion_data)
- client.post(
- "/projects/#{client.url_encode project}/issues/#{discussion_data.delete(:issue_iid)}/discussions",
- body: discussion_data
- )
- end
-end
diff --git a/scripts/api/find_issues.rb b/scripts/api/find_issues.rb
deleted file mode 100644
index f74f815fba9..00000000000
--- a/scripts/api/find_issues.rb
+++ /dev/null
@@ -1,9 +0,0 @@
-# frozen_string_literal: true
-
-require_relative 'base'
-
-class FindIssues < Base
- def execute(search_data)
- client.issues(project, search_data)
- end
-end
diff --git a/scripts/api/update_issue.rb b/scripts/api/update_issue.rb
deleted file mode 100644
index ce296ebc358..00000000000
--- a/scripts/api/update_issue.rb
+++ /dev/null
@@ -1,29 +0,0 @@
-# frozen_string_literal: true
-
-require 'gitlab'
-require_relative 'default_options'
-
-class UpdateIssue
- def initialize(options)
- @project = options.fetch(:project)
-
- # Force the token to be a string so that if api_token is nil, it's set to '',
- # allowing unauthenticated requests (for forks).
- api_token = options.delete(:api_token).to_s
-
- warn "No API token given." if api_token.empty?
-
- @client = Gitlab.client(
- endpoint: options.delete(:endpoint) || API::DEFAULT_OPTIONS[:endpoint],
- private_token: api_token
- )
- end
-
- def execute(issue_iid, issue_data)
- client.edit_issue(project, issue_iid, issue_data)
- end
-
- private
-
- attr_reader :project, :client
-end
diff --git a/scripts/build_gdk_image b/scripts/build_gdk_image
index cb1dbd03adb..3401c8df86c 100755
--- a/scripts/build_gdk_image
+++ b/scripts/build_gdk_image
@@ -26,6 +26,7 @@ docker buildx build \
--platform=${ARCH:-amd64} \
--tag="${IMAGE}:${SHA_TAG}" \
--tag="${IMAGE}:${BRANCH_TAG}" \
+ --provenance=false \
${OUTPUT_OPTION} \
.
diff --git a/scripts/build_qa_image b/scripts/build_qa_image
index 9c401718336..23a003e2b01 100755
--- a/scripts/build_qa_image
+++ b/scripts/build_qa_image
@@ -90,5 +90,6 @@ docker buildx build \
--build-arg=QA_BUILD_TARGET="${QA_BUILD_TARGET}" \
--file="${CI_PROJECT_DIR}/qa/Dockerfile" \
--push \
+ --provenance=false \
${DESTINATIONS} \
${CI_PROJECT_DIR}
diff --git a/scripts/changed-feature-flags b/scripts/changed-feature-flags
deleted file mode 100755
index 8c1b219e5a6..00000000000
--- a/scripts/changed-feature-flags
+++ /dev/null
@@ -1,114 +0,0 @@
-#!/usr/bin/env ruby
-# frozen_string_literal: true
-
-require 'yaml'
-require 'optparse'
-require 'pathname'
-require_relative 'api/default_options'
-
-# This script returns the desired feature flag state as a comma-separated string for the feature flags in the specified files.
-# Each desired feature flag state is specified as 'feature-flag=state'. This allows us to run package-and-qa with the
-# feature flag set to the desired state.
-#
-# For example, if the specified files included `config/feature_flags/development/ci_awesome_feature.yml` and the desired
-# state as specified by the second argument was enabled, the value returned would be `ci_awesome_feature=enabled`
-
-class GetFeatureFlagsFromFiles
- def initialize(options)
- @files = options.delete(:files)
- @state = options.delete(:state)
-
- abort("ERROR: Please specify the directory containing MR diffs.") if @files.to_s.empty?
- end
-
- # Gets feature flags from definition files or diffs of deleted defition files
- #
- # @return [String] a comma-separated list of feature flags and their desired state
- def extracted_flags
- flags_list = diffs_dir.glob('**/*').each_with_object([]) do |file_path, flags|
- ff_yaml = ff_yaml_for_file(file_path)
- next if ff_yaml.nil?
- break [] if ff_yaml.empty?
-
- flags << ff_yaml['name']
- end
- flags_list = flags_list.map { |flag| "#{flag}=#{state}" } unless state.to_s.empty?
- flags_list.join(',')
- end
-
- # Loads the YAML feature flag definition based on a diff of the definition file. The definition is loaded from the
- # definition file itself, or from a diff of the deleted definition file.
- #
- # @param [Pathname] path the path to the diff
- # @return [Hash] a hash containing the YAML data for the feature flag definition
- def ff_yaml_for_file(path)
- return unless File.expand_path(path).to_s =~ %r{/feature_flags/(development|ops)/.*\.yml}
-
- if path.to_s.end_with?('yml.deleted.diff')
- # Ignore deleted feature flag definitions if we want to enable/disable existing flags.
- return if state != 'deleted'
-
- yaml_from_deleted_diff(path)
- else
- # If we want deleted definition files but find one that wasn't deleted, we return immediately to
- # because non-deleted flags are tested in separate jobs from deleted flags, so we don't need to run
- # a job with just deleted flags.
- return [] if state == 'deleted'
-
- yaml_from_file(path, diffs_dir)
- end
- end
-
- private
-
- attr_reader :files, :state
-
- # The absolute path to the directory of diffs
- #
- # @return [String]
- def diffs_dir
- @diffs_dir ||= Pathname.new(files).expand_path
- end
-
- # Loads the YAML feature flag definition from a file corresponding to a diff of the definition file.
- #
- # @param [Pathname] file_path the path to the diff
- # @param [Pathname] diffs_dir the path to the diffs directory
- # @return [Hash] a hash containing the YAML data from the feature flag definition file corresponding to the diff
- def yaml_from_file(file_path, diffs_dir)
- real_file_path = File.join(Dir.pwd, file_path.to_s.delete_prefix(diffs_dir.to_s)).delete_suffix('.diff')
- YAML.safe_load(File.read(real_file_path))
- end
-
- # Loads the YAML feature flag definition from a diff of the deleted feature flag definition file.
- #
- # @param [Pathname] file_path the path of the diff
- # @return [Hash] a hash containing the YAML data for the feature flag definition from the diff
- def yaml_from_deleted_diff(file_path)
- cleaned_diff = File.read(file_path).gsub(/^[^a-z]+/, '')
- YAML.safe_load(cleaned_diff)
- end
-end
-
-if $PROGRAM_NAME == __FILE__
- options = API::DEFAULT_OPTIONS.dup
-
- OptionParser.new do |opts|
- opts.on("-f", "--files FILES", String, "A directory containing diffs including feature flag definition change diffs") do |value|
- options[:files] = value
- end
-
- opts.on("-s", "--state STATE", String,
- "The desired state of the feature flags (enabled or disabled). If not specified the output will only list the feature flags."
- ) do |value|
- options[:state] = value
- end
-
- opts.on("-h", "--help", "Prints this help") do
- puts opts
- exit
- end
- end.parse!
-
- puts GetFeatureFlagsFromFiles.new(options).extracted_flags
-end
diff --git a/scripts/used-feature-flags b/scripts/feature_flags/used-feature-flags
index 74180d02a91..7bfe4a89634 100755
--- a/scripts/used-feature-flags
+++ b/scripts/feature_flags/used-feature-flags
@@ -3,7 +3,9 @@
require 'set'
require 'fileutils'
-require_relative '../lib/gitlab_edition'
+require_relative '../../lib/gitlab_edition'
+
+ADDITIONAL_EDITIONS = %w[ee jh].freeze
class String
def red
@@ -23,18 +25,19 @@ class String
end
end
-flags_paths = [
- 'config/feature_flags/**/*.yml'
-]
+def add_definition_path!(edition, flag_def_paths)
+ return unless GitlabEdition.public_send(:"#{edition}?") # rubocop:disable GitlabSecurity/PublicSend
-# For EE additionally process `ee/` feature flags
-if GitlabEdition.ee?
- flags_paths << 'ee/config/feature_flags/**/*.yml'
+ flag_def_paths << "#{edition}/config/feature_flags/**/*.yml"
+end
+
+def mark_replicator_flags_as_used(edition)
+ return unless GitlabEdition.public_send(:"#{edition}?") # rubocop:disable GitlabSecurity/PublicSend
# Geo feature flags are constructed dynamically and there's no explicit checks in the codebase so we mark all
# the replicators' derived feature flags as used.
# See https://gitlab.com/gitlab-org/gitlab/-/blob/54e802e8fe76b6f93656d75ef9b566bf57b60f41/ee/lib/gitlab/geo/replicator.rb#L183-185
- Dir.glob('ee/app/replicators/geo/*_replicator.rb').each_with_object(Set.new) do |path, memo|
+ Dir.glob("#{edition}/app/replicators/geo/*_replicator.rb").each do |path|
replicator_name = File.basename(path, '.rb')
feature_flag_name = "geo_#{replicator_name.delete_suffix('_replicator')}_replication"
@@ -42,16 +45,13 @@ if GitlabEdition.ee?
end
end
-# For JH additionally process `jh/` feature flags
-if GitlabEdition.jh?
- flags_paths << 'jh/config/feature_flags/**/*.yml'
-
- Dir.glob('jh/app/replicators/geo/*_replicator.rb').each_with_object(Set.new) do |path, memo|
- replicator_name = File.basename(path, '.rb')
- feature_flag_name = "geo_#{replicator_name.delete_suffix('_replicator')}_replication"
+flag_definition_paths = [
+ 'config/feature_flags/**/*.yml'
+]
- FileUtils.touch(File.join('tmp', 'feature_flags', "#{feature_flag_name}.used"))
- end
+ADDITIONAL_EDITIONS.each do |edition|
+ add_definition_path!(edition, flag_definition_paths)
+ mark_replicator_flags_as_used(edition)
end
all_flags = {}
@@ -59,19 +59,16 @@ additional_flags = Set.new
# Iterate all defined feature flags
# to discover which were used
-flags_paths.each do |flags_path|
- puts flags_path
- Dir.glob(flags_path).each do |path|
- feature_flag_name = File.basename(path, '.yml')
-
- # TODO: we need a better way of tracking use of Gitaly FF across Gitaly and GitLab
- if feature_flag_name.start_with?('gitaly_')
- puts "Skipping the #{feature_flag_name} feature flag since it starts with 'gitaly_'."
- next
- end
-
- all_flags[feature_flag_name] = File.exist?(File.join('tmp', 'feature_flags', feature_flag_name + '.used'))
+Dir.glob(flag_definition_paths).each do |flag_definition_path|
+ feature_flag_name = File.basename(flag_definition_path, '.yml')
+
+ # TODO: we need a better way of tracking use of Gitaly FF across Gitaly and GitLab
+ if feature_flag_name.start_with?('gitaly_')
+ puts "Skipping the #{feature_flag_name} feature flag since it starts with 'gitaly_'."
+ next
end
+
+ all_flags[feature_flag_name] = File.exist?(File.join('tmp', 'feature_flags', "#{feature_flag_name}.used"))
end
# Iterate all used feature flags
@@ -82,8 +79,8 @@ Dir.glob('tmp/feature_flags/*.used').each do |path|
additional_flags.add(feature_flag_name) unless all_flags[feature_flag_name]
end
-used_flags = all_flags.select { |name, used| used }
-unused_flags = all_flags.reject { |name, used| used }
+used_flags = all_flags.select { |_name, used| used }
+unused_flags = all_flags.reject { |_name, used| used }
puts "=========================================".green.bold
puts "Feature Flags usage summary:".green.bold
diff --git a/scripts/frontend/postinstall.js b/scripts/frontend/postinstall.js
index 50052bb806e..07456ef36c9 100644
--- a/scripts/frontend/postinstall.js
+++ b/scripts/frontend/postinstall.js
@@ -1,4 +1,4 @@
-const { execSync } = require('child_process');
+const { spawnSync } = require('child_process');
const chalk = require('chalk');
// check that fsevents is available if we're on macOS
@@ -24,5 +24,7 @@ console.log(`${chalk.green('success')} Dependency postinstall check passed.`);
// Apply any patches to our packages
// See https://gitlab.com/gitlab-org/gitlab/-/issues/336138
-execSync('node_modules/.bin/patch-package --error-on-fail');
-console.log(`${chalk.green('success')} Packages successfully patched.`);
+process.exitCode =
+ spawnSync('node_modules/.bin/patch-package', ['--error-on-fail', '--error-on-warn'], {
+ stdio: ['ignore', 'inherit', 'inherit'],
+ }).status ?? 1;
diff --git a/scripts/generate_rspec_pipeline.rb b/scripts/generate_rspec_pipeline.rb
index 292b3d85b20..1fc37374ba5 100755
--- a/scripts/generate_rspec_pipeline.rb
+++ b/scripts/generate_rspec_pipeline.rb
@@ -110,7 +110,7 @@ class GenerateRspecPipeline
end
def optimal_nodes_count(test_level, rspec_files)
- nodes_count = (rspec_files.size / optimal_test_file_count_per_node_per_test_level(test_level)).ceil
+ nodes_count = (rspec_files.size / optimal_test_file_count_per_node_per_test_level(test_level, rspec_files)).ceil
info "Optimal node count for #{rspec_files.size} #{test_level} RSpec files is #{nodes_count}."
if nodes_count > MAX_NODES_COUNT
@@ -123,14 +123,27 @@ class GenerateRspecPipeline
end
end
- def optimal_test_file_count_per_node_per_test_level(test_level)
+ def optimal_test_file_count_per_node_per_test_level(test_level, rspec_files)
[
- (OPTIMAL_TEST_RUNTIME_DURATION_IN_SECONDS / average_test_file_duration_in_seconds_per_test_level[test_level]),
+ (OPTIMAL_TEST_RUNTIME_DURATION_IN_SECONDS / average_test_file_duration(test_level, rspec_files)),
1
].max
end
- def average_test_file_duration_in_seconds_per_test_level
+ def average_test_file_duration(test_level, rspec_files)
+ if rspec_files.any? && knapsack_report.any?
+ rspec_files_duration = rspec_files.sum do |rspec_file|
+ knapsack_report.fetch(
+ rspec_file, average_test_file_duration_per_test_level[test_level])
+ end
+
+ rspec_files_duration / rspec_files.size
+ else
+ average_test_file_duration_per_test_level[test_level]
+ end
+ end
+
+ def average_test_file_duration_per_test_level
@optimal_test_file_count_per_node_per_test_level ||=
if knapsack_report.any?
remaining_knapsack_report = knapsack_report.dup
diff --git a/scripts/internal_events/monitor.rb b/scripts/internal_events/monitor.rb
new file mode 100644
index 00000000000..b2ef924eb11
--- /dev/null
+++ b/scripts/internal_events/monitor.rb
@@ -0,0 +1,170 @@
+# frozen_string_literal: true
+
+# Internal Events Tracking Monitor
+#
+# This script provides real-time monitoring of Internal Events Tracking-related metrics and Snowplow events.
+#
+# Usage:
+# Run this script in your terminal with specific event names as command-line arguments. It will continuously
+# display relevant metrics and Snowplow events associated with the provided event names.
+#
+# Example:
+# To monitor events 'g_edit_by_web_ide' and 'g_edit_by_sfe', execute:
+# ```
+# bin/rails runner scripts/internal_events/monitor.rb g_edit_by_web_ide g_edit_by_sfe
+# ```
+#
+# Exiting:
+# - To exit the script, press Ctrl+C.
+#
+
+require 'terminal-table'
+require 'net/http'
+
+module ExtendedTimeFrame
+ def weekly_time_range
+ super.tap { |h| h[:end_date] = 1.week.from_now }
+ end
+
+ def monthly_time_range
+ super.tap { |h| h[:end_date] = 1.week.from_now }
+ end
+end
+Gitlab::Usage::TimeFrame.prepend(ExtendedTimeFrame)
+
+def metric_definitions_from_args
+ args = ARGV
+ Gitlab::Usage::MetricDefinition.all.select do |metric|
+ metric.available? && args.any? { |arg| metric.events.key?(arg) }
+ end
+end
+
+def red(text)
+ "\e[31m#{text}\e[0m"
+end
+
+def snowplow_data
+ url = Gitlab::Tracking::Destinations::SnowplowMicro.new.uri.merge('/micro/good')
+ response = Net::HTTP.get_response(url)
+
+ return JSON.parse(response.body) if response.is_a?(Net::HTTPSuccess)
+
+ raise "Request failed: #{response.code}"
+end
+
+def extract_standard_context(event)
+ event['event']['contexts']['data'].each do |context|
+ next unless context['schema'].start_with?('iglu:com.gitlab/gitlab_standard/jsonschema')
+
+ return {
+ user_id: context["data"]["user_id"],
+ namespace_id: context["data"]["namespace_id"],
+ project_id: context["data"]["project_id"],
+ plan: context["data"]["plan"]
+ }
+ end
+ {}
+end
+
+def generate_snowplow_table
+ events = snowplow_data.select { |d| ARGV.include?(d["event"]["se_action"]) }
+ @initial_max_timestamp ||= events.map { |e| e['rawEvent']['parameters']['dtm'].to_i }.max || 0
+
+ rows = []
+ rows << ['Event Name', 'Collector Timestamp', 'user_id', 'namespace_id', 'project_id', 'plan']
+ rows << :separator
+
+ events.each do |event|
+ standard_context = extract_standard_context(event)
+
+ row = [
+ event['event']['se_action'],
+ event['event']['collector_tstamp'],
+ standard_context[:user_id],
+ standard_context[:namespace_id],
+ standard_context[:project_id],
+ standard_context[:plan]
+ ]
+
+ row.map! { |value| red(value) } if event['rawEvent']['parameters']['dtm'].to_i > @initial_max_timestamp
+
+ rows << row
+ end
+
+ Terminal::Table.new(
+ title: 'SNOWPLOW EVENTS',
+ rows: rows
+ )
+end
+
+def relevant_events_from_args(metric_definition)
+ metric_definition.events.keys.intersection(ARGV).sort
+end
+
+def generate_metrics_table
+ metric_definitions = metric_definitions_from_args
+ rows = []
+ rows << ['Key Path', 'Monitored Events', 'Instrumentation Class', 'Initial Value', 'Current Value']
+ rows << :separator
+
+ @initial_values ||= {}
+
+ metric_definitions.sort_by(&:key).each do |definition|
+ metric = Gitlab::Usage::Metric.new(definition)
+ value = metric.send(:instrumentation_object).value # rubocop:disable GitlabSecurity/PublicSend
+ @initial_values[definition.key] ||= value
+
+ initial_value = @initial_values[definition.key]
+
+ value = red(value) if initial_value != value
+
+ rows << [
+ definition.key,
+ relevant_events_from_args(definition).join(', '),
+ definition.instrumentation_class,
+ initial_value,
+ value
+ ]
+ end
+
+ Terminal::Table.new(
+ title: 'RELEVANT METRICS',
+ rows: rows
+ )
+end
+
+begin
+ snowplow_data
+rescue Errno::ECONNREFUSED
+ puts "Could not connect to Snowplow Micro."
+ puts "Please follow these instruction to set up Snowplow Micro:"
+ puts "https://gitlab.com/gitlab-org/gitlab-development-kit/-/blob/main/doc/howto/snowplow_micro.md"
+ exit 1
+end
+
+print "\e[?1049h" # Stores the original screen buffer
+print "\e[H" # Moves the cursor home
+begin
+ loop do
+ metrics_table = generate_metrics_table
+ events_table = generate_snowplow_table
+
+ print "\e[H" # Moves the cursor home
+ print "\e[2J" # Clears the screen buffer
+
+ puts "Updated at #{Time.current}"
+ puts "Monitored events: #{ARGV.join(', ')}"
+ puts
+
+ puts metrics_table
+
+ puts events_table
+
+ sleep 1
+ end
+rescue Interrupt
+ # Quietly shut down
+ensure
+ print "\e[?1049l" # Restores the original screen buffer
+ print "\e[H" # Moves the cursor home
+end
diff --git a/scripts/lint-doc.sh b/scripts/lint-doc.sh
index 6683802c2fe..b16d9042f75 100755
--- a/scripts/lint-doc.sh
+++ b/scripts/lint-doc.sh
@@ -132,7 +132,12 @@ fi
# Run Vale and Markdownlint only on changed files. Only works on merged results
# pipelines, so first checks if a merged results CI variable is present. If not present,
# runs test on all files.
-if [ -z "${CI_MERGE_REQUEST_TARGET_BRANCH_SHA}" ]
+if [ -n "$1" ]
+then
+ MD_DOC_PATH="$@"
+ # shellcheck disable=2059
+ printf "${COLOR_GREEN}INFO: List of files specified on command line. Running Markdownlint and Vale for only those files...${COLOR_RESET}\n"
+elif [ -z "${CI_MERGE_REQUEST_TARGET_BRANCH_SHA}" ]
then
MD_DOC_PATH=${MD_DOC_PATH:-doc}
# shellcheck disable=2059
@@ -157,13 +162,14 @@ fi
function run_locally_or_in_container() {
local cmd=$1
local args=$2
+ local files=$3
local registry_url="registry.gitlab.com/gitlab-org/gitlab-docs/lint-markdown:alpine-3.16-vale-2.22.0-markdownlint-0.32.2-markdownlint2-0.6.0"
if hash "${cmd}" 2>/dev/null
then
# shellcheck disable=2059
printf "${COLOR_GREEN}INFO: Found locally-installed ${cmd}! Running...${COLOR_RESET}\n"
- $cmd $args
+ $cmd $args $files
# When using software like Rancher Desktop, both nerdctl and docker binaries are available
# but only one is configured. To check which one to use, we need to probe each runtime
elif (hash nerdctl 2>/dev/null) && (nerdctl info > /dev/null 2>&1)
@@ -207,7 +213,7 @@ fi
# shellcheck disable=2059
printf "${COLOR_GREEN}INFO: Looking for Vale to lint prose, either installed locally or available in documentation linting image...${COLOR_RESET}\n"
-run_locally_or_in_container 'vale' "--minAlertLevel error --output=doc/.vale/vale.tmpl ${MD_DOC_PATH}"
+run_locally_or_in_container 'vale' "--minAlertLevel error --output=doc/.vale/vale.tmpl" "${MD_DOC_PATH}"
if [ "$ERRORCODE" -ne 0 ]
then
diff --git a/scripts/lint-docs-redirects.rb b/scripts/lint-docs-redirects.rb
index fb4ac19981d..36567571397 100755
--- a/scripts/lint-docs-redirects.rb
+++ b/scripts/lint-docs-redirects.rb
@@ -9,6 +9,7 @@ require 'net/http'
require 'uri'
require 'json'
require 'cgi'
+require 'yaml'
class LintDocsRedirect
COLOR_CODE_RED = "\e[31m"
@@ -26,6 +27,7 @@ class LintDocsRedirect
abort_unless_merge_request_iid_exists
check_renamed_deleted_files
+ check_for_circular_redirects
end
private
@@ -165,12 +167,16 @@ class LintDocsRedirect
end
end
+ def doc_file?(file)
+ file['old_path'].start_with?('doc/') && file['old_path'].end_with?('.md')
+ end
+
def renamed_doc_file?(file)
- file['renamed_file'] == true && file['old_path'].start_with?('doc')
+ file['renamed_file'] == true && doc_file?(file)
end
def deleted_doc_file?(file)
- file['deleted_file'] == true && file['old_path'].start_with?('doc')
+ file['deleted_file'] == true && doc_file?(file)
end
# Create a list of hashes of the renamed documentation files
@@ -198,6 +204,48 @@ class LintDocsRedirect
check_for_missing_nav_entry(file)
end
end
+
+ # Search for '+redirect_to' in the diff to find the new value. It should
+ # return a string of "+redirect_to: 'file.md'", in which case, delete the
+ # '+' prefix. If not found, skip and go to next file.
+ def redirect_to(diff_file)
+ redirect_to = diff_file["diff"]
+ .lines
+ .find { |e| e.include?('+redirect_to') }
+ &.delete_prefix('+')
+
+ return if redirect_to.nil?
+
+ YAML.safe_load(redirect_to)['redirect_to']
+ end
+
+ def all_doc_files
+ merge_request_diff.select do |file|
+ doc_file?(file)
+ end
+ end
+
+ # Check if a page redirects to itself
+ def check_for_circular_redirects
+ all_doc_files.each do |file|
+ next if redirect_to(file).nil?
+
+ basename = File.basename(file['old_path'])
+
+ # Fail if the 'redirect_to' value is the same as the file's basename.
+ next unless redirect_to(file) == basename
+
+ warn <<~WARNING
+ #{COLOR_CODE_RED}✖ ERROR: Circular redirect detected. The 'redirect_to' value points to the same file.#{COLOR_CODE_RESET}
+ WARNING
+
+ puts
+ puts "File : #{file['old_path']}"
+ puts "Redirect to : #{redirect_to(file)}"
+
+ abort
+ end
+ end
end
LintDocsRedirect.new.execute if $PROGRAM_NAME == __FILE__
diff --git a/scripts/pipeline/average_reports.rb b/scripts/pipeline/average_reports.rb
new file mode 100755
index 00000000000..d628ddfbf66
--- /dev/null
+++ b/scripts/pipeline/average_reports.rb
@@ -0,0 +1,66 @@
+#!/usr/bin/env ruby
+# frozen_string_literal: true
+
+require 'json'
+require 'optparse'
+
+class AverageReports
+ attr_reader :initial_report_file, :initial_report_data, :report_file_to_data_map
+
+ def initialize(initial_report_file:, new_report_files:)
+ @initial_report_file = initial_report_file
+ @initial_report_data = parse_json_from_report_file(initial_report_file)
+
+ @report_file_to_data_map = new_report_files.each_with_object({}) do |report_file, map|
+ next unless File.exist?(report_file)
+
+ map[report_file] ||= parse_json_from_report_file(report_file)
+ end
+ end
+
+ def execute
+ puts "Updating #{initial_report_file} with #{report_file_to_data_map.size} new reports..."
+
+ compound_reports = report_file_to_data_map.keys.each_with_object({}) do |report_file, result|
+ report = report_file_to_data_map[report_file]
+
+ report.each do |spec, duration|
+ result[spec] ||= [*initial_report_data[spec]]
+ result[spec] << duration
+ end
+
+ puts "Updated #{report.size} data points from #{report_file}"
+ end
+
+ averaged_reports = compound_reports.transform_values do |durations|
+ durations.sum.to_f / durations.size
+ end
+
+ File.write(initial_report_file, JSON.pretty_generate(averaged_reports))
+ puts "Saved #{initial_report_file}."
+
+ averaged_reports
+ end
+
+ private
+
+ def parse_json_from_report_file(report_file)
+ JSON.parse(File.read(report_file))
+ end
+end
+
+if $PROGRAM_NAME == __FILE__
+ options = {}
+
+ OptionParser.new do |opts|
+ opts.on("-i", "--initial-report initial_report_file", String, 'Initial report file name') do |value|
+ options[:initial_report_file] = value
+ end
+
+ opts.on("-n", "--new-reports new_report_files", Array, 'New report file names delimited by ","') do |values|
+ options[:new_report_files] = values
+ end
+ end.parse!
+
+ AverageReports.new(**options).execute
+end
diff --git a/scripts/pipeline/create_test_failure_issues.rb b/scripts/pipeline/create_test_failure_issues.rb
deleted file mode 100755
index e4bcabb6223..00000000000
--- a/scripts/pipeline/create_test_failure_issues.rb
+++ /dev/null
@@ -1,263 +0,0 @@
-#!/usr/bin/env ruby
-# frozen_string_literal: true
-
-require 'optparse'
-require 'json'
-require 'httparty'
-
-require_relative '../api/create_issue'
-require_relative '../api/find_issues'
-require_relative '../api/update_issue'
-
-class CreateTestFailureIssues
- DEFAULT_OPTIONS = {
- project: nil,
- tests_report_file: 'tests_report.json',
- issue_json_folder: 'tmp/issues/'
- }.freeze
-
- def initialize(options)
- @options = options
- end
-
- def execute
- puts "[CreateTestFailureIssues] No failed tests!" if failed_tests.empty?
-
- failed_tests.each_with_object([]) do |failed_test, existing_issues|
- CreateTestFailureIssue.new(options.dup).upsert(failed_test, existing_issues).tap do |issue|
- existing_issues << issue
- File.write(File.join(options[:issue_json_folder], "issue-#{issue.iid}.json"), JSON.pretty_generate(issue.to_h))
- end
- end
- end
-
- private
-
- attr_reader :options
-
- def failed_tests
- @failed_tests ||=
- if File.exist?(options[:tests_report_file])
- JSON.parse(File.read(options[:tests_report_file]))
- else
- puts "[CreateTestFailureIssues] #{options[:tests_report_file]} doesn't exist!"
- []
- end
- end
-end
-
-class CreateTestFailureIssue
- MAX_TITLE_LENGTH = 255
- WWW_GITLAB_COM_SITE = 'https://about.gitlab.com'
- WWW_GITLAB_COM_GROUPS_JSON = "#{WWW_GITLAB_COM_SITE}/groups.json".freeze
- WWW_GITLAB_COM_CATEGORIES_JSON = "#{WWW_GITLAB_COM_SITE}/categories.json".freeze
- FEATURE_CATEGORY_METADATA_REGEX = /(?<=feature_category: :)\w+/
- DEFAULT_LABELS = ['type::maintenance', 'test'].freeze
-
- def self.server_host
- @server_host ||= ENV.fetch('CI_SERVER_HOST', 'gitlab.com')
- end
-
- def self.project_path
- @project_path ||= ENV.fetch('CI_PROJECT_PATH', 'gitlab-org/gitlab')
- end
-
- def self.file_base_url
- @file_base_url ||= "https://#{server_host}/#{project_path}/-/blob/master/"
- end
-
- def self.report_item_regex
- @report_item_regex ||= %r{^1\. \d{4}-\d{2}-\d{2}: https://#{server_host}/#{project_path}/-/jobs/.+$}
- end
-
- def initialize(options)
- @project = options.delete(:project)
- @api_token = options.delete(:api_token)
- end
-
- def upsert(failed_test, existing_issues = [])
- existing_issue = find(failed_test, existing_issues)
-
- if existing_issue
- update_reports(existing_issue, failed_test)
- existing_issue
- else
- create(failed_test)
- end
- end
-
- private
-
- attr_reader :project, :api_token
-
- def find(failed_test, existing_issues = [])
- test_hash = failed_test_hash(failed_test)
- issue_from_existing_issues = existing_issues.find { |issue| issue.title.include?(test_hash) }
- issue_from_issue_tracker = FindIssues
- .new(project: project, api_token: api_token)
- .execute(state: :opened, search: test_hash, in: :title, per_page: 1)
- .first
-
- existing_issue = issue_from_existing_issues || issue_from_issue_tracker
-
- return unless existing_issue
-
- puts "[CreateTestFailureIssue] Found issue '#{existing_issue.title}': #{existing_issue.web_url}!"
-
- existing_issue
- end
-
- def update_reports(existing_issue, failed_test)
- # We count the number of existing reports.
- reports_count = existing_issue.description
- .scan(self.class.report_item_regex)
- .size.to_i + 1
-
- # We include the number of reports in the header, for visibility.
- issue_description = existing_issue.description.sub(/^### Reports.*$/, "### Reports (#{reports_count})")
-
- # We add the current failure to the list of reports.
- issue_description = "#{issue_description}\n#{report_list_item(failed_test)}"
-
- UpdateIssue
- .new(project: project, api_token: api_token)
- .execute(
- existing_issue.iid,
- description: issue_description,
- weight: reports_count
- )
- puts "[CreateTestFailureIssue] Added a report in '#{existing_issue.title}': #{existing_issue.web_url}!"
- end
-
- def create(failed_test)
- payload = {
- title: failed_test_issue_title(failed_test),
- description: failed_test_issue_description(failed_test),
- labels: failed_test_issue_labels(failed_test),
- weight: 1
- }
-
- CreateIssue.new(project: project, api_token: api_token).execute(payload).tap do |issue|
- puts "[CreateTestFailureIssue] Created issue '#{issue.title}': #{issue.web_url}!"
- end
- end
-
- def failed_test_hash(failed_test)
- Digest::SHA256.hexdigest(failed_test['file'] + failed_test['name'])[0...12]
- end
-
- def failed_test_issue_title(failed_test)
- title = "#{failed_test['file']} [test-hash:#{failed_test_hash(failed_test)}]"
-
- raise "Title is too long!" if title.size > MAX_TITLE_LENGTH
-
- title
- end
-
- def test_file_link(failed_test)
- "[`#{failed_test['file']}`](#{self.class.file_base_url}#{failed_test['file']})"
- end
-
- def report_list_item(failed_test)
- "1. #{Time.new.utc.strftime('%F')}: #{failed_test['job_url']} (#{ENV['CI_PIPELINE_URL']})"
- end
-
- def failed_test_issue_description(failed_test)
- <<~DESCRIPTION
- ### Test description
-
- `#{search_safe(failed_test['name'])}`
-
- ### Test file path
-
- #{test_file_link(failed_test)}
-
- <!-- Don't add anything after the report list since it's updated automatically -->
- ### Reports (1)
-
- #{report_list_item(failed_test)}
- DESCRIPTION
- end
-
- def failed_test_issue_labels(failed_test)
- labels = DEFAULT_LABELS + category_and_group_labels_for_test_file(failed_test['file'])
-
- # make sure we don't spam people who are notified to actual labels
- labels.map { |label| "wip-#{label}" }
- end
-
- def category_and_group_labels_for_test_file(test_file)
- feature_categories = File.open(File.expand_path(File.join('..', '..', test_file), __dir__))
- .read
- .scan(FEATURE_CATEGORY_METADATA_REGEX)
-
- category_labels = feature_categories.filter_map { |category| categories_mapping.dig(category, 'label') }.uniq
-
- groups = feature_categories.filter_map { |category| categories_mapping.dig(category, 'group') }
- group_labels = groups.map { |group| groups_mapping.dig(group, 'label') }.uniq
-
- (category_labels + [group_labels.first]).compact
- end
-
- def categories_mapping
- @categories_mapping ||= self.class.fetch_json(WWW_GITLAB_COM_CATEGORIES_JSON)
- end
-
- def groups_mapping
- @groups_mapping ||= self.class.fetch_json(WWW_GITLAB_COM_GROUPS_JSON)
- end
-
- def search_safe(value)
- value.delete('"')
- end
-
- def self.fetch_json(json_url)
- json = with_retries { HTTParty.get(json_url, format: :plain) } # rubocop:disable Gitlab/HTTParty
- JSON.parse(json)
- end
-
- def self.with_retries(attempts: 3)
- yield
- rescue Errno::ECONNRESET, OpenSSL::SSL::SSLError, Net::OpenTimeout
- retry if (attempts -= 1) > 0
- raise
- end
- private_class_method :with_retries
-end
-
-if $PROGRAM_NAME == __FILE__
- options = CreateTestFailureIssues::DEFAULT_OPTIONS.dup
-
- OptionParser.new do |opts|
- opts.on("-p", "--project PROJECT", String,
- "Project where to create the issue (defaults to " \
- "`#{CreateTestFailureIssues::DEFAULT_OPTIONS[:project]}`)") do |value|
- options[:project] = value
- end
-
- opts.on("-r", "--tests-report-file file_path", String,
- "Path to a JSON file which contains the current pipeline's tests report (defaults to " \
- "`#{CreateTestFailureIssues::DEFAULT_OPTIONS[:tests_report_file]}`)"
- ) do |value|
- options[:tests_report_file] = value
- end
-
- opts.on("-f", "--issues-json-folder file_path", String,
- "Path to a folder where to save the issues JSON data (defaults to " \
- "`#{CreateTestFailureIssues::DEFAULT_OPTIONS[:issue_json_folder]}`)") do |value|
- options[:issue_json_folder] = value
- end
-
- opts.on("-t", "--api-token API_TOKEN", String,
- "A valid Project token with the `Reporter` role and `api` scope to create the issue") do |value|
- options[:api_token] = value
- end
-
- opts.on("-h", "--help", "Prints this help") do
- puts opts
- exit
- end
- end.parse!
-
- CreateTestFailureIssues.new(options).execute
-end
diff --git a/scripts/qa/quarantine-types-check b/scripts/qa/quarantine-types-check
index 188348b949c..8c2768b6722 100755
--- a/scripts/qa/quarantine-types-check
+++ b/scripts/qa/quarantine-types-check
@@ -7,8 +7,8 @@ QUARANTINE_TYPES = %w[stale bug investigating flaky broken test_environment wait
missing_issues = []
quarantine_type_errors = []
-invalid_type_message = %"\n*** The following quarantined tests have invalid types:\n\n%s\n"
-missing_issue_message = %"\n*** The following quarantined tests are missing issue links:\n\n%s\n"
+invalid_type_message = %(\n*** The following quarantined tests have invalid types:\n\n%s\n)
+missing_issue_message = %(\n*** The following quarantined tests are missing issue links:\n\n%s\n)
test_metadata_file = ARGV.shift
diff --git a/scripts/qa/testcases-check b/scripts/qa/testcases-check
index 2bc1ea2c5c7..fad5f620e02 100755
--- a/scripts/qa/testcases-check
+++ b/scripts/qa/testcases-check
@@ -9,9 +9,9 @@ testcases = []
missing_testcases = []
formatted_duplicates = []
testcase_format_errors = []
-missing_message = %"\n*** The following tests are missing testcase links:\n\n%s\n"
-duplicate_message = %"\n*** The following tests have duplicate testcase links:\n\n%s"
-format_message = %"\n*** The following testcase links are incorrectly formatted:\n\n%s\n"
+missing_message = %(\n*** The following tests are missing testcase links:\n\n%s\n)
+duplicate_message = %(\n*** The following tests have duplicate testcase links:\n\n%s)
+format_message = %(\n*** The following testcase links are incorrectly formatted:\n\n%s\n)
test_metadata_file = ARGV.shift
diff --git a/scripts/regenerate-schema b/scripts/regenerate-schema
index 67c58339c6c..f1018403395 100755
--- a/scripts/regenerate-schema
+++ b/scripts/regenerate-schema
@@ -52,8 +52,8 @@ class SchemaRegenerator
def checkout_ref
return unless ci?
- run %[git checkout #{source_ref}]
- run %q[git clean -f -- db]
+ run %(git checkout #{source_ref})
+ run %q(git clean -f -- db)
end
##
@@ -71,8 +71,8 @@ class SchemaRegenerator
return false unless project_url
return false unless target_project_url
- run %[git remote add target_project #{target_project_url}.git]
- run %[git fetch target_project #{target_branch}:#{target_branch}]
+ run %(git remote add target_project #{target_project_url}.git)
+ run %(git fetch target_project #{target_branch}:#{target_branch})
local_checkout_clean_schema
end
@@ -83,8 +83,8 @@ class SchemaRegenerator
# Ask git to checkout the schema from the target branch and reset
# the file to unstage the changes.
def local_checkout_clean_schema
- run %[git checkout #{merge_base} -- #{FILENAME}]
- run %[git reset -- #{FILENAME}]
+ run %(git checkout #{merge_base} -- #{FILENAME})
+ run %(git reset -- #{FILENAME})
end
##
@@ -152,19 +152,19 @@ class SchemaRegenerator
##
# Stop spring before modifying the database
def stop_spring
- run %q[bin/spring stop]
+ run %q(bin/spring stop)
end
##
# Run rake task to reset the database.
def reset_db
- run %q[bin/rails db:reset RAILS_ENV=test]
+ run %q(bin/rails db:reset RAILS_ENV=test)
end
##
# Run rake task to run migrations.
def migrate
- run %q[bin/rails db:migrate RAILS_ENV=test]
+ run %q(bin/rails db:migrate RAILS_ENV=test)
end
##
diff --git a/scripts/review_apps/base-config.yaml b/scripts/review_apps/base-config.yaml
index 9b77ff80d42..a425aecc86b 100644
--- a/scripts/review_apps/base-config.yaml
+++ b/scripts/review_apps/base-config.yaml
@@ -32,18 +32,13 @@ gitlab:
gitaly:
resources:
requests:
- # Based on https://console.cloud.google.com/monitoring/metrics-explorer;endTime=2023-04-19T08:37:33.183Z;startTime=2023-02-05T09:37:33.182Z?pageState=%7B%22xyChart%22:%7B%22constantLines%22:%5B%5D,%22dataSets%22:%5B%7B%22plotType%22:%22LINE%22,%22targetAxis%22:%22Y1%22,%22timeSeriesFilter%22:%7B%22aggregations%22:%5B%7B%22crossSeriesReducer%22:%22REDUCE_NONE%22,%22groupByFields%22:%5B%5D,%22perSeriesAligner%22:%22ALIGN_RATE%22%7D,%7B%22crossSeriesReducer%22:%22REDUCE_MEAN%22,%22groupByFields%22:%5B%22resource.label.%5C%22namespace_name%5C%22%22%5D,%22perSeriesAligner%22:%22ALIGN_MEAN%22%7D%5D,%22apiSource%22:%22DEFAULT_CLOUD%22,%22crossSeriesReducer%22:%22REDUCE_NONE%22,%22filter%22:%22metric.type%3D%5C%22kubernetes.io%2Fcontainer%2Fcpu%2Fcore_usage_time%5C%22%20resource.type%3D%5C%22k8s_container%5C%22%20resource.label.%5C%22container_name%5C%22%3D%5C%22gitaly%5C%22%22,%22groupByFields%22:%5B%5D,%22minAlignmentPeriod%22:%2260s%22,%22perSeriesAligner%22:%22ALIGN_RATE%22,%22secondaryCrossSeriesReducer%22:%22REDUCE_MEAN%22,%22secondaryGroupByFields%22:%5B%22resource.label.%5C%22namespace_name%5C%22%22%5D%7D%7D%5D,%22options%22:%7B%22mode%22:%22COLOR%22%7D,%22y1Axis%22:%7B%22label%22:%22%22,%22scale%22:%22LINEAR%22%7D%7D%7D&project=gitlab-review-apps
- #
- # Data over the 3 months (2023-02-24 - 2023-04-19)
- #
- # The average seems to be around 0.100vCPU (setting request accordingly). Note that this is a guesstimate based on the chart.
- #
- # The maximum CPU usage was 0.196vCPU (setting limit accordingly)
- cpu: 150m
- memory: 600Mi
+ # Based on https://console.cloud.google.com/monitoring/metrics-explorer;duration=P14D?pageState=%7B%22xyChart%22:%7B%22constantLines%22:%5B%5D,%22dataSets%22:%5B%7B%22plotType%22:%22LINE%22,%22targetAxis%22:%22Y1%22,%22timeSeriesFilter%22:%7B%22aggregations%22:%5B%7B%22crossSeriesReducer%22:%22REDUCE_NONE%22,%22groupByFields%22:%5B%5D,%22perSeriesAligner%22:%22ALIGN_RATE%22%7D,%7B%22crossSeriesReducer%22:%22REDUCE_NONE%22,%22groupByFields%22:%5B%5D,%22perSeriesAligner%22:%22ALIGN_MEAN%22%7D%5D,%22apiSource%22:%22DEFAULT_CLOUD%22,%22crossSeriesReducer%22:%22REDUCE_NONE%22,%22filter%22:%22metric.type%3D%5C%22kubernetes.io%2Fcontainer%2Fcpu%2Fcore_usage_time%5C%22%20resource.type%3D%5C%22k8s_container%5C%22%20resource.label.%5C%22container_name%5C%22%3D%5C%22gitaly%5C%22%22,%22groupByFields%22:%5B%5D,%22minAlignmentPeriod%22:%2260s%22,%22perSeriesAligner%22:%22ALIGN_RATE%22,%22secondaryCrossSeriesReducer%22:%22REDUCE_NONE%22,%22secondaryGroupByFields%22:%5B%5D%7D%7D%5D,%22options%22:%7B%22mode%22:%22STATS%22%7D,%22y1Axis%22:%7B%22label%22:%22%22,%22scale%22:%22LINEAR%22%7D%7D%7D&project=gitlab-review-apps
+ cpu: 100m
+ # Based on https://console.cloud.google.com/monitoring/metrics-explorer;duration=P14D?pageState=%7B%22xyChart%22:%7B%22constantLines%22:%5B%5D,%22dataSets%22:%5B%7B%22plotType%22:%22LINE%22,%22targetAxis%22:%22Y1%22,%22timeSeriesFilter%22:%7B%22aggregations%22:%5B%7B%22crossSeriesReducer%22:%22REDUCE_NONE%22,%22groupByFields%22:%5B%5D,%22perSeriesAligner%22:%22ALIGN_MEAN%22%7D%5D,%22apiSource%22:%22DEFAULT_CLOUD%22,%22crossSeriesReducer%22:%22REDUCE_NONE%22,%22filter%22:%22metric.type%3D%5C%22kubernetes.io%2Fcontainer%2Fmemory%2Fused_bytes%5C%22%20resource.type%3D%5C%22k8s_container%5C%22%20resource.label.%5C%22container_name%5C%22%3D%5C%22gitaly%5C%22%22,%22groupByFields%22:%5B%5D,%22minAlignmentPeriod%22:%2260s%22,%22perSeriesAligner%22:%22ALIGN_MEAN%22%7D%7D%5D,%22options%22:%7B%22mode%22:%22STATS%22%7D,%22y1Axis%22:%7B%22label%22:%22%22,%22scale%22:%22LINEAR%22%7D%7D%7D&project=gitlab-review-apps
+ memory: 400Mi
limits:
- cpu: 300m
- memory: 1000Mi
+ cpu: 200m
+ memory: 800Mi
persistence:
size: 10Gi
storageClass: ssd
@@ -58,18 +53,13 @@ gitlab:
gitlab-shell:
resources:
requests:
- # Based on https://console.cloud.google.com/monitoring/metrics-explorer;endTime=2023-04-19T08:37:33.183Z;startTime=2023-02-05T09:37:33.182Z?pageState=%7B%22xyChart%22:%7B%22constantLines%22:%5B%5D,%22dataSets%22:%5B%7B%22plotType%22:%22LINE%22,%22targetAxis%22:%22Y1%22,%22timeSeriesFilter%22:%7B%22aggregations%22:%5B%7B%22crossSeriesReducer%22:%22REDUCE_NONE%22,%22groupByFields%22:%5B%5D,%22perSeriesAligner%22:%22ALIGN_RATE%22%7D,%7B%22crossSeriesReducer%22:%22REDUCE_MEAN%22,%22groupByFields%22:%5B%22resource.label.%5C%22namespace_name%5C%22%22%5D,%22perSeriesAligner%22:%22ALIGN_MEAN%22%7D%5D,%22apiSource%22:%22DEFAULT_CLOUD%22,%22crossSeriesReducer%22:%22REDUCE_NONE%22,%22filter%22:%22metric.type%3D%5C%22kubernetes.io%2Fcontainer%2Fcpu%2Fcore_usage_time%5C%22%20resource.type%3D%5C%22k8s_container%5C%22%20resource.label.%5C%22container_name%5C%22%3D%5C%22gitlab-shell%5C%22%22,%22groupByFields%22:%5B%5D,%22minAlignmentPeriod%22:%2260s%22,%22perSeriesAligner%22:%22ALIGN_RATE%22,%22secondaryCrossSeriesReducer%22:%22REDUCE_MEAN%22,%22secondaryGroupByFields%22:%5B%22resource.label.%5C%22namespace_name%5C%22%22%5D%7D%7D%5D,%22options%22:%7B%22mode%22:%22COLOR%22%7D,%22y1Axis%22:%7B%22label%22:%22%22,%22scale%22:%22LINEAR%22%7D%7D%7D&project=gitlab-review-apps
- #
- # Data over the 3 months (2023-02-24 - 2023-04-19)
- #
- # The average seems to be around 0.01vCPU (setting request accordingly). Note that this is a guesstimate based on the chart.
- #
- # The maximum CPU usage was 0.127vCPU (setting limit accordingly)
- cpu: 10m
- memory: 100Mi
+ # Based on https://console.cloud.google.com/monitoring/metrics-explorer;duration=P14D?pageState=%7B%22xyChart%22:%7B%22constantLines%22:%5B%5D,%22dataSets%22:%5B%7B%22plotType%22:%22LINE%22,%22targetAxis%22:%22Y1%22,%22timeSeriesFilter%22:%7B%22aggregations%22:%5B%7B%22crossSeriesReducer%22:%22REDUCE_NONE%22,%22groupByFields%22:%5B%5D,%22perSeriesAligner%22:%22ALIGN_RATE%22%7D,%7B%22crossSeriesReducer%22:%22REDUCE_NONE%22,%22groupByFields%22:%5B%5D,%22perSeriesAligner%22:%22ALIGN_MEAN%22%7D%5D,%22apiSource%22:%22DEFAULT_CLOUD%22,%22crossSeriesReducer%22:%22REDUCE_NONE%22,%22filter%22:%22metric.type%3D%5C%22kubernetes.io%2Fcontainer%2Fcpu%2Fcore_usage_time%5C%22%20resource.type%3D%5C%22k8s_container%5C%22%20resource.label.%5C%22container_name%5C%22%3D%5C%22gitlab-shell%5C%22%22,%22groupByFields%22:%5B%5D,%22minAlignmentPeriod%22:%2260s%22,%22perSeriesAligner%22:%22ALIGN_RATE%22,%22secondaryCrossSeriesReducer%22:%22REDUCE_NONE%22,%22secondaryGroupByFields%22:%5B%5D%7D%7D%5D,%22options%22:%7B%22mode%22:%22STATS%22%7D,%22y1Axis%22:%7B%22label%22:%22%22,%22scale%22:%22LINEAR%22%7D%7D%7D&project=gitlab-review-apps
+ cpu: 12m
+ # Based on https://console.cloud.google.com/monitoring/metrics-explorer;duration=P14D?pageState=%7B%22xyChart%22:%7B%22constantLines%22:%5B%5D,%22dataSets%22:%5B%7B%22plotType%22:%22LINE%22,%22targetAxis%22:%22Y1%22,%22timeSeriesFilter%22:%7B%22aggregations%22:%5B%7B%22crossSeriesReducer%22:%22REDUCE_NONE%22,%22groupByFields%22:%5B%5D,%22perSeriesAligner%22:%22ALIGN_MEAN%22%7D%5D,%22apiSource%22:%22DEFAULT_CLOUD%22,%22crossSeriesReducer%22:%22REDUCE_NONE%22,%22filter%22:%22metric.type%3D%5C%22kubernetes.io%2Fcontainer%2Fmemory%2Fused_bytes%5C%22%20resource.type%3D%5C%22k8s_container%5C%22%20resource.label.%5C%22container_name%5C%22%3D%5C%22gitlab-shell%5C%22%22,%22groupByFields%22:%5B%5D,%22minAlignmentPeriod%22:%2260s%22,%22perSeriesAligner%22:%22ALIGN_MEAN%22%7D%7D%5D,%22options%22:%7B%22mode%22:%22STATS%22%7D,%22y1Axis%22:%7B%22label%22:%22%22,%22scale%22:%22LINEAR%22%7D%7D%7D&project=gitlab-review-apps
+ memory: 20Mi
limits:
- cpu: 150m
- memory: 150Mi
+ cpu: 90m
+ memory: 40Mi
minReplicas: 1
maxReplicas: 1
hpa:
@@ -89,20 +79,24 @@ gitlab:
migrations:
resources:
requests:
- cpu: 400m
- memory: 920Mi
+ # Based on https://console.cloud.google.com/monitoring/metrics-explorer;duration=P14D?pageState=%7B%22xyChart%22:%7B%22constantLines%22:%5B%5D,%22dataSets%22:%5B%7B%22plotType%22:%22LINE%22,%22targetAxis%22:%22Y1%22,%22timeSeriesFilter%22:%7B%22aggregations%22:%5B%7B%22crossSeriesReducer%22:%22REDUCE_NONE%22,%22groupByFields%22:%5B%5D,%22perSeriesAligner%22:%22ALIGN_RATE%22%7D,%7B%22crossSeriesReducer%22:%22REDUCE_NONE%22,%22groupByFields%22:%5B%5D,%22perSeriesAligner%22:%22ALIGN_MEAN%22%7D%5D,%22apiSource%22:%22DEFAULT_CLOUD%22,%22crossSeriesReducer%22:%22REDUCE_NONE%22,%22filter%22:%22metric.type%3D%5C%22kubernetes.io%2Fcontainer%2Fcpu%2Fcore_usage_time%5C%22%20resource.type%3D%5C%22k8s_container%5C%22%20resource.label.%5C%22container_name%5C%22%3D%5C%22migrations%5C%22%22,%22groupByFields%22:%5B%5D,%22minAlignmentPeriod%22:%2260s%22,%22perSeriesAligner%22:%22ALIGN_RATE%22,%22secondaryCrossSeriesReducer%22:%22REDUCE_NONE%22,%22secondaryGroupByFields%22:%5B%5D%7D%7D%5D,%22options%22:%7B%22mode%22:%22STATS%22%7D,%22y1Axis%22:%7B%22label%22:%22%22,%22scale%22:%22LINEAR%22%7D%7D%7D&project=gitlab-review-apps
+ cpu: 600m
+ # Based on https://console.cloud.google.com/monitoring/metrics-explorer;duration=P14D?pageState=%7B%22xyChart%22:%7B%22constantLines%22:%5B%5D,%22dataSets%22:%5B%7B%22plotType%22:%22LINE%22,%22targetAxis%22:%22Y1%22,%22timeSeriesFilter%22:%7B%22aggregations%22:%5B%7B%22crossSeriesReducer%22:%22REDUCE_NONE%22,%22groupByFields%22:%5B%5D,%22perSeriesAligner%22:%22ALIGN_MEAN%22%7D%5D,%22apiSource%22:%22DEFAULT_CLOUD%22,%22crossSeriesReducer%22:%22REDUCE_NONE%22,%22filter%22:%22metric.type%3D%5C%22kubernetes.io%2Fcontainer%2Fmemory%2Fused_bytes%5C%22%20resource.type%3D%5C%22k8s_container%5C%22%20resource.label.%5C%22container_name%5C%22%3D%5C%22migrations%5C%22%22,%22groupByFields%22:%5B%5D,%22minAlignmentPeriod%22:%2260s%22,%22perSeriesAligner%22:%22ALIGN_MEAN%22%7D%7D%5D,%22options%22:%7B%22mode%22:%22STATS%22%7D,%22y1Axis%22:%7B%22label%22:%22%22,%22scale%22:%22LINEAR%22%7D%7D%7D&project=gitlab-review-apps
+ memory: 800Mi
limits:
- cpu: 1000m
- memory: 1380Mi
+ cpu: 900m
+ memory: 1200Mi
sidekiq:
resources:
requests:
- cpu: 855m
- memory: 1927Mi
+ # Based on https://console.cloud.google.com/monitoring/metrics-explorer;duration=P14D?pageState=%7B%22xyChart%22:%7B%22constantLines%22:%5B%5D,%22dataSets%22:%5B%7B%22plotType%22:%22LINE%22,%22targetAxis%22:%22Y1%22,%22timeSeriesFilter%22:%7B%22aggregations%22:%5B%7B%22crossSeriesReducer%22:%22REDUCE_NONE%22,%22groupByFields%22:%5B%5D,%22perSeriesAligner%22:%22ALIGN_RATE%22%7D,%7B%22crossSeriesReducer%22:%22REDUCE_NONE%22,%22groupByFields%22:%5B%5D,%22perSeriesAligner%22:%22ALIGN_MEAN%22%7D%5D,%22apiSource%22:%22DEFAULT_CLOUD%22,%22crossSeriesReducer%22:%22REDUCE_NONE%22,%22filter%22:%22metric.type%3D%5C%22kubernetes.io%2Fcontainer%2Fcpu%2Fcore_usage_time%5C%22%20resource.type%3D%5C%22k8s_container%5C%22%20resource.label.%5C%22container_name%5C%22%3D%5C%22sidekiq%5C%22%22,%22groupByFields%22:%5B%5D,%22minAlignmentPeriod%22:%2260s%22,%22perSeriesAligner%22:%22ALIGN_RATE%22,%22secondaryCrossSeriesReducer%22:%22REDUCE_NONE%22,%22secondaryGroupByFields%22:%5B%5D%7D%7D%5D,%22options%22:%7B%22mode%22:%22STATS%22%7D,%22y1Axis%22:%7B%22label%22:%22%22,%22scale%22:%22LINEAR%22%7D%7D%7D&project=gitlab-review-apps
+ cpu: 400m
+ # Based on https://console.cloud.google.com/monitoring/metrics-explorer;duration=P14D?pageState=%7B%22xyChart%22:%7B%22constantLines%22:%5B%5D,%22dataSets%22:%5B%7B%22plotType%22:%22LINE%22,%22targetAxis%22:%22Y1%22,%22timeSeriesFilter%22:%7B%22aggregations%22:%5B%7B%22crossSeriesReducer%22:%22REDUCE_NONE%22,%22groupByFields%22:%5B%5D,%22perSeriesAligner%22:%22ALIGN_MEAN%22%7D%5D,%22apiSource%22:%22DEFAULT_CLOUD%22,%22crossSeriesReducer%22:%22REDUCE_NONE%22,%22filter%22:%22metric.type%3D%5C%22kubernetes.io%2Fcontainer%2Fmemory%2Fused_bytes%5C%22%20resource.type%3D%5C%22k8s_container%5C%22%20resource.label.%5C%22container_name%5C%22%3D%5C%22sidekiq%5C%22%22,%22groupByFields%22:%5B%5D,%22minAlignmentPeriod%22:%2260s%22,%22perSeriesAligner%22:%22ALIGN_MEAN%22%7D%7D%5D,%22options%22:%7B%22mode%22:%22STATS%22%7D,%22y1Axis%22:%7B%22label%22:%22%22,%22scale%22:%22LINEAR%22%7D%7D%7D&project=gitlab-review-apps
+ memory: 1300Mi
limits:
- cpu: 1282m
- memory: 2890Mi
+ cpu: 700m
+ memory: 1800Mi
hpa:
cpu:
targetAverageValue: 650m
@@ -110,27 +104,28 @@ gitlab:
toolbox:
resources:
requests:
- # Based on https://console.cloud.google.com/monitoring/metrics-explorer;endTime=2023-04-19T08:37:33.183Z;startTime=2023-02-05T09:37:33.182Z?pageState=%7B%22xyChart%22:%7B%22constantLines%22:%5B%5D,%22dataSets%22:%5B%7B%22plotType%22:%22LINE%22,%22targetAxis%22:%22Y1%22,%22timeSeriesFilter%22:%7B%22aggregations%22:%5B%7B%22crossSeriesReducer%22:%22REDUCE_NONE%22,%22groupByFields%22:%5B%5D,%22perSeriesAligner%22:%22ALIGN_RATE%22%7D,%7B%22crossSeriesReducer%22:%22REDUCE_MEAN%22,%22groupByFields%22:%5B%22resource.label.%5C%22namespace_name%5C%22%22%5D,%22perSeriesAligner%22:%22ALIGN_MEAN%22%7D%5D,%22apiSource%22:%22DEFAULT_CLOUD%22,%22crossSeriesReducer%22:%22REDUCE_NONE%22,%22filter%22:%22metric.type%3D%5C%22kubernetes.io%2Fcontainer%2Fcpu%2Fcore_usage_time%5C%22%20resource.type%3D%5C%22k8s_container%5C%22%20resource.label.%5C%22container_name%5C%22%3D%5C%22toolbox%5C%22%22,%22groupByFields%22:%5B%5D,%22minAlignmentPeriod%22:%2260s%22,%22perSeriesAligner%22:%22ALIGN_RATE%22,%22secondaryCrossSeriesReducer%22:%22REDUCE_MEAN%22,%22secondaryGroupByFields%22:%5B%22resource.label.%5C%22namespace_name%5C%22%22%5D%7D%7D%5D,%22options%22:%7B%22mode%22:%22COLOR%22%7D,%22y1Axis%22:%7B%22label%22:%22%22,%22scale%22:%22LINEAR%22%7D%7D%7D&project=gitlab-review-apps
- #
- # Data over the 3 months (2023-02-24 - 2023-04-19)
+ # Based on https://console.cloud.google.com/monitoring/metrics-explorer;duration=P2D?pageState=%7B%22xyChart%22:%7B%22constantLines%22:%5B%5D,%22dataSets%22:%5B%7B%22plotType%22:%22LINE%22,%22targetAxis%22:%22Y1%22,%22timeSeriesFilter%22:%7B%22aggregations%22:%5B%7B%22crossSeriesReducer%22:%22REDUCE_NONE%22,%22groupByFields%22:%5B%5D,%22perSeriesAligner%22:%22ALIGN_RATE%22%7D,%7B%22crossSeriesReducer%22:%22REDUCE_PERCENTILE_99%22,%22groupByFields%22:%5B%22resource.label.%5C%22namespace_name%5C%22%22%5D,%22perSeriesAligner%22:%22ALIGN_MEAN%22%7D%5D,%22apiSource%22:%22DEFAULT_CLOUD%22,%22crossSeriesReducer%22:%22REDUCE_NONE%22,%22filter%22:%22metric.type%3D%5C%22kubernetes.io%2Fcontainer%2Fcpu%2Fcore_usage_time%5C%22%20resource.type%3D%5C%22k8s_container%5C%22%20resource.label.%5C%22container_name%5C%22%3D%5C%22toolbox%5C%22%22,%22groupByFields%22:%5B%5D,%22minAlignmentPeriod%22:%2260s%22,%22perSeriesAligner%22:%22ALIGN_RATE%22,%22secondaryCrossSeriesReducer%22:%22REDUCE_PERCENTILE_99%22,%22secondaryGroupByFields%22:%5B%22resource.label.%5C%22namespace_name%5C%22%22%5D%7D%7D%5D,%22options%22:%7B%22mode%22:%22COLOR%22%7D,%22y1Axis%22:%7B%22label%22:%22%22,%22scale%22:%22LINEAR%22%7D%7D%7D&project=gitlab-review-apps
#
- # The average seems to be around 0.100vCPU. Note that this is a guesstimate based on the chart.
- #
- # The maximum CPU usage was 0.250vCPU (setting limit accordingly)
- cpu: 150m
- memory: 1927Mi
+      # We actually don't use the average of 0.03vCPU, since the container resources usage jumps at deploy time, but then stays very low for the rest of the life of the review app.
+ # Since review apps aren't deployed at the same time, the mean is actually lower than the actual "max" component usage, which happens at deploy time only.
+ # We use the p99 without grouping by the cluster name since the usage isn't sustained. The p99 is around 0.3vCPU.
+ cpu: 300m
+ # Based on https://console.cloud.google.com/monitoring/metrics-explorer;duration=P2D?pageState=%7B%22xyChart%22:%7B%22constantLines%22:%5B%5D,%22dataSets%22:%5B%7B%22plotType%22:%22LINE%22,%22targetAxis%22:%22Y1%22,%22timeSeriesFilter%22:%7B%22aggregations%22:%5B%7B%22crossSeriesReducer%22:%22REDUCE_PERCENTILE_99%22,%22groupByFields%22:%5B%22resource.label.%5C%22namespace_name%5C%22%22%5D,%22perSeriesAligner%22:%22ALIGN_MEAN%22%7D%5D,%22apiSource%22:%22DEFAULT_CLOUD%22,%22crossSeriesReducer%22:%22REDUCE_PERCENTILE_99%22,%22filter%22:%22metric.type%3D%5C%22kubernetes.io%2Fcontainer%2Fmemory%2Fused_bytes%5C%22%20resource.type%3D%5C%22k8s_container%5C%22%20resource.label.%5C%22container_name%5C%22%3D%5C%22toolbox%5C%22%22,%22groupByFields%22:%5B%22resource.label.%5C%22namespace_name%5C%22%22%5D,%22minAlignmentPeriod%22:%2260s%22,%22perSeriesAligner%22:%22ALIGN_MEAN%22%7D%7D%5D,%22options%22:%7B%22mode%22:%22COLOR%22%7D,%22y1Axis%22:%7B%22label%22:%22%22,%22scale%22:%22LINEAR%22%7D%7D%7D&project=gitlab-review-apps
+ memory: 675Mi
limits:
- cpu: 450m
- memory: 3500Mi
+ cpu: 480m
+ memory: 1000Mi
webservice:
resources:
requests:
- cpu: 746m
- memory: 2809Mi
+ # Based on https://console.cloud.google.com/monitoring/metrics-explorer;duration=P14D?pageState=%7B%22xyChart%22:%7B%22constantLines%22:%5B%5D,%22dataSets%22:%5B%7B%22plotType%22:%22LINE%22,%22targetAxis%22:%22Y1%22,%22timeSeriesFilter%22:%7B%22aggregations%22:%5B%7B%22crossSeriesReducer%22:%22REDUCE_NONE%22,%22groupByFields%22:%5B%5D,%22perSeriesAligner%22:%22ALIGN_RATE%22%7D,%7B%22crossSeriesReducer%22:%22REDUCE_NONE%22,%22groupByFields%22:%5B%5D,%22perSeriesAligner%22:%22ALIGN_MEAN%22%7D%5D,%22apiSource%22:%22DEFAULT_CLOUD%22,%22crossSeriesReducer%22:%22REDUCE_NONE%22,%22filter%22:%22metric.type%3D%5C%22kubernetes.io%2Fcontainer%2Fcpu%2Fcore_usage_time%5C%22%20resource.type%3D%5C%22k8s_container%5C%22%20resource.label.%5C%22container_name%5C%22%3D%5C%22webservice%5C%22%22,%22groupByFields%22:%5B%5D,%22minAlignmentPeriod%22:%2260s%22,%22perSeriesAligner%22:%22ALIGN_RATE%22,%22secondaryCrossSeriesReducer%22:%22REDUCE_NONE%22,%22secondaryGroupByFields%22:%5B%5D%7D%7D%5D,%22options%22:%7B%22mode%22:%22STATS%22%7D,%22y1Axis%22:%7B%22label%22:%22%22,%22scale%22:%22LINEAR%22%7D%7D%7D&project=gitlab-review-apps
+ cpu: 500m
+ # Based on https://console.cloud.google.com/monitoring/metrics-explorer;duration=P14D?pageState=%7B%22xyChart%22:%7B%22constantLines%22:%5B%5D,%22dataSets%22:%5B%7B%22plotType%22:%22LINE%22,%22targetAxis%22:%22Y1%22,%22timeSeriesFilter%22:%7B%22aggregations%22:%5B%7B%22crossSeriesReducer%22:%22REDUCE_NONE%22,%22groupByFields%22:%5B%5D,%22perSeriesAligner%22:%22ALIGN_MEAN%22%7D%5D,%22apiSource%22:%22DEFAULT_CLOUD%22,%22crossSeriesReducer%22:%22REDUCE_NONE%22,%22filter%22:%22metric.type%3D%5C%22kubernetes.io%2Fcontainer%2Fmemory%2Fused_bytes%5C%22%20resource.type%3D%5C%22k8s_container%5C%22%20resource.label.%5C%22container_name%5C%22%3D%5C%22webservice%5C%22%22,%22groupByFields%22:%5B%5D,%22minAlignmentPeriod%22:%2260s%22,%22perSeriesAligner%22:%22ALIGN_MEAN%22%7D%7D%5D,%22options%22:%7B%22mode%22:%22STATS%22%7D,%22y1Axis%22:%7B%22label%22:%22%22,%22scale%22:%22LINEAR%22%7D%7D%7D&project=gitlab-review-apps
+ memory: 2500Mi
limits:
- cpu: 1400m
- memory: 4214Mi
+ cpu: 1200m
+ memory: 3750Mi
minReplicas: 1
maxReplicas: 1
deployment:
@@ -141,11 +136,13 @@ gitlab:
workhorse:
resources:
requests:
- cpu: 400m
- memory: 75Mi
+ # Based on https://console.cloud.google.com/monitoring/metrics-explorer;duration=P14D?pageState=%7B%22xyChart%22:%7B%22constantLines%22:%5B%5D,%22dataSets%22:%5B%7B%22plotType%22:%22LINE%22,%22targetAxis%22:%22Y1%22,%22timeSeriesFilter%22:%7B%22aggregations%22:%5B%7B%22crossSeriesReducer%22:%22REDUCE_NONE%22,%22groupByFields%22:%5B%5D,%22perSeriesAligner%22:%22ALIGN_RATE%22%7D,%7B%22crossSeriesReducer%22:%22REDUCE_NONE%22,%22groupByFields%22:%5B%5D,%22perSeriesAligner%22:%22ALIGN_MEAN%22%7D%5D,%22apiSource%22:%22DEFAULT_CLOUD%22,%22crossSeriesReducer%22:%22REDUCE_NONE%22,%22filter%22:%22metric.type%3D%5C%22kubernetes.io%2Fcontainer%2Fcpu%2Fcore_usage_time%5C%22%20resource.type%3D%5C%22k8s_container%5C%22%20resource.label.%5C%22container_name%5C%22%3D%5C%22gitlab-workhorse%5C%22%22,%22groupByFields%22:%5B%5D,%22minAlignmentPeriod%22:%2260s%22,%22perSeriesAligner%22:%22ALIGN_RATE%22,%22secondaryCrossSeriesReducer%22:%22REDUCE_NONE%22,%22secondaryGroupByFields%22:%5B%5D%7D%7D%5D,%22options%22:%7B%22mode%22:%22STATS%22%7D,%22y1Axis%22:%7B%22label%22:%22%22,%22scale%22:%22LINEAR%22%7D%7D%7D&project=gitlab-review-apps
+ cpu: 12m
+ # Based on https://console.cloud.google.com/monitoring/metrics-explorer;duration=P14D?pageState=%7B%22xyChart%22:%7B%22constantLines%22:%5B%5D,%22dataSets%22:%5B%7B%22plotType%22:%22LINE%22,%22targetAxis%22:%22Y1%22,%22timeSeriesFilter%22:%7B%22aggregations%22:%5B%7B%22crossSeriesReducer%22:%22REDUCE_NONE%22,%22groupByFields%22:%5B%5D,%22perSeriesAligner%22:%22ALIGN_MEAN%22%7D%5D,%22apiSource%22:%22DEFAULT_CLOUD%22,%22crossSeriesReducer%22:%22REDUCE_NONE%22,%22filter%22:%22metric.type%3D%5C%22kubernetes.io%2Fcontainer%2Fmemory%2Fused_bytes%5C%22%20resource.type%3D%5C%22k8s_container%5C%22%20resource.label.%5C%22container_name%5C%22%3D%5C%22gitlab-workhorse%5C%22%22,%22groupByFields%22:%5B%5D,%22minAlignmentPeriod%22:%2260s%22,%22perSeriesAligner%22:%22ALIGN_MEAN%22%7D%7D%5D,%22options%22:%7B%22mode%22:%22STATS%22%7D,%22y1Axis%22:%7B%22label%22:%22%22,%22scale%22:%22LINEAR%22%7D%7D%7D&project=gitlab-review-apps
+ memory: 20Mi
limits:
- cpu: 600m
- memory: 113Mi
+ cpu: 30m
+ memory: 50Mi
readinessProbe:
initialDelaySeconds: 5 # Default is 0
periodSeconds: 15 # Default is 10
@@ -155,13 +152,8 @@ gitlab-runner:
resources:
requests:
# Based on https://console.cloud.google.com/monitoring/metrics-explorer;endTime=2023-04-19T08:37:33.183Z;startTime=2023-02-05T09:37:33.182Z?pageState=%7B%22xyChart%22:%7B%22constantLines%22:%5B%5D,%22dataSets%22:%5B%7B%22plotType%22:%22LINE%22,%22targetAxis%22:%22Y1%22,%22timeSeriesFilter%22:%7B%22aggregations%22:%5B%7B%22crossSeriesReducer%22:%22REDUCE_NONE%22,%22groupByFields%22:%5B%5D,%22perSeriesAligner%22:%22ALIGN_RATE%22%7D,%7B%22crossSeriesReducer%22:%22REDUCE_MEAN%22,%22groupByFields%22:%5B%22resource.label.%5C%22namespace_name%5C%22%22%5D,%22perSeriesAligner%22:%22ALIGN_MEAN%22%7D%5D,%22apiSource%22:%22DEFAULT_CLOUD%22,%22crossSeriesReducer%22:%22REDUCE_NONE%22,%22filter%22:%22metric.type%3D%5C%22kubernetes.io%2Fcontainer%2Fcpu%2Fcore_usage_time%5C%22%20resource.type%3D%5C%22k8s_container%5C%22%20resource.label.%5C%22container_name%5C%22%3Dmonitoring.regex.full_match(%5C%22.*gitlab-runner$%5C%22)%22,%22groupByFields%22:%5B%5D,%22minAlignmentPeriod%22:%2260s%22,%22perSeriesAligner%22:%22ALIGN_RATE%22,%22secondaryCrossSeriesReducer%22:%22REDUCE_MEAN%22,%22secondaryGroupByFields%22:%5B%22resource.label.%5C%22namespace_name%5C%22%22%5D%7D%7D%5D,%22options%22:%7B%22mode%22:%22COLOR%22%7D,%22y1Axis%22:%7B%22label%22:%22%22,%22scale%22:%22LINEAR%22%7D%7D%7D&project=gitlab-review-apps
- #
- # Data over the 3 months (2023-02-24 - 2023-04-19)
- #
- # The average seems to be around 0.01vCPU. Note that this is a guesstimate based on the chart.
- #
- # The maximum CPU usage was 0.015vCPU (setting limit accordingly)
- cpu: 10m
+ cpu: 200m
+ # Based on https://console.cloud.google.com/monitoring/metrics-explorer;duration=P14D?pageState=%7B%22xyChart%22:%7B%22constantLines%22:%5B%5D,%22dataSets%22:%5B%7B%22plotType%22:%22LINE%22,%22targetAxis%22:%22Y1%22,%22timeSeriesFilter%22:%7B%22aggregations%22:%5B%7B%22crossSeriesReducer%22:%22REDUCE_NONE%22,%22groupByFields%22:%5B%5D,%22perSeriesAligner%22:%22ALIGN_MEAN%22%7D%5D,%22apiSource%22:%22DEFAULT_CLOUD%22,%22crossSeriesReducer%22:%22REDUCE_NONE%22,%22filter%22:%22metric.type%3D%5C%22kubernetes.io%2Fcontainer%2Fmemory%2Fused_bytes%5C%22%20resource.type%3D%5C%22k8s_container%5C%22%20resource.label.%5C%22container_name%5C%22%3Dmonitoring.regex.full_match(%5C%22.*gitlab-runner$%5C%22)%22,%22groupByFields%22:%5B%5D,%22minAlignmentPeriod%22:%2260s%22,%22perSeriesAligner%22:%22ALIGN_MEAN%22%7D%7D%5D,%22options%22:%7B%22mode%22:%22STATS%22%7D,%22y1Axis%22:%7B%22label%22:%22%22,%22scale%22:%22LINEAR%22%7D%7D%7D&project=gitlab-review-apps
memory: 100Mi
limits:
# In case somebody would like to use runners in review-apps, we set the limit higher than the requests
@@ -191,18 +183,13 @@ nginx-ingress:
ssl-ciphers: ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-RSA-AES256-SHA384:ECDHE-RSA-AES128-SHA256:ECDHE-RSA-AES256-SHA:ECDHE-RSA-AES128-SHA:AES256-GCM-SHA384:AES128-GCM-SHA256:AES256-SHA256:AES128-SHA256:AES256-SHA:AES128-SHA:!aNULL:!eNULL:!EXPORT:!DES:!MD5:!PSK:!RC4
resources:
requests:
- # Based on https://console.cloud.google.com/monitoring/metrics-explorer;endTime=2023-04-19T08:37:33.183Z;startTime=2023-02-05T09:37:33.182Z?pageState=%7B%22xyChart%22:%7B%22constantLines%22:%5B%5D,%22dataSets%22:%5B%7B%22plotType%22:%22LINE%22,%22targetAxis%22:%22Y1%22,%22timeSeriesFilter%22:%7B%22aggregations%22:%5B%7B%22crossSeriesReducer%22:%22REDUCE_NONE%22,%22groupByFields%22:%5B%5D,%22perSeriesAligner%22:%22ALIGN_RATE%22%7D,%7B%22crossSeriesReducer%22:%22REDUCE_MEAN%22,%22groupByFields%22:%5B%22resource.label.%5C%22namespace_name%5C%22%22%5D,%22perSeriesAligner%22:%22ALIGN_MEAN%22%7D%5D,%22apiSource%22:%22DEFAULT_CLOUD%22,%22crossSeriesReducer%22:%22REDUCE_NONE%22,%22filter%22:%22metric.type%3D%5C%22kubernetes.io%2Fcontainer%2Fcpu%2Fcore_usage_time%5C%22%20resource.type%3D%5C%22k8s_container%5C%22%20resource.label.%5C%22container_name%5C%22%3D%5C%22controller%5C%22%22,%22groupByFields%22:%5B%5D,%22minAlignmentPeriod%22:%2260s%22,%22perSeriesAligner%22:%22ALIGN_RATE%22,%22secondaryCrossSeriesReducer%22:%22REDUCE_MEAN%22,%22secondaryGroupByFields%22:%5B%22resource.label.%5C%22namespace_name%5C%22%22%5D%7D%7D%5D,%22options%22:%7B%22mode%22:%22COLOR%22%7D,%22y1Axis%22:%7B%22label%22:%22%22,%22scale%22:%22LINEAR%22%7D%7D%7D&project=gitlab-review-apps
- #
- # Data over the 3 months (2023-02-24 - 2023-04-19)
- #
- # The average seems to be around 0.02vCPU. Note that this is a guesstimate based on the chart.
- #
- # The maximum CPU usage was 0.07vCPU (setting limit accordingly)
+ # Based on https://console.cloud.google.com/monitoring/metrics-explorer;duration=P14D?pageState=%7B%22xyChart%22:%7B%22constantLines%22:%5B%5D,%22dataSets%22:%5B%7B%22plotType%22:%22LINE%22,%22targetAxis%22:%22Y1%22,%22timeSeriesFilter%22:%7B%22aggregations%22:%5B%7B%22crossSeriesReducer%22:%22REDUCE_NONE%22,%22groupByFields%22:%5B%5D,%22perSeriesAligner%22:%22ALIGN_RATE%22%7D,%7B%22crossSeriesReducer%22:%22REDUCE_NONE%22,%22groupByFields%22:%5B%5D,%22perSeriesAligner%22:%22ALIGN_MEAN%22%7D%5D,%22apiSource%22:%22DEFAULT_CLOUD%22,%22crossSeriesReducer%22:%22REDUCE_NONE%22,%22filter%22:%22metric.type%3D%5C%22kubernetes.io%2Fcontainer%2Fcpu%2Fcore_usage_time%5C%22%20resource.type%3D%5C%22k8s_container%5C%22%20resource.label.%5C%22container_name%5C%22%3D%5C%22controller%5C%22%22,%22groupByFields%22:%5B%5D,%22minAlignmentPeriod%22:%2260s%22,%22perSeriesAligner%22:%22ALIGN_RATE%22,%22secondaryCrossSeriesReducer%22:%22REDUCE_NONE%22,%22secondaryGroupByFields%22:%5B%5D%7D%7D%5D,%22options%22:%7B%22mode%22:%22STATS%22%7D,%22y1Axis%22:%7B%22label%22:%22%22,%22scale%22:%22LINEAR%22%7D%7D%7D&project=gitlab-review-apps
cpu: 10m
- memory: 450Mi
+ # Based on https://console.cloud.google.com/monitoring/metrics-explorer;duration=P14D?pageState=%7B%22xyChart%22:%7B%22constantLines%22:%5B%5D,%22dataSets%22:%5B%7B%22plotType%22:%22LINE%22,%22targetAxis%22:%22Y1%22,%22timeSeriesFilter%22:%7B%22aggregations%22:%5B%7B%22crossSeriesReducer%22:%22REDUCE_NONE%22,%22groupByFields%22:%5B%5D,%22perSeriesAligner%22:%22ALIGN_MEAN%22%7D%5D,%22apiSource%22:%22DEFAULT_CLOUD%22,%22crossSeriesReducer%22:%22REDUCE_NONE%22,%22filter%22:%22metric.type%3D%5C%22kubernetes.io%2Fcontainer%2Fmemory%2Fused_bytes%5C%22%20resource.type%3D%5C%22k8s_container%5C%22%20resource.label.%5C%22container_name%5C%22%3D%5C%22controller%5C%22%22,%22groupByFields%22:%5B%5D,%22minAlignmentPeriod%22:%2260s%22,%22perSeriesAligner%22:%22ALIGN_MEAN%22%7D%7D%5D,%22options%22:%7B%22mode%22:%22STATS%22%7D,%22y1Axis%22:%7B%22label%22:%22%22,%22scale%22:%22LINEAR%22%7D%7D%7D&project=gitlab-review-apps
+ memory: 70Mi
limits:
cpu: 20m
- memory: 675Mi
+ memory: 150Mi
service:
enableHttp: false
livenessProbe:
@@ -225,20 +212,6 @@ nginx-ingress:
postgresql:
metrics:
enabled: false
- resources:
- requests:
- # Based on https://console.cloud.google.com/monitoring/metrics-explorer;endTime=2023-04-19T08:37:33.183Z;startTime=2023-02-05T09:37:33.182Z?pageState=%7B%22xyChart%22:%7B%22constantLines%22:%5B%5D,%22dataSets%22:%5B%7B%22plotType%22:%22LINE%22,%22targetAxis%22:%22Y1%22,%22timeSeriesFilter%22:%7B%22aggregations%22:%5B%7B%22crossSeriesReducer%22:%22REDUCE_NONE%22,%22groupByFields%22:%5B%5D,%22perSeriesAligner%22:%22ALIGN_RATE%22%7D,%7B%22crossSeriesReducer%22:%22REDUCE_MEAN%22,%22groupByFields%22:%5B%22resource.label.%5C%22namespace_name%5C%22%22%5D,%22perSeriesAligner%22:%22ALIGN_MEAN%22%7D%5D,%22apiSource%22:%22DEFAULT_CLOUD%22,%22crossSeriesReducer%22:%22REDUCE_NONE%22,%22filter%22:%22metric.type%3D%5C%22kubernetes.io%2Fcontainer%2Fcpu%2Fcore_usage_time%5C%22%20resource.type%3D%5C%22k8s_container%5C%22%20resource.label.%5C%22container_name%5C%22%3Dmonitoring.regex.full_match(%5C%22.*-postgresql$%5C%22)%22,%22groupByFields%22:%5B%5D,%22minAlignmentPeriod%22:%2260s%22,%22perSeriesAligner%22:%22ALIGN_RATE%22,%22secondaryCrossSeriesReducer%22:%22REDUCE_MEAN%22,%22secondaryGroupByFields%22:%5B%22resource.label.%5C%22namespace_name%5C%22%22%5D%7D%7D%5D,%22options%22:%7B%22mode%22:%22COLOR%22%7D,%22y1Axis%22:%7B%22label%22:%22%22,%22scale%22:%22LINEAR%22%7D%7D%7D&project=gitlab-review-apps
- #
- # Data over the 3 months (2023-02-24 - 2023-04-19)
- #
- # The average seems to be around 0.150vCPU. Note that this is a guesstimate based on the chart.
- #
- # The maximum CPU usage was 0.420vCPU (setting limit accordingly)
- cpu: 150m
- memory: 1000Mi
- limits:
- cpu: 1000m
- memory: 1800Mi
master:
nodeSelector:
preemptible: "false"
@@ -251,43 +224,24 @@ prometheus:
redis:
metrics:
enabled: false
- resources:
- requests:
- # Based on https://console.cloud.google.com/monitoring/metrics-explorer;endTime=2023-04-19T08:37:33.183Z;startTime=2023-02-05T09:37:33.182Z?pageState=%7B%22xyChart%22:%7B%22constantLines%22:%5B%5D,%22dataSets%22:%5B%7B%22plotType%22:%22LINE%22,%22targetAxis%22:%22Y1%22,%22timeSeriesFilter%22:%7B%22aggregations%22:%5B%7B%22crossSeriesReducer%22:%22REDUCE_NONE%22,%22groupByFields%22:%5B%5D,%22perSeriesAligner%22:%22ALIGN_RATE%22%7D,%7B%22crossSeriesReducer%22:%22REDUCE_MEAN%22,%22groupByFields%22:%5B%22resource.label.%5C%22namespace_name%5C%22%22%5D,%22perSeriesAligner%22:%22ALIGN_MEAN%22%7D%5D,%22apiSource%22:%22DEFAULT_CLOUD%22,%22crossSeriesReducer%22:%22REDUCE_NONE%22,%22filter%22:%22metric.type%3D%5C%22kubernetes.io%2Fcontainer%2Fcpu%2Fcore_usage_time%5C%22%20resource.type%3D%5C%22k8s_container%5C%22%20resource.label.%5C%22container_name%5C%22%3D%5C%22redis%5C%22%22,%22groupByFields%22:%5B%5D,%22minAlignmentPeriod%22:%2260s%22,%22perSeriesAligner%22:%22ALIGN_RATE%22,%22secondaryCrossSeriesReducer%22:%22REDUCE_MEAN%22,%22secondaryGroupByFields%22:%5B%22resource.label.%5C%22namespace_name%5C%22%22%5D%7D%7D%5D,%22options%22:%7B%22mode%22:%22COLOR%22%7D,%22y1Axis%22:%7B%22label%22:%22%22,%22scale%22:%22LINEAR%22%7D%7D%7D&project=gitlab-review-apps
- #
- # Data over the 3 months (2023-02-24 - 2023-04-19)
- #
- # The average seems to be around 0.03vCPU. Note that this is a guesstimate based on the chart.
- #
- # The maximum CPU usage was 0.500vCPU (setting limit accordingly)
- cpu: 10m
- memory: 60Mi
- limits:
- cpu: 500m
- memory: 130Mi
master:
nodeSelector:
- preemptible: "true"
+ preemptible: "false"
podAnnotations:
<<: *safe-to-evict
registry:
hpa:
- minReplicas: 1
- maxReplicas: 1
+ minReplicas: 2
+ maxReplicas: 2
resources:
- # Based on https://console.cloud.google.com/monitoring/metrics-explorer;endTime=2023-04-19T08:37:33.183Z;startTime=2023-02-05T09:37:33.182Z?pageState=%7B%22xyChart%22:%7B%22constantLines%22:%5B%5D,%22dataSets%22:%5B%7B%22plotType%22:%22LINE%22,%22targetAxis%22:%22Y1%22,%22timeSeriesFilter%22:%7B%22aggregations%22:%5B%7B%22crossSeriesReducer%22:%22REDUCE_NONE%22,%22groupByFields%22:%5B%5D,%22perSeriesAligner%22:%22ALIGN_RATE%22%7D,%7B%22crossSeriesReducer%22:%22REDUCE_MEAN%22,%22groupByFields%22:%5B%22resource.label.%5C%22namespace_name%5C%22%22%5D,%22perSeriesAligner%22:%22ALIGN_MEAN%22%7D%5D,%22apiSource%22:%22DEFAULT_CLOUD%22,%22crossSeriesReducer%22:%22REDUCE_NONE%22,%22filter%22:%22metric.type%3D%5C%22kubernetes.io%2Fcontainer%2Fcpu%2Fcore_usage_time%5C%22%20resource.type%3D%5C%22k8s_container%5C%22%20resource.label.%5C%22container_name%5C%22%3D%5C%22registry%5C%22%22,%22groupByFields%22:%5B%5D,%22minAlignmentPeriod%22:%2260s%22,%22perSeriesAligner%22:%22ALIGN_RATE%22,%22secondaryCrossSeriesReducer%22:%22REDUCE_MEAN%22,%22secondaryGroupByFields%22:%5B%22resource.label.%5C%22namespace_name%5C%22%22%5D%7D%7D%5D,%22options%22:%7B%22mode%22:%22COLOR%22%7D,%22y1Axis%22:%7B%22label%22:%22%22,%22scale%22:%22LINEAR%22%7D%7D%7D&project=gitlab-review-apps
- #
- # Data over the 3 months (2023-02-24 - 2023-04-19)
- #
- # The average seems to be around 0.0005vCPU. Note that this is a guesstimate based on the chart.
- #
- # The maximum CPU usage was 0.0.003vCPU (setting limit accordingly)
requests:
+ # Based on https://console.cloud.google.com/monitoring/metrics-explorer;duration=P14D?pageState=%7B%22xyChart%22:%7B%22constantLines%22:%5B%5D,%22dataSets%22:%5B%7B%22plotType%22:%22LINE%22,%22targetAxis%22:%22Y1%22,%22timeSeriesFilter%22:%7B%22aggregations%22:%5B%7B%22crossSeriesReducer%22:%22REDUCE_NONE%22,%22groupByFields%22:%5B%5D,%22perSeriesAligner%22:%22ALIGN_RATE%22%7D,%7B%22crossSeriesReducer%22:%22REDUCE_NONE%22,%22groupByFields%22:%5B%5D,%22perSeriesAligner%22:%22ALIGN_MEAN%22%7D%5D,%22apiSource%22:%22DEFAULT_CLOUD%22,%22crossSeriesReducer%22:%22REDUCE_NONE%22,%22filter%22:%22metric.type%3D%5C%22kubernetes.io%2Fcontainer%2Fcpu%2Fcore_usage_time%5C%22%20resource.type%3D%5C%22k8s_container%5C%22%20resource.label.%5C%22container_name%5C%22%3D%5C%22registry%5C%22%22,%22groupByFields%22:%5B%5D,%22minAlignmentPeriod%22:%2260s%22,%22perSeriesAligner%22:%22ALIGN_RATE%22,%22secondaryCrossSeriesReducer%22:%22REDUCE_NONE%22,%22secondaryGroupByFields%22:%5B%5D%7D%7D%5D,%22options%22:%7B%22mode%22:%22STATS%22%7D,%22y1Axis%22:%7B%22label%22:%22%22,%22scale%22:%22LINEAR%22%7D%7D%7D&project=gitlab-review-apps
cpu: 10m
- memory: 30Mi
+ # Based on https://console.cloud.google.com/monitoring/metrics-explorer;duration=P14D?pageState=%7B%22xyChart%22:%7B%22constantLines%22:%5B%5D,%22dataSets%22:%5B%7B%22plotType%22:%22LINE%22,%22targetAxis%22:%22Y1%22,%22timeSeriesFilter%22:%7B%22aggregations%22:%5B%7B%22crossSeriesReducer%22:%22REDUCE_NONE%22,%22groupByFields%22:%5B%5D,%22perSeriesAligner%22:%22ALIGN_MEAN%22%7D%5D,%22apiSource%22:%22DEFAULT_CLOUD%22,%22crossSeriesReducer%22:%22REDUCE_NONE%22,%22filter%22:%22metric.type%3D%5C%22kubernetes.io%2Fcontainer%2Fmemory%2Fused_bytes%5C%22%20resource.type%3D%5C%22k8s_container%5C%22%20resource.label.%5C%22container_name%5C%22%3D%5C%22registry%5C%22%22,%22groupByFields%22:%5B%5D,%22minAlignmentPeriod%22:%2260s%22,%22perSeriesAligner%22:%22ALIGN_MEAN%22%7D%7D%5D,%22options%22:%7B%22mode%22:%22STATS%22%7D,%22y1Axis%22:%7B%22label%22:%22%22,%22scale%22:%22LINEAR%22%7D%7D%7D&project=gitlab-review-apps
+ memory: 20Mi
limits:
cpu: 50m
- memory: 45Mi
+ memory: 40Mi
nodeSelector:
preemptible: "true"
diff --git a/scripts/review_apps/review-apps.sh b/scripts/review_apps/review-apps.sh
index 728763e56d7..ab1675871ee 100755
--- a/scripts/review_apps/review-apps.sh
+++ b/scripts/review_apps/review-apps.sh
@@ -451,24 +451,40 @@ function verify_commit_sha() {
function display_deployment_debug() {
local namespace="${CI_ENVIRONMENT_SLUG}"
- # Install dig to inspect DNS entries
- apk add -q bind-tools
+ echo
+ echoinfo "*************************************************************************************"
+ echoinfo "*********************************** DEBUGGING DATA **********************************"
+ echoinfo "*************************************************************************************"
+ echo
- echoinfo "[debugging data] Check review-app webservice DNS entry:"
- dig +short $(echo "${CI_ENVIRONMENT_URL}" | sed 's~http[s]*://~~g')
-
- echoinfo "[debugging data] Check external IP for nginx-ingress-controller service (should be THE SAME AS the DNS entry IP above):"
- kubectl -n "${namespace}" get svc "${namespace}-nginx-ingress-controller" -o jsonpath='{.status.loadBalancer.ingress[].ip}'
-
- echoinfo "[debugging data] k8s resources:"
+ echoinfo "k8s resources:"
kubectl -n "${namespace}" get pods
- echoinfo "[debugging data] PostgreSQL logs:"
+ echoinfo "PostgreSQL logs:"
kubectl -n "${namespace}" logs -l app=postgresql --all-containers
- echoinfo "[debugging data] DB migrations logs:"
+ echoinfo "DB migrations logs:"
kubectl -n "${namespace}" logs -l app=migrations --all-containers
- echoinfo "[debugging data] Webservice logs:"
+ echoinfo "Webservice logs:"
kubectl -n "${namespace}" logs -l app=webservice -c webservice
+
+ echo
+ echoinfo "*************************************************************************************"
+ echoinfo "********************** This job failed. Should you restart it? **********************"
+ echoinfo "*************************************************************************************"
+ echo
+ echo "If it seems to be an infrastructure issue from the job output, please restart this job."
+ echo
+ echo "IMPORTANT: Error: \"UPGRADE FAILED: Get XXX : context deadline exceeded\" is not necessarily an infrastructure issue."
+ echo "It just means that the review-app could not be deployed successfully (e.g. the app or one of its component could not boot successfully)"
+ echo
+ echo "If you're unsure, have a look at the errors raised in Sentry for this review-app:"
+ echo "https://new-sentry.gitlab.net/organizations/gitlab/releases/$(echo "${CI_COMMIT_SHA}" | cut -c1-11)/?project=19&issuesType=all"
+ echo ""
+ echo "if it does not look like an error due to some changes done in the MR, please restart this job."
+ echo
+ echoinfo "*************************************************************************************"
+ echoinfo "*************************************************************************************"
+ echoinfo "*************************************************************************************"
}
diff --git a/scripts/rspec_helpers.sh b/scripts/rspec_helpers.sh
index eefd9ed4993..46ffbc223eb 100644
--- a/scripts/rspec_helpers.sh
+++ b/scripts/rspec_helpers.sh
@@ -23,9 +23,16 @@ function update_tests_metadata() {
local rspec_flaky_folder_path="$(dirname "${FLAKY_RSPEC_SUITE_REPORT_PATH:-unknown_folder}")/"
local knapsack_folder_path="$(dirname "${KNAPSACK_RSPEC_SUITE_REPORT_PATH:-unknown_folder}")/"
- echo "{}" > "${KNAPSACK_RSPEC_SUITE_REPORT_PATH:-unknown_file}"
+ curl -f --location -o "${KNAPSACK_RSPEC_SUITE_REPORT_PATH}" "https://gitlab-org.gitlab.io/gitlab/${KNAPSACK_RSPEC_SUITE_REPORT_PATH}" ||
+ echo "{}" > "${KNAPSACK_RSPEC_SUITE_REPORT_PATH:-unknown_file}"
- scripts/merge-reports "${KNAPSACK_RSPEC_SUITE_REPORT_PATH:-unknown_file}" ${knapsack_folder_path:-unknown_folder}rspec*.json
+ if [[ "$AVERAGE_KNAPSACK_REPORT" == "true" ]]; then
+ # a comma separated list of file names matching the glob
+ local new_reports="$(printf '%s,' ${knapsack_folder_path:-unknown_folder}rspec*.json)"
+ scripts/pipeline/average_reports.rb -i "${KNAPSACK_RSPEC_SUITE_REPORT_PATH:-unknown_file}" -n "${new_reports}"
+ else
+ scripts/merge-reports "${KNAPSACK_RSPEC_SUITE_REPORT_PATH:-unknown_file}" ${knapsack_folder_path:-unknown_folder}rspec*.json
+ fi
export FLAKY_RSPEC_GENERATE_REPORT="true"
scripts/merge-reports "${FLAKY_RSPEC_SUITE_REPORT_PATH:-unknown_file}" ${rspec_flaky_folder_path:-unknown_folder}all_*.json
@@ -297,6 +304,9 @@ function retry_failed_rspec_examples() {
exit 1
fi
+ # Job metrics for influxDB/Grafana
+ tooling/bin/update_job_metrics_tag rspec_retried_in_new_process "true" || true
+
# Keep track of the tests that are retried, later consolidated in a single file by the `rspec:flaky-tests-report` job
local failed_examples=$(grep " failed" ${RSPEC_LAST_RUN_RESULTS_FILE})
local report_name=$(echo "${CI_JOB_NAME}" | sed -E 's|[/ ]|_|g') # e.g. 'rspec unit pg13 1/24' would become 'rspec_unit_pg13_1_24'
diff --git a/scripts/undercoverage b/scripts/undercoverage
index 348f421c0d5..4acfc78b11b 100755
--- a/scripts/undercoverage
+++ b/scripts/undercoverage
@@ -20,11 +20,11 @@ module Undercover
end
compare_base = ARGV[0]
-compare_base ||= IO.popen(%w(git merge-base origin/master HEAD)) { |p| p.read.chomp }
+compare_base ||= IO.popen(%w[git merge-base origin/master HEAD]) { |p| p.read.chomp }
coverage_file_path = 'coverage/lcov/gitlab.lcov'
result = if File.exist?(coverage_file_path)
- Undercover::CLI.run(%W(-c #{compare_base}))
+ Undercover::CLI.run(%W[-c #{compare_base}])
else
warn "#{coverage_file_path} doesn't exist"
0
diff --git a/scripts/utils.sh b/scripts/utils.sh
index 13a051e2b58..4a5e74353f6 100644
--- a/scripts/utils.sh
+++ b/scripts/utils.sh
@@ -14,7 +14,7 @@ function retry_times_sleep() {
for i in $(seq "${number_of_retries}" -1 1); do
sleep "$sleep_seconds"s
- echo "[$(date '+%H:%M:%S')] Retrying $i..."
+ echo "[$(date '+%H:%M:%S')] Retry attempts left: $i..."
if eval "$@"; then
return 0
fi
@@ -54,6 +54,7 @@ function test_url() {
status=$(eval "${cmd}")
if [[ $status == "200" ]]; then
+ echo -e "\n[$(date '+%H:%M:%S')] Curl to $url successful with 200 response"
return 0
else
# We display the error in the job to allow for better debugging