gitlab.com/gitlab-org/gitlab-foss.git
author     GitLab Bot <gitlab-bot@gitlab.com>  2022-11-17 14:33:21 +0300
committer  GitLab Bot <gitlab-bot@gitlab.com>  2022-11-17 14:33:21 +0300
commit     7021455bd1ed7b125c55eb1b33c5a01f2bc55ee0 (patch)
tree       5bdc2229f5198d516781f8d24eace62fc7e589e9 /scripts
parent     185b095e93520f96e9cfc31d9c3e69b498cdab7c (diff)

Add latest changes from gitlab-org/gitlab@15-6-stable-ee (tag: v15.6.0-rc42)
Diffstat (limited to 'scripts')
 -rw-r--r-- | scripts/api/create_issue.rb | 29
 -rw-r--r-- | scripts/api/pipeline_failed_jobs.rb | 2
 -rwxr-xr-x | scripts/build_qa_image | 31
 -rwxr-xr-x | scripts/create-pipeline-failure-incident.rb | 174
 -rwxr-xr-x | scripts/generate-e2e-pipeline | 7
 -rwxr-xr-x | scripts/generate-failed-pipeline-slack-message.rb | 143
 -rwxr-xr-x | scripts/glfm/run-snapshot-tests.sh | 3
 -rw-r--r-- | scripts/lib/glfm/constants.rb | 34
 -rw-r--r-- | scripts/lib/glfm/parse_examples.rb | 10
 -rw-r--r-- | scripts/lib/glfm/update_example_snapshots.rb | 12
 -rw-r--r-- | scripts/lib/glfm/update_specification.rb | 161
 -rw-r--r-- | scripts/lib/glfm/verify_all_generated_files_are_up_to_date.rb | 18
 -rwxr-xr-x | scripts/license-check.sh | 3
 -rwxr-xr-x | scripts/lint-doc.sh | 2
 -rwxr-xr-x | scripts/lint_templates_bash.rb | 6
 -rwxr-xr-x | scripts/merge-reports | 2
 -rwxr-xr-x | scripts/perf/gc/print_gc_stats.rb | 2
 -rwxr-xr-x | scripts/perf/query_limiting_report.rb | 26
 -rwxr-xr-x | scripts/qa/quarantine-types-check | 18
 -rwxr-xr-x | scripts/qa/testcases-check | 3
 -rwxr-xr-x | scripts/review_apps/automated_cleanup.rb | 6
 -rw-r--r-- | scripts/review_apps/base-config.yaml | 108
 -rwxr-xr-x | scripts/review_apps/review-apps.sh | 63
 -rw-r--r-- | scripts/rspec_helpers.sh | 15
 -rwxr-xr-x | scripts/rubocop-parse | 44
 -rwxr-xr-x | scripts/security-harness | 1
 -rwxr-xr-x | scripts/setup/as-if-jh.sh | 28
 -rwxr-xr-x | scripts/used-feature-flags | 4
 -rw-r--r-- | scripts/utils.sh | 2
 29 files changed, 687 insertions(+), 270 deletions(-)
diff --git a/scripts/api/create_issue.rb b/scripts/api/create_issue.rb
new file mode 100644
index 00000000000..2117c285771
--- /dev/null
+++ b/scripts/api/create_issue.rb
@@ -0,0 +1,29 @@
+# frozen_string_literal: true
+
+require 'gitlab'
+require_relative 'default_options'
+
+class CreateIssue
+ def initialize(options)
+ @project = options.fetch(:project)
+
+ # Force the token to be a string so that if api_token is nil, it's set to '',
+ # allowing unauthenticated requests (for forks).
+ api_token = options.delete(:api_token).to_s
+
+ warn "No API token given." if api_token.empty?
+
+ @client = Gitlab.client(
+ endpoint: options.delete(:endpoint) || API::DEFAULT_OPTIONS[:endpoint],
+ private_token: api_token
+ )
+ end
+
+ def execute(issue_data)
+ client.create_issue(project, issue_data.delete(:title), issue_data)
+ end
+
+ private
+
+ attr_reader :project, :client
+end
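
For context, CreateIssue wraps the Gitlab gem client, and the call in execute maps onto the standard REST endpoint for creating issues. A rough curl equivalent, assuming the default https://gitlab.com/api/v4 endpoint from default_options (all values are placeholders; issue_type=incident is what the incident script later in this diff passes):

    curl --request POST \
      --header "PRIVATE-TOKEN: <project-token>" \
      --data-urlencode "title=<issue title>" \
      --data-urlencode "labels=<label-1,label-2>" \
      --data-urlencode "issue_type=incident" \
      "https://gitlab.com/api/v4/projects/<url-encoded-project-path>/issues"

When no token is given, the request goes out unauthenticated, which is what the fork-friendly fallback in the comment above is for.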
diff --git a/scripts/api/pipeline_failed_jobs.rb b/scripts/api/pipeline_failed_jobs.rb
index c25567af698..df9a7e76dcd 100644
--- a/scripts/api/pipeline_failed_jobs.rb
+++ b/scripts/api/pipeline_failed_jobs.rb
@@ -1,7 +1,7 @@
# frozen_string_literal: true
require 'gitlab'
-require 'optparse'
+
require_relative 'default_options'
class PipelineFailedJobs
diff --git a/scripts/build_qa_image b/scripts/build_qa_image
index f4ecb8ed6b6..477bec29ba7 100755
--- a/scripts/build_qa_image
+++ b/scripts/build_qa_image
@@ -1,32 +1,43 @@
-#!/bin/sh
+#!/bin/bash
QA_IMAGE_NAME="gitlab-ee-qa"
-if [ "${CI_PROJECT_NAME}" == "gitlabhq" ] || [ "${CI_PROJECT_NAME}" == "gitlab-foss" ]; then
+if [[ "${CI_PROJECT_NAME}" == "gitlabhq" || "${CI_PROJECT_NAME}" == "gitlab-foss" ]]; then
QA_IMAGE_NAME="gitlab-ce-qa"
fi
# Tag with commit SHA by default
QA_IMAGE="${CI_REGISTRY}/${CI_PROJECT_PATH}/${QA_IMAGE_NAME}:${CI_COMMIT_SHA}"
+
# For branches, tag with slugified branch name. For tags, use the tag directly
-QA_IMAGE_BRANCH="${CI_REGISTRY}/${CI_PROJECT_PATH}/${QA_IMAGE_NAME}:${CI_COMMIT_TAG:-$CI_COMMIT_REF_SLUG}"
+# with v prefix removed
+IMAGE_TAG=${CI_COMMIT_TAG#v}
+IMAGE_TAG=${IMAGE_TAG:-$CI_COMMIT_REF_SLUG}
+
+QA_IMAGE_BRANCH="${CI_REGISTRY}/${CI_PROJECT_PATH}/${QA_IMAGE_NAME}:${IMAGE_TAG}"
+QA_IMAGE_MASTER="${CI_REGISTRY}/${CI_PROJECT_PATH}/${QA_IMAGE_NAME}:master"
-DESTINATIONS="--destination=${QA_IMAGE} --destination=${QA_IMAGE_BRANCH}"
+DESTINATIONS="--tag ${QA_IMAGE} --tag ${QA_IMAGE_BRANCH}"
# Auto-deploy tag format uses first 12 letters of commit SHA. Tag with that
# reference also for EE images.
if [ "${QA_IMAGE_NAME}" == "gitlab-ee-qa" ]; then
QA_IMAGE_FOR_AUTO_DEPLOY="${CI_REGISTRY}/${CI_PROJECT_PATH}/${QA_IMAGE_NAME}:${CI_COMMIT_SHA:0:11}"
- DESTINATIONS="${DESTINATIONS} --destination=$QA_IMAGE_FOR_AUTO_DEPLOY"
+ DESTINATIONS="${DESTINATIONS} --tag $QA_IMAGE_FOR_AUTO_DEPLOY"
fi
echo "Building QA image for destinations: ${DESTINATIONS}"
-/kaniko/executor \
- --context="${CI_PROJECT_DIR}" \
- --dockerfile="${CI_PROJECT_DIR}/qa/Dockerfile" \
+docker buildx build \
+ --cache-to=type=inline \
+ --cache-from="$QA_IMAGE_BRANCH" \
+ --cache-from="$QA_IMAGE_MASTER" \
+ --platform=${ARCH:-amd64} \
--build-arg=CHROME_VERSION="${CHROME_VERSION}" \
--build-arg=DOCKER_VERSION="${DOCKER_VERSION}" \
+ --build-arg=RUBY_VERSION="${RUBY_VERSION}" \
--build-arg=QA_BUILD_TARGET="${QA_BUILD_TARGET:-qa}" \
- --cache=true \
- ${DESTINATIONS}
+ --file="${CI_PROJECT_DIR}/qa/Dockerfile" \
+ --push \
+ ${DESTINATIONS} \
+ ${CI_PROJECT_DIR}
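
The new IMAGE_TAG derivation relies on two standard shell parameter expansions; a quick illustration with example values:

    CI_COMMIT_TAG="v15.6.0-rc42"
    IMAGE_TAG=${CI_COMMIT_TAG#v}                  # strip one leading "v" if present -> 15.6.0-rc42
    IMAGE_TAG=${IMAGE_TAG:-$CI_COMMIT_REF_SLUG}   # fall back to the branch slug when there is no tag
    echo "$IMAGE_TAG"

So tag pipelines publish images tagged 15.6.0-rc42 rather than v15.6.0-rc42, while branch pipelines keep using the slug as before.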
diff --git a/scripts/create-pipeline-failure-incident.rb b/scripts/create-pipeline-failure-incident.rb
new file mode 100755
index 00000000000..c38f80699e6
--- /dev/null
+++ b/scripts/create-pipeline-failure-incident.rb
@@ -0,0 +1,174 @@
+#!/usr/bin/env ruby
+
+# frozen_string_literal: true
+
+require 'optparse'
+require 'json'
+
+require_relative 'api/pipeline_failed_jobs'
+require_relative 'api/create_issue'
+
+class CreatePipelineFailureIncident
+ DEFAULT_OPTIONS = {
+ project: nil,
+ incident_json_file: 'incident.json'
+ }.freeze
+ DEFAULT_LABELS = ['Engineering Productivity', 'master-broken:undetermined'].freeze
+
+ def initialize(options)
+ @project = options.delete(:project)
+ @api_token = options.delete(:api_token)
+ end
+
+ def execute
+ payload = {
+ issue_type: 'incident',
+ title: title,
+ description: description,
+ labels: incident_labels
+ }
+
+ CreateIssue.new(project: project, api_token: api_token).execute(payload)
+ end
+
+ private
+
+ attr_reader :project, :api_token
+
+ def failed_jobs
+ @failed_jobs ||= PipelineFailedJobs.new(API::DEFAULT_OPTIONS.dup.merge(exclude_allowed_to_fail_jobs: true)).execute
+ end
+
+ def now
+ @now ||= Time.now.utc
+ end
+
+ def title
+ "#{now.strftime('%A %F %R UTC')} - `#{ENV['CI_PROJECT_PATH']}` broken `#{ENV['CI_COMMIT_REF_NAME']}` " \
+ "with #{failed_jobs.size} failed jobs"
+ end
+
+ def description
+ <<~MARKDOWN
+ ## #{project_link} pipeline #{pipeline_link} failed
+
+ **Branch: #{branch_link}**
+
+ **Commit: #{commit_link}**
+
+ **Triggered by** #{triggered_by_link} • **Source:** #{source} • **Duration:** #{pipeline_duration} minutes
+
+ **Failed jobs (#{failed_jobs.size}):**
+
+ #{failed_jobs_list}
+
+ ### General guidelines
+
+ Follow the [Broken `master` handbook guide](https://about.gitlab.com/handbook/engineering/workflow/#broken-master).
+
+ ### Investigation
+
+ **Be sure to fill the `Timeline` for this incident.**
+
+ 1. If the failure is new, and looks like a potential flaky failure, you can retry the failing job.
+ Make sure to mention the retry in the `Timeline` and leave a link to the retried job.
+ 1. If the failure looks like a broken `master`, communicate the broken `master` in Slack using the "Broadcast Master Broken" workflow:
+ - Click the Shortcut lightning bolt icon in the `#master-broken` channel and select "Broadcast Master Broken".
+ - Click "Continue the broadcast" after the automated message in `#master-broken`.
+
+ ### Pre-resolution
+
+ If you believe that there's an easy resolution by either:
+
+ - Reverting a particular merge request.
+ - Making a quick fix (for example, one line or a few similar simple changes in a few lines).
+ You can create a merge request, assign it to any available maintainer, and ping people who were involved in or related to the introduction of the failure.
+ Additionally, a message can be posted in `#backend_maintainers` or `#frontend_maintainers` to get a maintainer to take a look at the fix ASAP.
+
+ In both cases, make sure to add the ~"pipeline:expedite-master-fixing" label, and `master:broken` or `master:foss-broken` label, to speed up the `master`-fixing pipelines.
+
+ ### Resolution
+
+ Follow [the Resolution steps from the handbook](https://about.gitlab.com/handbook/engineering/workflow/#responsibilities-of-the-resolution-dri).
+ MARKDOWN
+ end
+
+ def incident_labels
+ master_broken_label =
+ if ENV['CI_PROJECT_NAME'] == 'gitlab-foss'
+ 'master:foss-broken'
+ else
+ 'master:broken'
+ end
+
+ DEFAULT_LABELS.dup << master_broken_label
+ end
+
+ def pipeline_link
+ "[##{ENV['CI_PIPELINE_ID']}](#{ENV['CI_PIPELINE_URL']})"
+ end
+
+ def branch_link
+ "[`#{ENV['CI_COMMIT_REF_NAME']}`](#{ENV['CI_PROJECT_URL']}/-/commits/#{ENV['CI_COMMIT_REF_NAME']})"
+ end
+
+ def pipeline_duration
+ ((Time.now - Time.parse(ENV['CI_PIPELINE_CREATED_AT'])) / 60.to_f).round(2)
+ end
+
+ def commit_link
+ "[#{ENV['CI_COMMIT_TITLE']}](#{ENV['CI_PROJECT_URL']}/-/commit/#{ENV['CI_COMMIT_SHA']})"
+ end
+
+ def source
+ "`#{ENV['CI_PIPELINE_SOURCE']}`"
+ end
+
+ def project_link
+ "[#{ENV['CI_PROJECT_PATH']}](#{ENV['CI_PROJECT_URL']})"
+ end
+
+ def triggered_by_link
+ "[#{ENV['GITLAB_USER_NAME']}](#{ENV['CI_SERVER_URL']}/#{ENV['GITLAB_USER_LOGIN']})"
+ end
+
+ def failed_jobs_list_for_title
+ failed_jobs.map(&:name).join(', ')
+ end
+
+ def failed_jobs_list
+ failed_jobs.map { |job| "- [#{job.name}](#{job.web_url})" }.join("\n")
+ end
+end
+
+if $PROGRAM_NAME == __FILE__
+ options = CreatePipelineFailureIncident::DEFAULT_OPTIONS.dup
+
+ OptionParser.new do |opts|
+ opts.on("-p", "--project PROJECT", String, "Project where to create the incident (defaults to "\
+ "`#{CreatePipelineFailureIncident::DEFAULT_OPTIONS[:project]}`)") do |value|
+ options[:project] = value
+ end
+
+ opts.on("-f", "--incident-json-file file_path", String, "Path to a file where to save the incident JSON data "\
+ "(defaults to `#{CreatePipelineFailureIncident::DEFAULT_OPTIONS[:incident_json_file]}`)") do |value|
+ options[:incident_json_file] = value
+ end
+
+ opts.on("-t", "--api-token API_TOKEN", String, "A valid Project token with the `Reporter` role and `api` scope "\
+ "to create the incident") do |value|
+ options[:api_token] = value
+ end
+
+ opts.on("-h", "--help", "Prints this help") do
+ puts opts
+ exit
+ end
+ end.parse!
+
+ incident_json_file = options.delete(:incident_json_file)
+
+ CreatePipelineFailureIncident.new(options).execute.tap do |incident|
+ File.write(incident_json_file, JSON.pretty_generate(incident.to_h)) if incident_json_file
+ end
+end
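
A minimal invocation sketch, using the flag names defined in the OptionParser block above (the project path and token are placeholders, not values from this commit):

    scripts/create-pipeline-failure-incident.rb \
      --project "<group/project>" \
      --api-token "<project-token-with-reporter-role-and-api-scope>" \
      --incident-json-file incident.json

The created incident is also written to incident.json, which generate-failed-pipeline-slack-message.rb (below) reads so the Slack message can link to it.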
diff --git a/scripts/generate-e2e-pipeline b/scripts/generate-e2e-pipeline
index 0d46a117719..aef2447e800 100755
--- a/scripts/generate-e2e-pipeline
+++ b/scripts/generate-e2e-pipeline
@@ -25,9 +25,12 @@ variables:
GITLAB_QA_CACHE_KEY: "$qa_cache_key"
GITLAB_VERSION: "$(cat VERSION)"
COLORIZED_LOGS: "true"
- QA_TESTS: "$QA_TESTS"
- QA_FEATURE_FLAGS: "${QA_FEATURE_FLAGS}"
+ QA_EXPORT_TEST_METRICS: "${QA_EXPORT_TEST_METRICS:-true}"
+ QA_SAVE_TEST_METRICS: "${QA_SAVE_TEST_METRICS:-false}"
+ QA_RUN_ALL_TESTS: "${QA_RUN_ALL_TESTS:-false}"
QA_FRAMEWORK_CHANGES: "${QA_FRAMEWORK_CHANGES:-false}"
+ QA_FEATURE_FLAGS: "${QA_FEATURE_FLAGS}"
+ QA_TESTS: "$QA_TESTS"
QA_SUITES: "$QA_SUITES"
YML
)
diff --git a/scripts/generate-failed-pipeline-slack-message.rb b/scripts/generate-failed-pipeline-slack-message.rb
index 699e32872e6..b695cdfdbee 100755
--- a/scripts/generate-failed-pipeline-slack-message.rb
+++ b/scripts/generate-failed-pipeline-slack-message.rb
@@ -2,21 +2,23 @@
# frozen_string_literal: true
-require_relative 'api/pipeline_failed_jobs'
+require 'optparse'
+require 'json'
-finder_options = API::DEFAULT_OPTIONS.dup.merge(exclude_allowed_to_fail_jobs: true)
-failed_jobs = PipelineFailedJobs.new(finder_options).execute
+require_relative 'api/pipeline_failed_jobs'
-class SlackReporter
- DEFAULT_FAILED_PIPELINE_REPORT_FILE = 'failed_pipeline_report.json'
+class GenerateFailedPipelineSlackMessage
+ DEFAULT_OPTIONS = {
+ failed_pipeline_slack_message_file: 'failed_pipeline_slack_message.json',
+ incident_json_file: 'incident.json'
+ }.freeze
- def initialize(failed_jobs)
- @failed_jobs = failed_jobs
- @failed_pipeline_report_file = ENV.fetch('FAILED_PIPELINE_REPORT_FILE', DEFAULT_FAILED_PIPELINE_REPORT_FILE)
+ def initialize(options)
+ @incident_json_file = options.delete(:incident_json_file)
end
- def report
- payload = {
+ def execute
+ {
channel: ENV['SLACK_CHANNEL'],
username: "Failed pipeline reporter",
icon_emoji: ":boom:",
@@ -27,33 +29,36 @@ class SlackReporter
text: {
type: "mrkdwn",
text: "*#{title}*"
+ },
+ accessory: {
+ type: "button",
+ text: {
+ type: "plain_text",
+ text: incident_button_text
+ },
+ url: incident_button_link
}
},
{
type: "section",
- fields: [
- {
- type: "mrkdwn",
- text: "*Commit*\n#{commit_link}"
- },
- {
- type: "mrkdwn",
- text: "*Triggered by*\n#{triggered_by_link}"
- }
- ]
+ text: {
+ type: "mrkdwn",
+ text: "*Branch*: #{branch_link}"
+ }
},
{
type: "section",
- fields: [
- {
- type: "mrkdwn",
- text: "*Source*\n#{source} from #{project_link}"
- },
- {
- type: "mrkdwn",
- text: "*Duration*\n#{pipeline_duration} minutes"
- }
- ]
+ text: {
+ type: "mrkdwn",
+ text: "*Commit*: #{commit_link}"
+ }
+ },
+ {
+ type: "section",
+ text: {
+ type: "mrkdwn",
+ text: "*Triggered by* #{triggered_by_link} • *Source:* #{source} • *Duration:* #{pipeline_duration} minutes"
+ }
},
{
type: "section",
@@ -64,16 +69,47 @@ class SlackReporter
}
]
}
-
- File.write(failed_pipeline_report_file, JSON.pretty_generate(payload))
end
private
- attr_reader :failed_jobs, :failed_pipeline_report_file
+ attr_reader :incident_json_file
+
+ def failed_jobs
+ @failed_jobs ||= PipelineFailedJobs.new(API::DEFAULT_OPTIONS.dup.merge(exclude_allowed_to_fail_jobs: true)).execute
+ end
def title
- "Pipeline #{pipeline_link} for #{branch_link} failed"
+ "#{project_link} pipeline #{pipeline_link} failed"
+ end
+
+ def incident_exist?
+ return @incident_exist if defined?(@incident_exist)
+
+ @incident_exist = File.exist?(incident_json_file)
+ end
+
+ def incident
+ return unless incident_exist?
+
+ @incident ||= JSON.parse(File.read(incident_json_file))
+ end
+
+ def incident_button_text
+ if incident_exist?
+ "View incident ##{incident['iid']}"
+ else
+ 'Create incident'
+ end
+ end
+
+ def incident_button_link
+ if incident_exist?
+ incident['web_url']
+ else
+ "#{ENV['CI_SERVER_URL']}/#{ENV['BROKEN_MASTER_INCIDENTS_PROJECT']}/-/issues/new?" \
+ "issuable_template=incident&issue%5Bissue_type%5D=incident"
+ end
end
def pipeline_link
@@ -93,11 +129,15 @@ class SlackReporter
end
def source
- "`#{ENV['CI_PIPELINE_SOURCE']}`"
+ "`#{ENV['CI_PIPELINE_SOURCE']}#{schedule_type}`"
+ end
+
+ def schedule_type
+ ENV['CI_PIPELINE_SOURCE'] == 'schedule' ? ": #{ENV['SCHEDULE_TYPE']}" : ''
end
def project_link
- "<#{ENV['CI_PROJECT_URL']}|#{ENV['CI_PROJECT_NAME']}>"
+ "<#{ENV['CI_PROJECT_URL']}|#{ENV['CI_PROJECT_PATH']}>"
end
def triggered_by_link
@@ -109,4 +149,33 @@ class SlackReporter
end
end
-SlackReporter.new(failed_jobs).report
+if $PROGRAM_NAME == __FILE__
+ options = GenerateFailedPipelineSlackMessage::DEFAULT_OPTIONS.dup
+
+ OptionParser.new do |opts|
+ opts.on("-i", "--incident-json-file file_path", String, "Path to a file where the incident JSON data "\
+ "can be found (defaults to "\
+ "`#{GenerateFailedPipelineSlackMessage::DEFAULT_OPTIONS[:incident_json_file]}`)") do |value|
+ options[:incident_json_file] = value
+ end
+
+ opts.on("-f", "--failed-pipeline-slack-message-file file_path", String, "Path to a file where to save the Slack "\
+ "message (defaults to "\
+ "`#{GenerateFailedPipelineSlackMessage::DEFAULT_OPTIONS[:failed_pipeline_slack_message_file]}`)") do |value|
+ options[:failed_pipeline_slack_message_file] = value
+ end
+
+ opts.on("-h", "--help", "Prints this help") do
+ puts opts
+ exit
+ end
+ end.parse!
+
+ failed_pipeline_slack_message_file = options.delete(:failed_pipeline_slack_message_file)
+
+ GenerateFailedPipelineSlackMessage.new(options).execute.tap do |message_payload|
+ if failed_pipeline_slack_message_file
+ File.write(failed_pipeline_slack_message_file, JSON.pretty_generate(message_payload))
+ end
+ end
+end
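
A minimal invocation sketch, using the flag names and default file names from DEFAULT_OPTIONS above:

    scripts/generate-failed-pipeline-slack-message.rb \
      --incident-json-file incident.json \
      --failed-pipeline-slack-message-file failed_pipeline_slack_message.json

If incident.json is missing, the accessory button falls back to a "Create incident" link built from CI_SERVER_URL and BROKEN_MASTER_INCIDENTS_PROJECT instead of pointing at an existing incident.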
diff --git a/scripts/glfm/run-snapshot-tests.sh b/scripts/glfm/run-snapshot-tests.sh
index 6a66d8fbd9a..0a3891e243a 100755
--- a/scripts/glfm/run-snapshot-tests.sh
+++ b/scripts/glfm/run-snapshot-tests.sh
@@ -24,6 +24,9 @@ printf "\nStarting GLFM snapshot example tests. See https://docs.gitlab.com/ee/d
printf "Set 'FOCUSED_MARKDOWN_EXAMPLES=example_name_1[,...]' for focused examples, with example name(s) from https://docs.gitlab.com/ee/development/gitlab_flavored_markdown/specification_guide/#glfm_specificationexample_snapshotsexamples_indexyml.\n"
printf "${Color_Off}"
+# NOTE: Unlike the backend markdown_snapshot_spec.rb which has a CE and EE version, there is only
+# one version of this spec. This is because the frontend markdown rendering does not require EE-only
+# backend features.
printf "\n${BBlue}Running frontend 'yarn jest spec/frontend/content_editor/markdown_snapshot_spec.js'...${Color_Off}\n\n"
yarn jest spec/frontend/content_editor/markdown_snapshot_spec.js
printf "\n${BBlue}'yarn jest spec/frontend/content_editor/markdown_snapshot_spec.js' passed!${Color_Off}\n\n"
diff --git a/scripts/lib/glfm/constants.rb b/scripts/lib/glfm/constants.rb
index d020d2fec5c..c432e5495dd 100644
--- a/scripts/lib/glfm/constants.rb
+++ b/scripts/lib/glfm/constants.rb
@@ -15,24 +15,25 @@ module Glfm
# GitLab Flavored Markdown specification files
specification_input_glfm_path = specification_path.join('input/gitlab_flavored_markdown')
- GLFM_INTRO_MD_PATH = specification_input_glfm_path.join('glfm_intro.md')
- GLFM_OFFICIAL_SPECIFICATION_EXAMPLES_MD_PATH =
- specification_input_glfm_path.join('glfm_official_specification_examples.md')
- GLFM_INTERNAL_EXTENSION_EXAMPLES_MD_PATH = specification_input_glfm_path.join('glfm_internal_extension_examples.md')
+ GLFM_OFFICIAL_SPECIFICATION_MD_PATH =
+ specification_input_glfm_path.join('glfm_official_specification.md')
+ GLFM_INTERNAL_EXTENSIONS_MD_PATH = specification_input_glfm_path.join('glfm_internal_extensions.md')
GLFM_EXAMPLE_STATUS_YML_PATH = specification_input_glfm_path.join('glfm_example_status.yml')
GLFM_EXAMPLE_METADATA_YML_PATH =
specification_input_glfm_path.join('glfm_example_metadata.yml')
GLFM_EXAMPLE_NORMALIZATIONS_YML_PATH = specification_input_glfm_path.join('glfm_example_normalizations.yml')
- GLFM_SPEC_OUTPUT_PATH = specification_path.join('output')
- GLFM_SPEC_TXT_PATH = GLFM_SPEC_OUTPUT_PATH.join('spec.txt')
- GLFM_SPEC_HTML_PATH = GLFM_SPEC_OUTPUT_PATH.join('spec.html')
+ GLFM_OUTPUT_SPEC_PATH = specification_path.join('output_spec')
+ GLFM_SPEC_TXT_PATH = GLFM_OUTPUT_SPEC_PATH.join('spec.txt')
+ GLFM_SPEC_HTML_PATH = GLFM_OUTPUT_SPEC_PATH.join('spec.html')
# Example Snapshot (ES) files
- EXAMPLE_SNAPSHOTS_PATH = File.expand_path("../../../glfm_specification/example_snapshots", __dir__)
- ES_EXAMPLES_INDEX_YML_PATH = File.join(EXAMPLE_SNAPSHOTS_PATH, 'examples_index.yml')
- ES_MARKDOWN_YML_PATH = File.join(EXAMPLE_SNAPSHOTS_PATH, 'markdown.yml')
- ES_HTML_YML_PATH = File.join(EXAMPLE_SNAPSHOTS_PATH, 'html.yml')
- ES_PROSEMIRROR_JSON_YML_PATH = File.join(EXAMPLE_SNAPSHOTS_PATH, 'prosemirror_json.yml')
+ ES_OUTPUT_EXAMPLE_SNAPSHOTS_PATH = specification_path.join('output_example_snapshots')
+ ES_SNAPSHOT_SPEC_MD_PATH = ES_OUTPUT_EXAMPLE_SNAPSHOTS_PATH.join('snapshot_spec.md')
+ ES_SNAPSHOT_SPEC_HTML_PATH = ES_OUTPUT_EXAMPLE_SNAPSHOTS_PATH.join('snapshot_spec.html')
+ ES_EXAMPLES_INDEX_YML_PATH = ES_OUTPUT_EXAMPLE_SNAPSHOTS_PATH.join('examples_index.yml')
+ ES_MARKDOWN_YML_PATH = ES_OUTPUT_EXAMPLE_SNAPSHOTS_PATH.join('markdown.yml')
+ ES_HTML_YML_PATH = ES_OUTPUT_EXAMPLE_SNAPSHOTS_PATH.join('html.yml')
+ ES_PROSEMIRROR_JSON_YML_PATH = ES_OUTPUT_EXAMPLE_SNAPSHOTS_PATH.join('prosemirror_json.yml')
# Other constants used for processing files
GLFM_SPEC_TXT_HEADER = <<~MARKDOWN
@@ -41,8 +42,13 @@ module Glfm
version: alpha
...
MARKDOWN
- INTRODUCTION_HEADER_LINE_TEXT = /\A# Introduction\Z/.freeze
- END_TESTS_COMMENT_LINE_TEXT = /\A<!-- END TESTS -->\Z/.freeze
+ EXAMPLE_BACKTICKS_LENGTH = 32
+ EXAMPLE_BACKTICKS_STRING = '`' * EXAMPLE_BACKTICKS_LENGTH
+ EXAMPLE_BEGIN_STRING = "#{EXAMPLE_BACKTICKS_STRING} example"
+ EXAMPLE_END_STRING = EXAMPLE_BACKTICKS_STRING
+ INTRODUCTION_HEADER_LINE_TEXT = '# Introduction'
+ BEGIN_TESTS_COMMENT_LINE_TEXT = '<!-- BEGIN TESTS -->'
+ END_TESTS_COMMENT_LINE_TEXT = '<!-- END TESTS -->'
MARKDOWN_TEMPFILE_BASENAME = %w[MARKDOWN_TEMPFILE_ .yml].freeze
METADATA_TEMPFILE_BASENAME = %w[METADATA_TEMPFILE_ .yml].freeze
STATIC_HTML_TEMPFILE_BASENAME = %w[STATIC_HTML_TEMPFILE_ .yml].freeze
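
For orientation, EXAMPLE_BACKTICKS_STRING, EXAMPLE_BEGIN_STRING, and EXAMPLE_END_STRING describe the fenced example blocks used in spec.txt and snapshot_spec.md: a fence of 32 backticks followed by " example" (optionally with extension annotations), the Markdown input, a line containing a single ".", the expected HTML, and a closing 32-backtick fence. A hypothetical example block:

    ```````````````````````````````` example
    *hello*
    .
    <p><em>hello</em></p>
    ````````````````````````````````

split_examples_into_html_and_md in update_specification.rb (later in this diff) replaces the lone "." separator with a closing and reopening fence so the Markdown and HTML halves render as two separate code blocks.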
diff --git a/scripts/lib/glfm/parse_examples.rb b/scripts/lib/glfm/parse_examples.rb
index a15a6ecc47b..aedca274889 100644
--- a/scripts/lib/glfm/parse_examples.rb
+++ b/scripts/lib/glfm/parse_examples.rb
@@ -1,5 +1,7 @@
# frozen_string_literal: true
+require_relative 'constants'
+
# This module contains a Ruby port of Python logic from the `get_tests` method of the
# `spec_test.py` script (see copy of original code in a comment at the bottom of this file):
# https://github.com/github/cmark-gfm/blob/5dfedc7/test/spec_tests.py#L82
@@ -20,11 +22,11 @@
# in `scripts/lib/glfm/update_example_snapshots.rb`
module Glfm
module ParseExamples
+ include Constants
+
REGULAR_TEXT = 0
MARKDOWN_EXAMPLE = 1
HTML_OUTPUT = 2
- EXAMPLE_BACKTICKS_LENGTH = 32
- EXAMPLE_BACKTICKS_STRING = '`' * EXAMPLE_BACKTICKS_LENGTH
# rubocop:disable Metrics/CyclomaticComplexity, Metrics/PerceivedComplexity, Metrics/AbcSize
def parse_examples(spec_txt_lines)
@@ -47,11 +49,11 @@ module Glfm
spec_txt_lines.each do |line|
line_number += 1
stripped_line = line.strip
- if stripped_line.start_with?("#{EXAMPLE_BACKTICKS_STRING} example")
+ if stripped_line.start_with?(EXAMPLE_BEGIN_STRING)
# If beginning line of an example block...
state = MARKDOWN_EXAMPLE
extensions = stripped_line[(EXAMPLE_BACKTICKS_LENGTH + " example".length)..].split
- elsif stripped_line == EXAMPLE_BACKTICKS_STRING
+ elsif stripped_line == EXAMPLE_END_STRING
# Else if end line of an example block...
state = REGULAR_TEXT
example_number += 1
diff --git a/scripts/lib/glfm/update_example_snapshots.rb b/scripts/lib/glfm/update_example_snapshots.rb
index 9075260e748..8f817d0173e 100644
--- a/scripts/lib/glfm/update_example_snapshots.rb
+++ b/scripts/lib/glfm/update_example_snapshots.rb
@@ -30,12 +30,12 @@ module Glfm
def process(skip_static_and_wysiwyg: false)
output('Updating example snapshots...')
- output("Reading #{GLFM_SPEC_TXT_PATH}...")
- glfm_spec_txt_lines = File.open(GLFM_SPEC_TXT_PATH).readlines
+ output("Reading #{ES_SNAPSHOT_SPEC_MD_PATH}...")
+ es_snapshot_spec_md_lines = File.open(ES_SNAPSHOT_SPEC_MD_PATH).readlines
- # Parse all the examples from `spec.txt`, using a Ruby port of the Python `get_tests`
+ # Parse all the examples from `snapshot_spec.md`, using a Ruby port of the Python `get_tests`
# function the from original CommonMark/GFM `spec_test.py` script.
- all_examples = parse_examples(glfm_spec_txt_lines)
+ all_examples = parse_examples(es_snapshot_spec_md_lines)
add_example_names(all_examples)
@@ -55,7 +55,7 @@ module Glfm
# in the H1 header count. So, even though due to the concatenation it appears before the
# GitLab examples sections, it doesn't result in their header counts being off by +1.
# 5. If an example contains the 'disabled' string extension, it is skipped (and will thus
- # result in a skip in the `spec_txt_example_position`). This behavior is taken from the
+ # result in a skip in the `spec_example_position`). This behavior is taken from the
# GFM `spec_test.py` script (but it's NOT in the original CommonMark `spec_test.py`).
# 6. If a section contains ONLY disabled examples, the section numbering will still be
# incremented to match the rendered HTML specification section numbering.
@@ -202,7 +202,7 @@ module Glfm
) do |example, hash|
name = example.fetch(:name).to_sym
hash[name] = {
- 'spec_txt_example_position' => example.fetch(:example),
+ 'spec_example_position' => example.fetch(:example),
'source_specification' => source_specification_for_extensions(example.fetch(:extensions))
}
end
diff --git a/scripts/lib/glfm/update_specification.rb b/scripts/lib/glfm/update_specification.rb
index c7264547e44..b87005bdb90 100644
--- a/scripts/lib/glfm/update_specification.rb
+++ b/scripts/lib/glfm/update_specification.rb
@@ -23,17 +23,42 @@ module Glfm
def process(skip_spec_html_generation: false)
output('Updating specification...')
+ # read and optionally update `input/github_flavored_markdown/ghfm_spec_v_x.yy.md`
ghfm_spec_lines = load_ghfm_spec
- glfm_spec_txt_string = build_glfm_spec_txt(ghfm_spec_lines)
+
+ # create `output_spec/spec.txt`
+ glfm_spec_txt_header_lines = GLFM_SPEC_TXT_HEADER.split("\n").map { |line| "#{line}\n" }
+ official_spec_lines = readlines_from_path!(GLFM_OFFICIAL_SPECIFICATION_MD_PATH)
+
+ glfm_spec_txt_string = (glfm_spec_txt_header_lines + official_spec_lines).join('')
write_glfm_spec_txt(glfm_spec_txt_string)
+ # create `output_example_snapshots/snapshot_spec.md`
+ ghfm_spec_example_lines = extract_ghfm_spec_example_lines(ghfm_spec_lines)
+ official_spec_example_lines =
+ extract_glfm_spec_example_lines(official_spec_lines, GLFM_OFFICIAL_SPECIFICATION_MD_PATH)
+ internal_extension_lines = readlines_from_path!(GLFM_INTERNAL_EXTENSIONS_MD_PATH)
+ internal_extension_example_lines =
+ extract_glfm_spec_example_lines(internal_extension_lines, GLFM_INTERNAL_EXTENSIONS_MD_PATH)
+ snapshot_spec_md_string = (
+ glfm_spec_txt_header_lines +
+ ghfm_spec_example_lines +
+ official_spec_example_lines +
+ ["\n"] +
+ internal_extension_example_lines
+ ).join('')
+ write_snapshot_spec_md(snapshot_spec_md_string)
+
if skip_spec_html_generation
- output("Skipping GLFM spec.html generation...")
+ output("Skipping GLFM spec.html and snapshot_spec.html generation...")
return
end
- glfm_spec_html_string = generate_glfm_spec_html(glfm_spec_txt_string)
- write_glfm_spec_html(glfm_spec_html_string)
+ # create `output_spec/spec.html` and `output_snapshot_examples/snapshot_spec.html`
+ spec_html_string, snapshot_spec_html_string =
+ generate_spec_html_files(glfm_spec_txt_string, snapshot_spec_md_string)
+ write_spec_html(spec_html_string)
+ write_snapshot_spec_html(snapshot_spec_html_string)
end
private
@@ -68,9 +93,7 @@ module Glfm
ghfm_spec_txt_uri_parsed = URI.parse(GHFM_SPEC_TXT_URI)
ghfm_spec_txt_uri_io = ghfm_spec_txt_uri_parsed.open
- # Read IO stream into an array of lines for easy processing later
- ghfm_spec_lines = ghfm_spec_txt_uri_io.readlines
- raise "Unable to read lines from #{GHFM_SPEC_TXT_URI}" if ghfm_spec_lines.empty?
+ ghfm_spec_lines = readlines_from_io!(ghfm_spec_txt_uri_io, GHFM_SPEC_TXT_URI)
# Make sure the GHFM spec version has not changed
validate_expected_spec_version!(ghfm_spec_lines[2])
@@ -95,59 +118,42 @@ module Glfm
"Expected 'version: #{GHFM_SPEC_VERSION}', got '#{version_line}'"
end
- def build_glfm_spec_txt(ghfm_spec_txt_lines)
- glfm_spec_txt_lines = ghfm_spec_txt_lines.dup
- replace_header(glfm_spec_txt_lines)
- replace_intro_section(glfm_spec_txt_lines)
- insert_examples(glfm_spec_txt_lines)
- glfm_spec_txt_lines.join('')
- end
-
- def replace_header(spec_txt_lines)
- spec_txt_lines[0, spec_txt_lines.index("...\n") + 1] = GLFM_SPEC_TXT_HEADER
- end
-
- def replace_intro_section(spec_txt_lines)
- glfm_intro_md_lines = File.open(GLFM_INTRO_MD_PATH).readlines
- raise "Unable to read lines from #{GLFM_INTRO_MD_PATH}" if glfm_intro_md_lines.empty?
-
- ghfm_intro_header_begin_index = spec_txt_lines.index do |line|
- line =~ INTRODUCTION_HEADER_LINE_TEXT
+ def extract_ghfm_spec_example_lines(spec_lines)
+ # In the GHFM spec.txt format, all we have to identify the headers containing examples
+ # is the presence of a single initial H1 named "Introduction" before the first
+ # header containing examples, and the <!-- END TESTS --> comment after the last header
+ # containing examples.
+ path = GHFM_SPEC_MD_PATH
+ first_examples_header_index = spec_lines.index do |line|
+ line.start_with?('# ') && !line.start_with?(INTRODUCTION_HEADER_LINE_TEXT)
end
- raise "Unable to locate introduction header line in #{GHFM_SPEC_MD_PATH}" if ghfm_intro_header_begin_index.nil?
+ raise "Unable to find first examples header in #{path}" unless first_examples_header_index
- # Find the index of the next header after the introduction header, starting from the index
- # of the introduction header this is the length of the intro section
- ghfm_intro_section_length = spec_txt_lines[ghfm_intro_header_begin_index + 1..].index do |line|
- line.start_with?('# ')
+ end_tests_comment_index = spec_lines.index do |line|
+ line.start_with?(END_TESTS_COMMENT_LINE_TEXT)
end
+ raise "Unable to locate 'END TESTS' comment line in #{path}" if end_tests_comment_index.nil?
- # Replace the intro section with the GitLab flavored Markdown intro section
- spec_txt_lines[ghfm_intro_header_begin_index, ghfm_intro_section_length] = glfm_intro_md_lines
+ spec_lines[first_examples_header_index..(end_tests_comment_index - 1)]
end
- def insert_examples(spec_txt_lines)
- official_spec_lines = File.open(GLFM_OFFICIAL_SPECIFICATION_EXAMPLES_MD_PATH).readlines
- raise "Unable to read lines from #{GLFM_OFFICIAL_SPECIFICATION_EXAMPLES_MD_PATH}" if official_spec_lines.empty?
-
- internal_extension_lines = File.open(GLFM_INTERNAL_EXTENSION_EXAMPLES_MD_PATH).readlines
- raise "Unable to read lines from #{GLFM_INTERNAL_EXTENSION_EXAMPLES_MD_PATH}" if internal_extension_lines.empty?
+ def extract_glfm_spec_example_lines(spec_lines, path)
+ # In the GLFM input markdown files (unlike the GLFM spec.txt format), we have control over
+ # the contents, so we can use explicit <!-- BEGIN TESTS --> and <!-- END TESTS -->
+ # comment lines to delimit the section containing the examples.
+ begin_tests_comment_line_index = spec_lines.index do |line|
+ line.start_with?(BEGIN_TESTS_COMMENT_LINE_TEXT)
+ end
+ raise "Unable to locate 'BEGIN TESTS' comment line in #{path}" unless begin_tests_comment_line_index
- ghfm_end_tests_comment_index = spec_txt_lines.index do |line|
- line =~ END_TESTS_COMMENT_LINE_TEXT
+ end_tests_comment_index = spec_lines.index do |line|
+ line.start_with?(END_TESTS_COMMENT_LINE_TEXT)
end
- raise "Unable to locate 'END TESTS' comment line in #{GHFM_SPEC_MD_PATH}" if ghfm_end_tests_comment_index.nil?
-
- # Insert the GLFM examples before the 'END TESTS' comment line
- spec_txt_lines[ghfm_end_tests_comment_index - 1] = [
- "\n",
- official_spec_lines,
- "\n",
- internal_extension_lines,
- "\n"
- ].flatten
-
- spec_txt_lines
+ raise "Unable to locate 'END TESTS' comment line in #{path}" if end_tests_comment_index.nil?
+
+ spec_lines[(begin_tests_comment_line_index + 1)..(end_tests_comment_index - 1)]
end
def write_glfm_spec_txt(glfm_spec_txt_string)
@@ -156,13 +162,24 @@ module Glfm
write_file(GLFM_SPEC_TXT_PATH, glfm_spec_txt_string)
end
- def generate_glfm_spec_html(glfm_spec_txt_string)
- output("Generating spec.html from spec.txt markdown...")
+ def write_snapshot_spec_md(snapshot_spec_md_string)
+ output("Writing #{ES_SNAPSHOT_SPEC_MD_PATH}...")
+ FileUtils.mkdir_p(Pathname.new(ES_SNAPSHOT_SPEC_MD_PATH).dirname)
+ write_file(ES_SNAPSHOT_SPEC_MD_PATH, snapshot_spec_md_string)
+ end
+
+ def generate_spec_html_files(spec_txt_string, snapshot_spec_md_string)
+ output("Generating spec.html and snapshot_spec.html from spec.txt and snapshot_spec.md markdown...")
+
+ spec_txt_string_split_examples = split_examples_into_html_and_md(spec_txt_string)
+ snapshot_spec_md_string_split_examples = split_examples_into_html_and_md(snapshot_spec_md_string)
input_markdown_yml_string = <<~MARKDOWN
---
spec_txt: |
- #{glfm_spec_txt_string.gsub(/^/, ' ')}
+ #{spec_txt_string_split_examples.gsub(/^/, ' ')}
+ snapshot_spec_md: |
+ #{snapshot_spec_md_string_split_examples.gsub(/^/, ' ')}
MARKDOWN
# NOTE: We must copy the input YAML file used by the `render_static_html.rb`
@@ -190,14 +207,40 @@ module Glfm
cmd = %(bin/rspec #{__dir__}/render_static_html.rb)
run_external_cmd(cmd)
- output("Reading generated spec.html from tempfile #{static_html_tempfile_path}...")
- YAML.safe_load(File.open(static_html_tempfile_path), symbolize_names: true).fetch(:spec_txt)
+ output("Reading generated html from tempfile #{static_html_tempfile_path}...")
+ rendered_html_hash = YAML.safe_load(File.open(static_html_tempfile_path), symbolize_names: true)
+ [rendered_html_hash.fetch(:spec_txt), rendered_html_hash.fetch(:snapshot_spec_md)]
+ end
+
+ def split_examples_into_html_and_md(spec_md_string)
+ spec_md_string.gsub(
+ /(^#{EXAMPLE_BEGIN_STRING}.*?$(?:.|\n)*?)^\.$(\n(?:.|\n)*?^#{EXAMPLE_END_STRING}$)/mo,
+ "\\1#{EXAMPLE_BACKTICKS_STRING}\n\n#{EXAMPLE_BACKTICKS_STRING}\\2"
+ )
end
- def write_glfm_spec_html(glfm_spec_html_string)
+ def write_spec_html(spec_html_string)
output("Writing #{GLFM_SPEC_TXT_PATH}...")
FileUtils.mkdir_p(Pathname.new(GLFM_SPEC_HTML_PATH).dirname)
- write_file(GLFM_SPEC_HTML_PATH, "#{glfm_spec_html_string}\n")
+ write_file(GLFM_SPEC_HTML_PATH, "#{spec_html_string}\n")
+ end
+
+ def write_snapshot_spec_html(snapshot_spec_html_string)
+ output("Writing #{ES_SNAPSHOT_SPEC_HTML_PATH}...")
+ FileUtils.mkdir_p(Pathname.new(ES_SNAPSHOT_SPEC_HTML_PATH).dirname)
+ write_file(ES_SNAPSHOT_SPEC_HTML_PATH, "#{snapshot_spec_html_string}\n")
+ end
+
+ def readlines_from_path!(path)
+ io = File.open(path)
+ readlines_from_io!(io, path)
+ end
+
+ def readlines_from_io!(io, uri_or_path)
+ lines = io.readlines
+ raise "Unable to read lines from #{uri_or_path}" if lines.empty?
+
+ lines
end
end
end
diff --git a/scripts/lib/glfm/verify_all_generated_files_are_up_to_date.rb b/scripts/lib/glfm/verify_all_generated_files_are_up_to_date.rb
index 0b824fc589d..3d4570f74e5 100644
--- a/scripts/lib/glfm/verify_all_generated_files_are_up_to_date.rb
+++ b/scripts/lib/glfm/verify_all_generated_files_are_up_to_date.rb
@@ -12,7 +12,7 @@ module Glfm
include Shared
def process
- verify_cmd = "git status --porcelain #{GLFM_SPEC_OUTPUT_PATH} #{EXAMPLE_SNAPSHOTS_PATH}"
+ verify_cmd = "git status --porcelain #{GLFM_OUTPUT_SPEC_PATH} #{ES_OUTPUT_EXAMPLE_SNAPSHOTS_PATH}"
verify_cmd_output = run_external_cmd(verify_cmd)
unless verify_cmd_output.empty?
msg = "ERROR: Cannot run `#{__FILE__}` because `#{verify_cmd}` shows the following uncommitted changes:\n" \
@@ -41,8 +41,20 @@ module Glfm
return if verify_cmd_output.empty?
- raise "The following files were modified by running GLFM scripts. Please review, verify, and commit " \
- "the changes:\n#{verify_cmd_output}"
+ warn(
+ "ERROR: The following files were modified by running GLFM scripts. Please review, verify, and commit " \
+ "the changes:\n#{verify_cmd_output}\n"
+ )
+ warn("See the CI artifacts for the modified version of the files.\n")
+
+ warn("This is the output of `git diff`:\n")
+ diff_output = run_external_cmd('git diff')
+ warn(diff_output)
+
+ # Ensure that the diff output is flushed and output before we raise and exit.
+ $stderr.flush
+
+ raise('ERROR: The generated files are not up to date.')
end
end
end
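
The guard at the top amounts to requiring a clean working tree under the two generated-output directories before the scripts run. A rough shell equivalent (the two paths stand in for GLFM_OUTPUT_SPEC_PATH and ES_OUTPUT_EXAMPLE_SNAPSHOTS_PATH, whose actual values are defined in scripts/lib/glfm/constants.rb):

    if [ -n "$(git status --porcelain "<output_spec dir>" "<output_example_snapshots dir>")" ]; then
      echo "ERROR: uncommitted changes under the GLFM output directories" >&2
      exit 1
    fi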
diff --git a/scripts/license-check.sh b/scripts/license-check.sh
index c2d5cfd8cde..2a210754a0d 100755
--- a/scripts/license-check.sh
+++ b/scripts/license-check.sh
@@ -1,4 +1,5 @@
-#!/bin/sh
+#!/usr/bin/env bash
+set -euo pipefail
#
# This script runs the LicenseFinder gem to verify that all licenses are
# compliant. However, bundler v2.2+ and LicenseFinder do not play well
diff --git a/scripts/lint-doc.sh b/scripts/lint-doc.sh
index f954b2d8106..68dfac95ef6 100755
--- a/scripts/lint-doc.sh
+++ b/scripts/lint-doc.sh
@@ -151,7 +151,7 @@ if [ -z "${MD_DOC_PATH}" ]
then
echo "Merged results pipeline detected, but no markdown files found. Skipping."
else
- run_locally_or_in_docker 'markdownlint' "--config .markdownlint.yml ${MD_DOC_PATH}"
+ run_locally_or_in_docker 'markdownlint' "--config .markdownlint.yml ${MD_DOC_PATH} --rules doc/.markdownlint/rules"
fi
echo '=> Linting prose...'
diff --git a/scripts/lint_templates_bash.rb b/scripts/lint_templates_bash.rb
index 8db9469ecdf..cd36bb629ab 100755
--- a/scripts/lint_templates_bash.rb
+++ b/scripts/lint_templates_bash.rb
@@ -58,6 +58,12 @@ module LintTemplatesBash
def check_template(template)
parsed = process_content(template.content)
+
+ unless parsed.valid?
+ warn "#{template.full_name} is invalid: #{parsed.errors.inspect}"
+ return true
+ end
+
results = parsed.jobs.map do |name, job|
out, success = check_job(job)
diff --git a/scripts/merge-reports b/scripts/merge-reports
index a1164495f2f..43374d134d4 100755
--- a/scripts/merge-reports
+++ b/scripts/merge-reports
@@ -14,6 +14,8 @@ main_report = JSON.parse(File.read(main_report_file))
new_report = main_report.dup
ARGV.each do |report_file|
+ next unless File.exist?(report_file)
+
report = JSON.parse(File.read(report_file))
# Remove existing values
diff --git a/scripts/perf/gc/print_gc_stats.rb b/scripts/perf/gc/print_gc_stats.rb
index 4aeb2f1ef07..a15b6f5b0e0 100755
--- a/scripts/perf/gc/print_gc_stats.rb
+++ b/scripts/perf/gc/print_gc_stats.rb
@@ -60,7 +60,7 @@ gc_stat_keys = ENV['GC_STAT_KEYS'].to_s.split(',').map(&:to_sym)
values = []
values << ENV['SETTING_CSV']
values += gc_stat_keys.map { |k| gc_stats[k] }
-values << ::Gitlab::Metrics::System.memory_usage_rss
+values << ::Gitlab::Metrics::System.memory_usage_rss[:total]
values << gc_total_time
values << tms.utime + tms.cutime
values << tms.stime + tms.cstime
diff --git a/scripts/perf/query_limiting_report.rb b/scripts/perf/query_limiting_report.rb
index 364cd6fc5d4..6326b2590ae 100755
--- a/scripts/perf/query_limiting_report.rb
+++ b/scripts/perf/query_limiting_report.rb
@@ -124,19 +124,19 @@ class QueryLimitingReport
file_lines.each_index do |index|
line = file_lines[index]
- if line =~ /#{CODE_LINES_SEARCH_STRING}/o
- issue_iid = line.slice(%r{issues/(\d+)\D}, 1)
- line_number = index + 1
- code_line = {
- file_location: "#{filename}:#{line_number}",
- filename: filename,
- line_number: line_number,
- line: line,
- issue_iid: issue_iid.to_i,
- has_issue_iid: !issue_iid.nil?
- }
- code_lines << code_line
- end
+ next unless line =~ /#{CODE_LINES_SEARCH_STRING}/o
+
+ issue_iid = line.slice(%r{issues/(\d+)\D}, 1)
+ line_number = index + 1
+ code_line = {
+ file_location: "#{filename}:#{line_number}",
+ filename: filename,
+ line_number: line_number,
+ line: line,
+ issue_iid: issue_iid.to_i,
+ has_issue_iid: !issue_iid.nil?
+ }
+ code_lines << code_line
end
end
diff --git a/scripts/qa/quarantine-types-check b/scripts/qa/quarantine-types-check
index 44d329a3590..188348b949c 100755
--- a/scripts/qa/quarantine-types-check
+++ b/scripts/qa/quarantine-types-check
@@ -30,19 +30,19 @@ puts "\nAnalyzing quarantined test data...\n"
tests = data_hash['examples']
tests.each do |test|
- if test['quarantine']
- unless QUARANTINE_TYPES.include?(test['quarantine']['type'])
- quarantine_type_errors.push(
- <<~TYPE_ERRORS
+ next unless test['quarantine']
+
+ unless QUARANTINE_TYPES.include?(test['quarantine']['type'])
+ quarantine_type_errors.push(
+ <<~TYPE_ERRORS
==> #{test['full_description']}
in file: #{test['id']}
with type: "#{test['quarantine']['type']}"
- TYPE_ERRORS
- )
- end
-
- missing_issues.push(" ==> #{test['id']} - #{test['full_description']}\n") unless test['quarantine']['issue']
+ TYPE_ERRORS
+ )
end
+
+ missing_issues.push(" ==> #{test['id']} - #{test['full_description']}\n") unless test['quarantine']['issue']
end
if quarantine_type_errors.empty? && missing_issues.empty?
diff --git a/scripts/qa/testcases-check b/scripts/qa/testcases-check
index 12af15fe73f..26098678f7c 100755
--- a/scripts/qa/testcases-check
+++ b/scripts/qa/testcases-check
@@ -74,6 +74,7 @@ else
puts missing_message % missing_testcases.join("\n") unless missing_testcases.empty?
puts format_message % testcase_format_errors.join("\n") unless testcase_format_errors.empty?
puts "\n*** Please link a unique test case from the GitLab project for the errors listed above.\n"
- puts " See: https://docs.gitlab.com/ee/development/testing_guide/end_to_end/best_practices.html#link-a-test-to-its-test-case."
+ puts " See: https://docs.gitlab.com/ee/development/testing_guide/end_to_end/best_practices.html#link-a-test-to-its-test-case"\
+ " for further details on how to create test cases"
exit 1
end
diff --git a/scripts/review_apps/automated_cleanup.rb b/scripts/review_apps/automated_cleanup.rb
index 2440df6958d..f020283de52 100755
--- a/scripts/review_apps/automated_cleanup.rb
+++ b/scripts/review_apps/automated_cleanup.rb
@@ -88,7 +88,7 @@ module ReviewApps
if deployed_at < delete_threshold
deleted_environment = delete_environment(environment, deployment)
if deleted_environment
- release = Tooling::Helm3Client::Release.new(environment.slug, 1, deployed_at.to_s, nil, nil, review_apps_namespace)
+ release = Tooling::Helm3Client::Release.new(environment.slug, 1, deployed_at.to_s, nil, nil, environment.slug)
releases_to_delete << release
end
else
@@ -104,7 +104,7 @@ module ReviewApps
end
delete_stopped_environments(environment_type: :review_app, checked_environments: checked_environments, last_updated_threshold: delete_threshold) do |environment|
- releases_to_delete << Tooling::Helm3Client::Release.new(environment.slug, 1, environment.updated_at, nil, nil, review_apps_namespace)
+ releases_to_delete << Tooling::Helm3Client::Release.new(environment.slug, 1, environment.updated_at, nil, nil, environment.slug)
end
delete_helm_releases(releases_to_delete)
@@ -190,6 +190,8 @@ module ReviewApps
rescue Gitlab::Error::Forbidden
puts "Review app '#{environment.name}' / '#{environment.slug}' (##{environment.id}) is forbidden: skipping it"
+ rescue Gitlab::Error::InternalServerError
+ puts "Review app '#{environment.name}' / '#{environment.slug}' (##{environment.id}) 500 error - ignoring it"
end
def stop_environment(environment, deployment)
diff --git a/scripts/review_apps/base-config.yaml b/scripts/review_apps/base-config.yaml
index 91c645a0ed9..f845dd04e8f 100644
--- a/scripts/review_apps/base-config.yaml
+++ b/scripts/review_apps/base-config.yaml
@@ -18,73 +18,91 @@ global:
preemptible: "true"
certmanager:
install: false
+
gitlab:
gitaly:
resources:
requests:
- cpu: 2400m
- memory: 1000M
+ cpu: 1200m
+ memory: 600Mi
limits:
- cpu: 3600m
- memory: 1500M
+ cpu: 1800m
+ memory: 1000Mi
persistence:
- size: 10G
+ size: 10Gi
storageClass: ssd
nodeSelector:
preemptible: "false"
podAnnotations:
<<: *safe-to-evict
+
gitlab-exporter:
enabled: false
- mailroom:
- enabled: false
- migrations:
- resources:
- requests:
- cpu: 350m
- memory: 200M
- limits:
- cpu: 700m
+
gitlab-shell:
resources:
requests:
cpu: 500m
- memory: 100M
+ memory: 100Mi
limits:
cpu: 750m
- memory: 150M
- maxReplicas: 3
+ memory: 150Mi
+ minReplicas: 1
+ maxReplicas: 1
hpa:
- targetAverageValue: 500m
+ cpu:
+ targetAverageValue: 500m
deployment:
livenessProbe:
timeoutSeconds: 5
+
+ kas:
+ minReplicas: 1
+ maxReplicas: 1
+
+ mailroom:
+ enabled: false
+
+ migrations:
+ resources:
+ requests:
+ cpu: 400m
+ memory: 920Mi
+ limits:
+ cpu: 600m
+ memory: 1100Mi
+
sidekiq:
resources:
requests:
cpu: 855m
- memory: 1927M
+ memory: 1927Mi
limits:
cpu: 1282m
- memory: 2890M
+ memory: 2890Mi
hpa:
- targetAverageValue: 650m
+ cpu:
+ targetAverageValue: 650m
+
toolbox:
resources:
requests:
cpu: 300m
- memory: 1927M
+ memory: 1927Mi
limits:
cpu: 450m
- memory: 2890M
+ memory: 2890Mi
+
webservice:
resources:
requests:
cpu: 746m
- memory: 2809M
+ memory: 2809Mi
limits:
cpu: 1119m
- memory: 4214M
+ memory: 4214Mi
+ minReplicas: 1
+ maxReplicas: 1
deployment:
readinessProbe:
initialDelaySeconds: 5 # Default is 0
@@ -94,38 +112,41 @@ gitlab:
resources:
requests:
cpu: 400m
- memory: 75M
+ memory: 75Mi
limits:
cpu: 600m
- memory: 113M
+ memory: 113Mi
readinessProbe:
initialDelaySeconds: 5 # Default is 0
periodSeconds: 15 # Default is 10
timeoutSeconds: 5 # Default is 2
+
gitlab-runner:
resources:
requests:
cpu: 675m
- memory: 100M
+ memory: 100Mi
limits:
cpu: 1015m
- memory: 150M
+ memory: 150Mi
nodeSelector:
preemptible: "true"
podAnnotations:
<<: *safe-to-evict
+
minio:
resources:
requests:
cpu: 9m
- memory: 128M
+ memory: 128Mi
limits:
cpu: 15m
- memory: 280M
+ memory: 280Mi
nodeSelector:
preemptible: "true"
podAnnotations:
<<: *safe-to-evict
+
nginx-ingress:
controller:
config:
@@ -133,10 +154,10 @@ nginx-ingress:
resources:
requests:
cpu: 300m
- memory: 450M
+ memory: 450Mi
limits:
cpu: 600m
- memory: 675M
+ memory: 675Mi
service:
enableHttp: false
livenessProbe:
@@ -149,53 +170,58 @@ nginx-ingress:
resources:
requests:
cpu: 5m
- memory: 12M
+ memory: 12Mi
limits:
cpu: 10m
- memory: 24M
+ memory: 24Mi
nodeSelector:
preemptible: "true"
+
postgresql:
metrics:
enabled: false
resources:
requests:
cpu: 600m
- memory: 1000M
+ memory: 1000Mi
limits:
cpu: 1300m
- memory: 1500M
+ memory: 1600Mi
master:
nodeSelector:
preemptible: "false"
podAnnotations:
<<: *safe-to-evict
+
prometheus:
install: false
+
redis:
metrics:
enabled: false
resources:
requests:
cpu: 100m
- memory: 60M
+ memory: 60Mi
limits:
cpu: 200m
- memory: 130M
+ memory: 130Mi
master:
nodeSelector:
preemptible: "true"
podAnnotations:
<<: *safe-to-evict
+
registry:
hpa:
minReplicas: 1
+ maxReplicas: 1
resources:
requests:
cpu: 100m
- memory: 30M
+ memory: 30Mi
limits:
cpu: 200m
- memory: 45M
+ memory: 45Mi
nodeSelector:
preemptible: "true"
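
The memory values also move from Kubernetes decimal suffixes (M, 10^6 bytes) to binary ones (Mi, 2^20 bytes), so identical-looking numbers are slightly different quantities; for scale:

    # 1000M (decimal megabytes) expressed in Mi (binary mebibytes):
    echo $(( 1000 * 1000 * 1000 / 1024 / 1024 ))   # => 953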
diff --git a/scripts/review_apps/review-apps.sh b/scripts/review_apps/review-apps.sh
index 0fc245a409f..5883141a943 100755
--- a/scripts/review_apps/review-apps.sh
+++ b/scripts/review_apps/review-apps.sh
@@ -62,7 +62,7 @@ function previous_deploy_failed() {
return $status
}
-function delete_release() {
+function delete_helm_release() {
local namespace="${CI_ENVIRONMENT_SLUG}"
local release="${CI_ENVIRONMENT_SLUG}"
@@ -74,32 +74,6 @@ function delete_release() {
if deploy_exists "${namespace}" "${release}"; then
helm uninstall --namespace="${namespace}" "${release}"
fi
-}
-
-function delete_failed_release() {
- local namespace="${CI_ENVIRONMENT_SLUG}"
- local release="${CI_ENVIRONMENT_SLUG}"
-
- if [ -z "${release}" ]; then
- echoerr "No release given, aborting the delete!"
- return
- fi
-
- if ! deploy_exists "${namespace}" "${release}"; then
- echoinfo "No Review App with ${release} is currently deployed."
- else
- # Cleanup and previous installs, as FAILED and PENDING_UPGRADE will cause errors with `upgrade`
- if previous_deploy_failed "${namespace}" "${release}" ; then
- echoinfo "Review App deployment in bad state, cleaning up namespace ${release}"
- delete_namespace
- else
- echoinfo "Review App deployment in good state"
- fi
- fi
-}
-
-function delete_namespace() {
- local namespace="${CI_ENVIRONMENT_SLUG}"
if namespace_exists "${namespace}"; then
echoinfo "Deleting namespace ${namespace}..." true
@@ -143,7 +117,7 @@ function run_task() {
local ruby_cmd="${1}"
local toolbox_pod=$(get_pod "toolbox")
- kubectl exec --namespace "${namespace}" "${toolbox_pod}" -- gitlab-rails runner "${ruby_cmd}"
+ run_timed_command "kubectl exec --namespace \"${namespace}\" \"${toolbox_pod}\" -- gitlab-rails runner \"${ruby_cmd}\""
}
function disable_sign_ups() {
@@ -346,47 +320,44 @@ EOF
if [ -n "${REVIEW_APPS_EE_LICENSE_FILE}" ]; then
HELM_CMD=$(cat << EOF
${HELM_CMD} \
- --set global.gitlab.license.secret="shared-gitlab-license"
+ --set global.gitlab.license.secret="shared-gitlab-license"
EOF
)
fi
HELM_CMD=$(cat << EOF
${HELM_CMD} \
- --version="${CI_PIPELINE_ID}-${CI_JOB_ID}" \
- -f "${base_config_file}" \
- -v "${HELM_LOG_VERBOSITY:-1}" \
- "${release}" "gitlab-${GITLAB_HELM_CHART_REF}"
+ --version="${CI_PIPELINE_ID}-${CI_JOB_ID}" \
+ -f "${base_config_file}" \
+ -v "${HELM_LOG_VERBOSITY:-1}" \
+ "${release}" "gitlab-${GITLAB_HELM_CHART_REF}"
EOF
)
+ # Pretty-print the command for display
echoinfo "Deploying with:"
- echoinfo "${HELM_CMD}"
+ echo "${HELM_CMD}" | sed 's/ /\n\t/g'
- eval "${HELM_CMD}"
+ run_timed_command "eval \"${HELM_CMD}\""
}
function verify_deploy() {
- echoinfo "Verifying deployment at ${CI_ENVIRONMENT_URL}"
+ local namespace="${CI_ENVIRONMENT_SLUG}"
+
+ echoinfo "[$(date '+%H:%M:%S')] Verifying deployment at ${CI_ENVIRONMENT_URL}"
if retry "test_url \"${CI_ENVIRONMENT_URL}\""; then
- echoinfo "Review app is deployed to ${CI_ENVIRONMENT_URL}"
+ echoinfo "[$(date '+%H:%M:%S')] Review app is deployed to ${CI_ENVIRONMENT_URL}"
return 0
else
- echoerr "Review app is not available at ${CI_ENVIRONMENT_URL}: see the logs from cURL above for more details"
+ echoerr "[$(date '+%H:%M:%S')] Review app is not available at ${CI_ENVIRONMENT_URL}: see the logs from cURL above for more details"
return 1
fi
}
function display_deployment_debug() {
local namespace="${CI_ENVIRONMENT_SLUG}"
- local release="${CI_ENVIRONMENT_SLUG}"
-
- # Get all pods for this release
- echoinfo "Pods for release ${release}"
- kubectl get pods --namespace "${namespace}" -lrelease=${release}
- # Get all non-completed jobs
- echoinfo "Unsuccessful Jobs for release ${release}"
- kubectl get jobs --namespace "${namespace}" -lrelease=${release} --field-selector=status.successful!=1
+ echoinfo "Environment debugging data:"
+ kubectl get svc,pods,jobs --namespace "${namespace}"
}
diff --git a/scripts/rspec_helpers.sh b/scripts/rspec_helpers.sh
index 73030d2ad6c..14c5b94e921 100644
--- a/scripts/rspec_helpers.sh
+++ b/scripts/rspec_helpers.sh
@@ -11,7 +11,6 @@ function retrieve_tests_metadata() {
if [[ ! -f "${FLAKY_RSPEC_SUITE_REPORT_PATH}" ]]; then
curl --location -o "${FLAKY_RSPEC_SUITE_REPORT_PATH}" "https://gitlab-org.gitlab.io/gitlab/${FLAKY_RSPEC_SUITE_REPORT_PATH}" ||
- curl --location -o "${FLAKY_RSPEC_SUITE_REPORT_PATH}" "https://gitlab-org.gitlab.io/gitlab/rspec_flaky/report-suite.json" || # temporary back-compat
echo "{}" > "${FLAKY_RSPEC_SUITE_REPORT_PATH}"
fi
else
@@ -35,13 +34,7 @@ function retrieve_tests_metadata() {
if [[ ! -f "${FLAKY_RSPEC_SUITE_REPORT_PATH}" ]]; then
scripts/api/download_job_artifact.rb --endpoint "https://gitlab.com/api/v4" --project "${project_path}" --job-id "${test_metadata_job_id}" --artifact-path "${FLAKY_RSPEC_SUITE_REPORT_PATH}" ||
- scripts/api/download_job_artifact.rb --endpoint "https://gitlab.com/api/v4" --project "${project_path}" --job-id "${test_metadata_job_id}" --artifact-path "rspec_flaky/report-suite.json" || # temporary back-compat
echo "{}" > "${FLAKY_RSPEC_SUITE_REPORT_PATH}"
-
- # temporary back-compat
- if [[ -f "rspec_flaky/report-suite.json" ]]; then
- mv "rspec_flaky/report-suite.json" "${FLAKY_RSPEC_SUITE_REPORT_PATH}"
- fi
fi
else
echo "test_metadata_job_id couldn't be found!"
@@ -61,10 +54,16 @@ function update_tests_metadata() {
export FLAKY_RSPEC_GENERATE_REPORT="true"
scripts/merge-reports "${FLAKY_RSPEC_SUITE_REPORT_PATH}" ${rspec_flaky_folder_path}all_*.json
+
+ # Prune flaky tests that weren't flaky in the last 7 days, *after* updating the flaky tests detected
+ # in this pipeline, so that first_flaky_at for tests that are still flaky is maintained.
scripts/flaky_examples/prune-old-flaky-examples "${FLAKY_RSPEC_SUITE_REPORT_PATH}"
if [[ "$CI_PIPELINE_SOURCE" == "schedule" ]]; then
- scripts/insert-rspec-profiling-data
+ if [[ -n "$RSPEC_PROFILING_PGSSLKEY" ]]; then
+ chmod 0600 $RSPEC_PROFILING_PGSSLKEY
+ fi
+ PGSSLMODE=$RSPEC_PROFILING_PGSSLMODE PGSSLROOTCERT=$RSPEC_PROFILING_PGSSLROOTCERT PGSSLCERT=$RSPEC_PROFILING_PGSSLCERT PGSSLKEY=$RSPEC_PROFILING_PGSSLKEY scripts/insert-rspec-profiling-data
else
echo "Not inserting profiling data as the pipeline is not a scheduled one."
fi
diff --git a/scripts/rubocop-parse b/scripts/rubocop-parse
index 0a234df81cd..c99d66e99ad 100755
--- a/scripts/rubocop-parse
+++ b/scripts/rubocop-parse
@@ -39,7 +39,34 @@ module Helper
def ast(source, file: '', version: nil)
version ||= ruby_version
- puts RuboCop::AST::ProcessedSource.new(source, version).ast.to_s
+
+ ast = RuboCop::AST::ProcessedSource.new(source, version).ast
+ return ast if ast
+
+ warn "Syntax error in `#{source}`."
+ end
+
+ def pattern(string)
+ RuboCop::NodePattern.new(string)
+ end
+
+ def help!
+ puts <<~HELP
+
+ Use `ast(source_string, version: nil)` method to parse code and return its AST.
+ Use `pattern(string)` to compile RuboCop's node patterns.
+
+ See https://docs.rubocop.org/rubocop-ast/node_pattern.html.
+
+ Examples:
+ node = ast('puts :hello')
+
+ pat = pattern('`(sym :hello)')
+ pat.match(node) # => true
+
+ HELP
+
+ nil
end
def ruby_version
@@ -56,11 +83,12 @@ def start_irb
include Helper # rubocop:disable Style/MixinUsage
- puts "Ruby version: #{ruby_version}"
- puts
- puts "Use `ast(source_string, version: nil)` method to parse code and output AST. For example:"
- puts " ast('puts :hello')"
- puts
+ puts <<~BANNER
+ Ruby version: #{ruby_version}
+
+ Type `help!` for instructions and examples.
+
+ BANNER
IRB.start
end
@@ -103,12 +131,12 @@ elsif options.interactive
start_irb
end
elsif options.eval
- Helper.ast(options.eval)
+ puts Helper.ast(options.eval)
elsif files.any?
files.each do |file|
if File.file?(file)
source = File.read(file)
- Helper.ast(source, file: file)
+ puts Helper.ast(source, file: file)
else
warn "Skipping non-file #{file.inspect}"
end
diff --git a/scripts/security-harness b/scripts/security-harness
index df499be23f5..db397a6c1b1 100755
--- a/scripts/security-harness
+++ b/scripts/security-harness
@@ -75,6 +75,7 @@ end
def delete_hook
FileUtils.rm(HOOK_PATH)
+ system("git checkout master")
puts "#{SHELL_YELLOW}Security harness removed -- you can now push to all remotes.#{SHELL_CLEAR}"
end
diff --git a/scripts/setup/as-if-jh.sh b/scripts/setup/as-if-jh.sh
new file mode 100755
index 00000000000..38c3ac9b913
--- /dev/null
+++ b/scripts/setup/as-if-jh.sh
@@ -0,0 +1,28 @@
+#!/bin/sh
+
+prepare_jh_branch() {
+ set -eu # https://explainshell.com/explain?cmd=set+-eu
+
+ JH_BRANCH="$(./scripts/setup/find-jh-branch.rb)"
+ export JH_BRANCH
+
+ echoinfo "JH_BRANCH: ${JH_BRANCH}"
+}
+
+download_jh_path() {
+ set -eu # https://explainshell.com/explain?cmd=set+-eu
+
+ for path in "$@"; do
+ # https://www.shellcheck.net/wiki/SC3043
+ # shellcheck disable=SC3043
+ local output="${path}.tar.gz"
+
+ echoinfo "Downloading ${path}"
+
+ curl --location -o "${output}" -H "Private-Token: ${ADD_JH_FILES_TOKEN}" "https://gitlab.com/api/v4/projects/${GITLAB_JH_MIRROR_PROJECT}/repository/archive?sha=${JH_BRANCH}&path=${path}"
+
+ tar -zxf "${output}"
+ rm "${output}"
+ mv gitlab-"${JH_BRANCH}"-*/"${path}" ./
+ done
+}
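
A usage sketch for these helpers, assuming scripts/utils.sh (which defines echoinfo) has been sourced and that ADD_JH_FILES_TOKEN and GITLAB_JH_MIRROR_PROJECT are set in the environment; the paths passed to download_jh_path are hypothetical:

    . scripts/utils.sh
    . scripts/setup/as-if-jh.sh

    prepare_jh_branch
    download_jh_path "jh" "spec/fixtures/jh"   # each path is fetched from the JH mirror at JH_BRANCH and extracted into ./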
diff --git a/scripts/used-feature-flags b/scripts/used-feature-flags
index 0966795f451..eb7e85be229 100755
--- a/scripts/used-feature-flags
+++ b/scripts/used-feature-flags
@@ -97,7 +97,7 @@ puts
if additional_flags.count > 0
puts "==================================================".green.bold
- puts "There are feature flags that appears to be unknown".yellow
+ puts "There are feature flags that appear to be unknown".yellow
puts
puts "They appear to be used by CI, but we do lack their YAML definition".yellow
puts "This is likely expected, so feel free to ignore that list:".yellow
@@ -110,7 +110,7 @@ end
if unused_flags.count > 0
puts "========================================".green.bold
- puts "These feature flags appears to be UNUSED".red.bold
+ puts "These feature flags appear to be UNUSED".red.bold
puts
puts "If they are really no longer needed REMOVE their .yml definition".red
puts "If they are needed you need to ENSURE that their usage is covered with specs to continue.".red
diff --git a/scripts/utils.sh b/scripts/utils.sh
index ea2b390f249..50ca7f558f6 100644
--- a/scripts/utils.sh
+++ b/scripts/utils.sh
@@ -5,7 +5,7 @@ function retry() {
for i in 2 1; do
sleep 3s
- echo "Retrying $i..."
+ echo "[$(date '+%H:%M:%S')] Retrying $i..."
if eval "$@"; then
return 0
fi