gitlab.com/gitlab-org/gitlab-foss.git
author    GitLab Bot <gitlab-bot@gitlab.com>  2022-10-20 12:40:42 +0300
committer GitLab Bot <gitlab-bot@gitlab.com>  2022-10-20 12:40:42 +0300
commit    ee664acb356f8123f4f6b00b73c1e1cf0866c7fb (patch)
tree      f8479f94a28f66654c6a4f6fb99bad6b4e86a40e /scripts
parent    62f7d5c5b69180e82ae8196b7b429eeffc8e7b4f (diff)

Add latest changes from gitlab-org/gitlab@15-5-stable-ee (tag: v15.5.0-rc42)
Diffstat (limited to 'scripts')
-rwxr-xr-x scripts/api/cancel_pipeline.rb | 2
-rwxr-xr-x scripts/api/download_job_artifact.rb | 2
-rwxr-xr-x scripts/api/get_job_id.rb | 2
-rw-r--r-- scripts/api/pipeline_failed_jobs.rb | 47
-rwxr-xr-x scripts/build_qa_image | 32
-rwxr-xr-x scripts/changed-feature-flags | 2
-rwxr-xr-x scripts/clean-old-cached-assets | 8
-rwxr-xr-x scripts/failed_tests.rb | 2
-rwxr-xr-x scripts/generate-e2e-pipeline | 3
-rwxr-xr-x scripts/generate-failed-pipeline-slack-message.rb | 112
-rwxr-xr-x scripts/generate-rspec-foss-impact-pipeline | 66
-rw-r--r-- scripts/gitlab_component_helpers.sh | 151
-rw-r--r-- scripts/gitlab_workhorse_component_helpers.sh | 74
-rwxr-xr-x scripts/glfm/verify-all-generated-files-are-up-to-date.rb | 5
-rw-r--r-- scripts/lib/glfm/constants.rb | 24
-rw-r--r-- scripts/lib/glfm/render_static_html.rb | 16
-rw-r--r-- scripts/lib/glfm/update_example_snapshots.rb | 57
-rw-r--r-- scripts/lib/glfm/update_specification.rb | 140
-rw-r--r-- scripts/lib/glfm/verify_all_generated_files_are_up_to_date.rb | 48
-rwxr-xr-x scripts/lint-json.sh | 8
-rw-r--r-- scripts/migration_schema_validator.rb | 117
-rwxr-xr-x scripts/packages/automated_cleanup.rb | 126
-rwxr-xr-x scripts/perf/query_limiting_report.rb | 2
-rwxr-xr-x scripts/pipeline_test_report_builder.rb | 2
-rwxr-xr-x scripts/post_deployment_migrations_validator | 31
-rwxr-xr-x scripts/review_apps/automated_cleanup.rb | 418
-rwxr-xr-x scripts/review_apps/review-apps.sh | 27
-rw-r--r-- scripts/rspec_helpers.sh | 54
-rwxr-xr-x scripts/rubocop-max-files-in-cache-check | 5
-rwxr-xr-x scripts/rubocop-parse | 77
-rwxr-xr-x scripts/setup/find-jh-branch.rb | 2
-rwxr-xr-x scripts/slack | 2
-rwxr-xr-x scripts/static-analysis | 2
-rwxr-xr-x scripts/trigger-build.rb | 2
-rw-r--r-- scripts/utils.sh | 22
-rwxr-xr-x scripts/validate_migration_schema | 116
36 files changed, 1278 insertions(+), 528 deletions(-)
diff --git a/scripts/api/cancel_pipeline.rb b/scripts/api/cancel_pipeline.rb
index 2de50dcee80..2667cfb9733 100755
--- a/scripts/api/cancel_pipeline.rb
+++ b/scripts/api/cancel_pipeline.rb
@@ -25,7 +25,7 @@ class CancelPipeline
attr_reader :project, :pipeline_id, :client
end
-if $0 == __FILE__
+if $PROGRAM_NAME == __FILE__
options = API::DEFAULT_OPTIONS.dup
OptionParser.new do |opts|
diff --git a/scripts/api/download_job_artifact.rb b/scripts/api/download_job_artifact.rb
index 23202ad3912..394ad8f3a3d 100755
--- a/scripts/api/download_job_artifact.rb
+++ b/scripts/api/download_job_artifact.rb
@@ -60,7 +60,7 @@ class ArtifactFinder
end
end
-if $0 == __FILE__
+if $PROGRAM_NAME == __FILE__
options = API::DEFAULT_OPTIONS.dup
OptionParser.new do |opts|
diff --git a/scripts/api/get_job_id.rb b/scripts/api/get_job_id.rb
index 2ee769d58f4..12535106a4c 100755
--- a/scripts/api/get_job_id.rb
+++ b/scripts/api/get_job_id.rb
@@ -95,7 +95,7 @@ class JobFinder
end
end
-if $0 == __FILE__
+if $PROGRAM_NAME == __FILE__
options = JobFinder::DEFAULT_OPTIONS.dup
OptionParser.new do |opts|
diff --git a/scripts/api/pipeline_failed_jobs.rb b/scripts/api/pipeline_failed_jobs.rb
new file mode 100644
index 00000000000..c25567af698
--- /dev/null
+++ b/scripts/api/pipeline_failed_jobs.rb
@@ -0,0 +1,47 @@
+# frozen_string_literal: true
+
+require 'gitlab'
+require 'optparse'
+require_relative 'default_options'
+
+class PipelineFailedJobs
+ def initialize(options)
+ @project = options.delete(:project)
+ @pipeline_id = options.delete(:pipeline_id)
+ @exclude_allowed_to_fail_jobs = options.delete(:exclude_allowed_to_fail_jobs)
+
+ # Force the token to be a string so that if api_token is nil, it's set to '',
+ # allowing unauthenticated requests (for forks).
+ api_token = options.delete(:api_token).to_s
+
+ warn "No API token given." if api_token.empty?
+
+ @client = Gitlab.client(
+ endpoint: options.delete(:endpoint) || API::DEFAULT_OPTIONS[:endpoint],
+ private_token: api_token
+ )
+ end
+
+ def execute
+ failed_jobs = []
+
+ client.pipeline_jobs(project, pipeline_id, scope: 'failed', per_page: 100).auto_paginate do |job|
+ next if exclude_allowed_to_fail_jobs && job.allow_failure
+
+ failed_jobs << job
+ end
+
+ client.pipeline_bridges(project, pipeline_id, scope: 'failed', per_page: 100).auto_paginate do |job|
+ next if exclude_allowed_to_fail_jobs && job.allow_failure
+
+ job.web_url = job.downstream_pipeline.web_url # job.web_url is linking to an invalid page
+ failed_jobs << job
+ end
+
+ failed_jobs
+ end
+
+ private
+
+ attr_reader :project, :pipeline_id, :exclude_allowed_to_fail_jobs, :client
+end
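Note: a minimal usage sketch of PipelineFailedJobs follows; the option values are hypothetical, and the real caller (generate-failed-pipeline-slack-message.rb below) builds the hash from API::DEFAULT_OPTIONS plus OptionParser flags.

    require_relative 'pipeline_failed_jobs'

    failed_jobs = PipelineFailedJobs.new(
      project: 'gitlab-org/gitlab',      # CI_PROJECT_PATH in CI
      pipeline_id: 123_456,              # CI_PIPELINE_ID in CI
      exclude_allowed_to_fail_jobs: true,
      api_token: ENV['PROJECT_TOKEN'],   # an empty token falls back to unauthenticated requests (forks)
      endpoint: 'https://gitlab.com/api/v4'
    ).execute

    failed_jobs.each { |job| puts "#{job.name}: #{job.web_url}" }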
diff --git a/scripts/build_qa_image b/scripts/build_qa_image
new file mode 100755
index 00000000000..f4ecb8ed6b6
--- /dev/null
+++ b/scripts/build_qa_image
@@ -0,0 +1,32 @@
+#!/bin/sh
+
+QA_IMAGE_NAME="gitlab-ee-qa"
+
+if [ "${CI_PROJECT_NAME}" == "gitlabhq" ] || [ "${CI_PROJECT_NAME}" == "gitlab-foss" ]; then
+ QA_IMAGE_NAME="gitlab-ce-qa"
+fi
+
+# Tag with commit SHA by default
+QA_IMAGE="${CI_REGISTRY}/${CI_PROJECT_PATH}/${QA_IMAGE_NAME}:${CI_COMMIT_SHA}"
+# For branches, tag with slugified branch name. For tags, use the tag directly
+QA_IMAGE_BRANCH="${CI_REGISTRY}/${CI_PROJECT_PATH}/${QA_IMAGE_NAME}:${CI_COMMIT_TAG:-$CI_COMMIT_REF_SLUG}"
+
+DESTINATIONS="--destination=${QA_IMAGE} --destination=${QA_IMAGE_BRANCH}"
+
+# Auto-deploy tag format uses the first 11 characters of the commit SHA. Tag
+# EE images with that reference as well.
+if [ "${QA_IMAGE_NAME}" == "gitlab-ee-qa" ]; then
+ QA_IMAGE_FOR_AUTO_DEPLOY="${CI_REGISTRY}/${CI_PROJECT_PATH}/${QA_IMAGE_NAME}:${CI_COMMIT_SHA:0:11}"
+ DESTINATIONS="${DESTINATIONS} --destination=$QA_IMAGE_FOR_AUTO_DEPLOY"
+fi
+
+echo "Building QA image for destinations: ${DESTINATIONS}"
+
+/kaniko/executor \
+ --context="${CI_PROJECT_DIR}" \
+ --dockerfile="${CI_PROJECT_DIR}/qa/Dockerfile" \
+ --build-arg=CHROME_VERSION="${CHROME_VERSION}" \
+ --build-arg=DOCKER_VERSION="${DOCKER_VERSION}" \
+ --build-arg=QA_BUILD_TARGET="${QA_BUILD_TARGET:-qa}" \
+ --cache=true \
+ ${DESTINATIONS}
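Note: as a worked example with hypothetical values, CI_COMMIT_SHA=0123456789abcdef0123456789abcdef01234567 and CI_COMMIT_REF_SLUG=my-branch in the EE project make kaniko push the same image to three tags: gitlab-ee-qa:0123456789abcdef0123456789abcdef01234567, gitlab-ee-qa:my-branch, and the auto-deploy reference gitlab-ee-qa:0123456789a (the first 11 characters of the SHA, from ${CI_COMMIT_SHA:0:11}).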
diff --git a/scripts/changed-feature-flags b/scripts/changed-feature-flags
index ded6156bfa8..8c1b219e5a6 100755
--- a/scripts/changed-feature-flags
+++ b/scripts/changed-feature-flags
@@ -90,7 +90,7 @@ class GetFeatureFlagsFromFiles
end
end
-if $0 == __FILE__
+if $PROGRAM_NAME == __FILE__
options = API::DEFAULT_OPTIONS.dup
OptionParser.new do |opts|
diff --git a/scripts/clean-old-cached-assets b/scripts/clean-old-cached-assets
deleted file mode 100755
index 20889b7ffe6..00000000000
--- a/scripts/clean-old-cached-assets
+++ /dev/null
@@ -1,8 +0,0 @@
-#!/usr/bin/env bash
-
-# Clean up cached files that are older than 4 days
-find tmp/cache/assets/sprockets/ -type f -mtime +4 -execdir rm -- "{}" \;
-find tmp/cache/webpack-dlls/ -maxdepth 1 -type d -mtime +4 -exec rm -rf -- "{}" \;
-
-du -d 0 -h tmp/cache/assets/sprockets | cut -f1 | xargs -I % echo "tmp/cache/assets/sprockets/ is currently %"
-du -d 0 -h tmp/cache/webpack-dlls | cut -f1 | xargs -I % echo "tmp/cache/webpack-dlls is currently %"
diff --git a/scripts/failed_tests.rb b/scripts/failed_tests.rb
index fb13df7bf62..319961d277c 100755
--- a/scripts/failed_tests.rb
+++ b/scripts/failed_tests.rb
@@ -87,7 +87,7 @@ class FailedTests
end
end
-if $0 == __FILE__
+if $PROGRAM_NAME == __FILE__
options = {
previous_tests_report_path: 'test_results/previous/test_reports.json',
output_directory: 'tmp/previous_failed_tests/',
diff --git a/scripts/generate-e2e-pipeline b/scripts/generate-e2e-pipeline
index f541ae6665c..0d46a117719 100755
--- a/scripts/generate-e2e-pipeline
+++ b/scripts/generate-e2e-pipeline
@@ -18,8 +18,11 @@ if [ "$QA_SKIP_ALL_TESTS" == "true" ]; then
exit
fi
+# set custom cache key to override default cache in pipeline-common because we use bundle to install gitlab-qa gem
+qa_cache_key="qa-e2e-ruby-${RUBY_VERSION}-$(md5sum qa/Gemfile.lock | awk '{ print $1 }')"
variables=$(cat <<YML
variables:
+ GITLAB_QA_CACHE_KEY: "$qa_cache_key"
GITLAB_VERSION: "$(cat VERSION)"
COLORIZED_LOGS: "true"
QA_TESTS: "$QA_TESTS"
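Note: with, say, RUBY_VERSION=2.7.5 (illustrative only), the key resolves to qa-e2e-ruby-2.7.5-<md5 of qa/Gemfile.lock>, so the gem cache is invalidated exactly when the Ruby version or the lockfile changes.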
diff --git a/scripts/generate-failed-pipeline-slack-message.rb b/scripts/generate-failed-pipeline-slack-message.rb
new file mode 100755
index 00000000000..699e32872e6
--- /dev/null
+++ b/scripts/generate-failed-pipeline-slack-message.rb
@@ -0,0 +1,112 @@
+#!/usr/bin/env ruby
+
+# frozen_string_literal: true
+
+require_relative 'api/pipeline_failed_jobs'
+
+finder_options = API::DEFAULT_OPTIONS.dup.merge(exclude_allowed_to_fail_jobs: true)
+failed_jobs = PipelineFailedJobs.new(finder_options).execute
+
+class SlackReporter
+ DEFAULT_FAILED_PIPELINE_REPORT_FILE = 'failed_pipeline_report.json'
+
+ def initialize(failed_jobs)
+ @failed_jobs = failed_jobs
+ @failed_pipeline_report_file = ENV.fetch('FAILED_PIPELINE_REPORT_FILE', DEFAULT_FAILED_PIPELINE_REPORT_FILE)
+ end
+
+ def report
+ payload = {
+ channel: ENV['SLACK_CHANNEL'],
+ username: "Failed pipeline reporter",
+ icon_emoji: ":boom:",
+ text: "*#{title}*",
+ blocks: [
+ {
+ type: "section",
+ text: {
+ type: "mrkdwn",
+ text: "*#{title}*"
+ }
+ },
+ {
+ type: "section",
+ fields: [
+ {
+ type: "mrkdwn",
+ text: "*Commit*\n#{commit_link}"
+ },
+ {
+ type: "mrkdwn",
+ text: "*Triggered by*\n#{triggered_by_link}"
+ }
+ ]
+ },
+ {
+ type: "section",
+ fields: [
+ {
+ type: "mrkdwn",
+ text: "*Source*\n#{source} from #{project_link}"
+ },
+ {
+ type: "mrkdwn",
+ text: "*Duration*\n#{pipeline_duration} minutes"
+ }
+ ]
+ },
+ {
+ type: "section",
+ text: {
+ type: "mrkdwn",
+ text: "*Failed jobs (#{failed_jobs.size}):* #{failed_jobs_list}"
+ }
+ }
+ ]
+ }
+
+ File.write(failed_pipeline_report_file, JSON.pretty_generate(payload))
+ end
+
+ private
+
+ attr_reader :failed_jobs, :failed_pipeline_report_file
+
+ def title
+ "Pipeline #{pipeline_link} for #{branch_link} failed"
+ end
+
+ def pipeline_link
+ "<#{ENV['CI_PIPELINE_URL']}|##{ENV['CI_PIPELINE_ID']}>"
+ end
+
+ def branch_link
+ "<#{ENV['CI_PROJECT_URL']}/-/commits/#{ENV['CI_COMMIT_REF_NAME']}|`#{ENV['CI_COMMIT_REF_NAME']}`>"
+ end
+
+ def pipeline_duration
+ ((Time.now - Time.parse(ENV['CI_PIPELINE_CREATED_AT'])) / 60.to_f).round(2)
+ end
+
+ def commit_link
+ "<#{ENV['CI_PROJECT_URL']}/-/commit/#{ENV['CI_COMMIT_SHA']}|#{ENV['CI_COMMIT_TITLE']}>"
+ end
+
+ def source
+ "`#{ENV['CI_PIPELINE_SOURCE']}`"
+ end
+
+ def project_link
+ "<#{ENV['CI_PROJECT_URL']}|#{ENV['CI_PROJECT_NAME']}>"
+ end
+
+ def triggered_by_link
+ "<#{ENV['CI_SERVER_URL']}/#{ENV['GITLAB_USER_LOGIN']}|#{ENV['GITLAB_USER_NAME']}>"
+ end
+
+ def failed_jobs_list
+ failed_jobs.map { |job| "<#{job.web_url}|#{job.name}>" }.join(', ')
+ end
+end
+
+SlackReporter.new(failed_jobs).report
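Note: this script only writes the Slack payload as JSON to FAILED_PIPELINE_REPORT_FILE; actually posting it to Slack is presumably done by a separate CI job that sends the file to the Slack API, and that consumer is not part of this diff.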
diff --git a/scripts/generate-rspec-foss-impact-pipeline b/scripts/generate-rspec-foss-impact-pipeline
new file mode 100755
index 00000000000..3277f38ebe1
--- /dev/null
+++ b/scripts/generate-rspec-foss-impact-pipeline
@@ -0,0 +1,66 @@
+#!/usr/bin/env bash
+set -euo pipefail
+
+# Script to generate `rspec foss-impact` test child pipeline with dynamically parallelized jobs.
+
+source scripts/utils.sh
+
+rspec_matching_tests_foss_path="${1}"
+pipeline_yml="${2}"
+
+test_file_count=$(wc -w "${rspec_matching_tests_foss_path}" | awk '{ print $1 }')
+echoinfo "test_file_count: ${test_file_count}"
+
+if [[ "${test_file_count}" -eq 0 ]]; then
+ skip_pipeline=".gitlab/ci/_skip.yml"
+
+ echo "Using ${skip_pipeline} due to no impacted FOSS rspec tests to run"
+ cp $skip_pipeline "$pipeline_yml"
+ exit
+fi
+
+# As of 2022-09-01:
+# $ find spec -type f | wc -l
+# 12825
+# and
+# $ find ee/spec -type f | wc -l
+# 5610
+# which gives a total of 18435 test files (`number_of_tests_in_total_in_the_test_suite`).
+#
+# Total time to run all tests (based on https://gitlab-org.gitlab.io/rspec_profiling_stats/) is 170183 seconds (`duration_of_the_test_suite_in_seconds`).
+#
+# This gives an approximate 170183 / 18435 = 9.2 seconds per test file (`average_test_file_duration_in_seconds`).
+#
+# If we want each test job to finish in 10 minutes, given we have 3 minutes of setup (`setup_duration_in_seconds`), then we need to give 7 minutes of testing to each test node (`optimal_test_runtime_duration_in_seconds`).
+# (7 * 60) / 9.2 = 45.6
+#
+# So if we want each node's job to finish within 10 minutes (`optimal_test_job_duration_in_seconds`), we need to run at most 45 test files per node (`optimal_test_file_count_per_node`).
+number_of_tests_in_total_in_the_test_suite=18435
+duration_of_the_test_suite_in_seconds=170183
+optimal_test_job_duration_in_seconds=600 # 10 minutes
+setup_duration_in_seconds=180 # 3 minutes
+
+optimal_test_runtime_duration_in_seconds=$(( optimal_test_job_duration_in_seconds - setup_duration_in_seconds ))
+echoinfo "optimal_test_runtime_duration_in_seconds: ${optimal_test_runtime_duration_in_seconds}"
+
+average_test_file_duration_in_seconds=$(( duration_of_the_test_suite_in_seconds / number_of_tests_in_total_in_the_test_suite ))
+echoinfo "average_test_file_duration_in_seconds: ${average_test_file_duration_in_seconds}"
+
+optimal_test_file_count_per_node=$(( optimal_test_runtime_duration_in_seconds / average_test_file_duration_in_seconds ))
+echoinfo "optimal_test_file_count_per_node: ${optimal_test_file_count_per_node}"
+
+node_count=$(( test_file_count / optimal_test_file_count_per_node ))
+echoinfo "node_count: ${node_count}"
+
+echoinfo "Optimal node count for 'rspec foss-impact' jobs is ${node_count}."
+
+MAX_NODES_COUNT=50 # Maximum parallelization allowed by GitLab
+if [[ "${node_count}" -gt "${MAX_NODES_COUNT}" ]]; then
+ echoinfo "We don't want to parallelize 'rspec foss-impact' to more than ${MAX_NODES_COUNT} jobs for now! Decreasing the parallelization to ${MAX_NODES_COUNT}."
+ node_count=${MAX_NODES_COUNT}
+fi
+
+ruby -rerb -e "puts ERB.new(File.read('.gitlab/ci/rails/rspec-foss-impact.gitlab-ci.yml.erb')).result_with_hash(parallel_value: ${node_count})" > "${pipeline_yml}"
+
+echosuccess "Generated ${pipeline_yml} pipeline with following content:"
+cat "${pipeline_yml}"
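Note: with the integer arithmetic above, average_test_file_duration_in_seconds = 170183 / 18435 = 9 and optimal_test_file_count_per_node = (600 - 180) / 9 = 46, so for example 300 impacted FOSS spec files yield node_count = 300 / 46 = 6 parallel jobs, while 3000 files would yield 65 and be capped at MAX_NODES_COUNT = 50.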
diff --git a/scripts/gitlab_component_helpers.sh b/scripts/gitlab_component_helpers.sh
new file mode 100644
index 00000000000..0d72f940036
--- /dev/null
+++ b/scripts/gitlab_component_helpers.sh
@@ -0,0 +1,151 @@
+#!/usr/bin/env bash
+
+set -euo pipefail
+
+export CURL_TOKEN_HEADER="${CURL_TOKEN_HEADER:-"JOB-TOKEN"}"
+
+export GITLAB_COM_CANONICAL_PROJECT_ID="278964" # https://gitlab.com/gitlab-org/gitlab
+export JIHULAB_COM_CANONICAL_PROJECT_ID="13953" # https://jihulab.com/gitlab-cn/gitlab
+export CANONICAL_PROJECT_ID="${GITLAB_COM_CANONICAL_PROJECT_ID}"
+
+# By default, we only want to store/retrieve packages from GitLab.com...
+export API_V4_URL="https://gitlab.com/api/v4"
+
+# Unless we're in the JiHu project, which needs to use its own package registry
+if [[ "${CI_SERVER_HOST}" = "jihulab.com" ]]; then
+ export API_V4_URL="${CI_API_V4_URL}"
+ export CANONICAL_PROJECT_ID="${JIHULAB_COM_CANONICAL_PROJECT_ID}"
+fi
+
+export API_PACKAGES_BASE_URL="${API_V4_URL}/projects/${CANONICAL_PROJECT_ID}/packages/generic"
+
+export UPLOAD_TO_CURRENT_SERVER="false"
+# We only want to upload artifacts to https://gitlab.com and https://jihulab.com instances
+if [[ "${CI_SERVER_HOST}" = "gitlab.com" ]] || [[ "${CI_SERVER_HOST}" = "jihulab.com" ]]; then
+ export UPLOAD_TO_CURRENT_SERVER="true"
+fi
+
+export UPLOAD_PACKAGE_FLAG="false"
+# And only if we're in a pipeline from the canonical project
+if [[ "${UPLOAD_TO_CURRENT_SERVER}" = "true" ]] && [[ "${CI_PROJECT_ID}" = "${CANONICAL_PROJECT_ID}" ]]; then
+ export UPLOAD_PACKAGE_FLAG="true"
+fi
+
+# Workhorse constants
+export GITLAB_WORKHORSE_BINARIES_LIST="gitlab-resize-image gitlab-zip-cat gitlab-zip-metadata gitlab-workhorse"
+export GITLAB_WORKHORSE_PACKAGE_FILES_LIST="${GITLAB_WORKHORSE_BINARIES_LIST} WORKHORSE_TREE"
+export GITLAB_WORKHORSE_TREE=${GITLAB_WORKHORSE_TREE:-$(git rev-parse HEAD:workhorse)}
+export GITLAB_WORKHORSE_PACKAGE="workhorse-${GITLAB_WORKHORSE_TREE}.tar.gz"
+export GITLAB_WORKHORSE_PACKAGE_URL="${API_PACKAGES_BASE_URL}/${GITLAB_WORKHORSE_FOLDER}/${GITLAB_WORKHORSE_TREE}/${GITLAB_WORKHORSE_PACKAGE}"
+
+# Assets constants
+export GITLAB_ASSETS_PATHS_LIST="cached-assets-hash.txt app/assets/javascripts/locale/**/app.js public/assets/"
+export GITLAB_ASSETS_PACKAGE_VERSION="v2" # bump this version each time GITLAB_ASSETS_PATHS_LIST is changed
+
+export GITLAB_EDITION="ee"
+if [[ "${FOSS_ONLY:-no}" = "1" ]] || [[ "${CI_PROJECT_NAME}" = "gitlab-foss" ]]; then
+ export GITLAB_EDITION="foss"
+fi
+
+export GITLAB_ASSETS_HASH="${GITLAB_ASSETS_HASH:-"NO_HASH"}"
+export GITLAB_ASSETS_PACKAGE="assets-${NODE_ENV}-${GITLAB_EDITION}-${GITLAB_ASSETS_HASH}-${GITLAB_ASSETS_PACKAGE_VERSION}.tar.gz"
+export GITLAB_ASSETS_PACKAGE_URL="${API_PACKAGES_BASE_URL}/assets/${NODE_ENV}-${GITLAB_EDITION}-${GITLAB_ASSETS_HASH}/${GITLAB_ASSETS_PACKAGE}"
+
+# Generic helper functions
+function archive_doesnt_exist() {
+ local package_url="${1}"
+
+ status=$(curl -I --silent --retry 3 --output /dev/null -w "%{http_code}" "${package_url}")
+
+ [[ "${status}" != "200" ]]
+}
+
+function create_package() {
+ local archive_filename="${1}"
+ local paths_to_archive="${2}"
+ local tar_working_folder="${3:-.}"
+
+ echoinfo "Running 'tar -czvf ${archive_filename} -C ${tar_working_folder} ${paths_to_archive}'"
+ tar -czf ${archive_filename} -C ${tar_working_folder} ${paths_to_archive}
+ du -h ${archive_filename}
+}
+
+function upload_package() {
+ local archive_filename="${1}"
+ local package_url="${2}"
+ local token_header="${CURL_TOKEN_HEADER}"
+ local token="${CI_JOB_TOKEN}"
+
+ if [[ "${UPLOAD_PACKAGE_FLAG}" = "false" ]]; then
+ echoerr "The archive ${archive_filename} isn't supposed to be uploaded for this instance (${CI_SERVER_HOST}) & project (${CI_PROJECT_PATH})!"
+ exit 1
+ fi
+
+ echoinfo "Uploading ${archive_filename} to ${package_url} ..."
+ curl --fail --silent --retry 3 --header "${token_header}: ${token}" --upload-file "${archive_filename}" "${package_url}"
+}
+
+function read_curl_package() {
+ local package_url="${1}"
+ local token_header="${CURL_TOKEN_HEADER}"
+ local token="${CI_JOB_TOKEN}"
+
+ echoinfo "Downloading from ${package_url} ..."
+
+ curl --fail --silent --retry 3 --header "${token_header}: ${token}" "${package_url}"
+}
+
+function extract_package() {
+ local tar_working_folder="${1:-.}"
+ mkdir -p "${tar_working_folder}"
+
+ echoinfo "Extracting archive to ${tar_working_folder}"
+
+ tar -xz -C ${tar_working_folder} < /dev/stdin
+}
+
+# Workhorse functions
+function gitlab_workhorse_archive_doesnt_exist() {
+ archive_doesnt_exist "${GITLAB_WORKHORSE_PACKAGE_URL}"
+}
+
+function create_gitlab_workhorse_package() {
+ create_package "${GITLAB_WORKHORSE_PACKAGE}" "${GITLAB_WORKHORSE_FOLDER}" "${TMP_TEST_FOLDER}"
+}
+
+function upload_gitlab_workhorse_package() {
+ upload_package "${GITLAB_WORKHORSE_PACKAGE}" "${GITLAB_WORKHORSE_PACKAGE_URL}"
+}
+
+function download_and_extract_gitlab_workhorse_package() {
+ read_curl_package "${GITLAB_WORKHORSE_PACKAGE_URL}" | extract_package "${TMP_TEST_FOLDER}"
+}
+
+function select_gitlab_workhorse_essentials() {
+ local tmp_path="${CI_PROJECT_DIR}/tmp/${GITLAB_WORKHORSE_FOLDER}"
+ local original_gitlab_workhorse_path="${TMP_TEST_GITLAB_WORKHORSE_PATH}"
+
+ mkdir -p ${tmp_path}
+ cd ${original_gitlab_workhorse_path} && mv ${GITLAB_WORKHORSE_PACKAGE_FILES_LIST} ${tmp_path} && cd -
+ rm -rf ${original_gitlab_workhorse_path}
+
+ # Move the temp folder to its final destination
+ mv ${tmp_path} ${TMP_TEST_FOLDER}
+}
+
+# Assets functions
+function gitlab_assets_archive_doesnt_exist() {
+ archive_doesnt_exist "${GITLAB_ASSETS_PACKAGE_URL}"
+}
+
+function download_and_extract_gitlab_assets() {
+ read_curl_package "${GITLAB_ASSETS_PACKAGE_URL}" | extract_package
+}
+
+function create_gitlab_assets_package() {
+ create_package "${GITLAB_ASSETS_PACKAGE}" "${GITLAB_ASSETS_PATHS_LIST}"
+}
+
+function upload_gitlab_assets_package() {
+ upload_package "${GITLAB_ASSETS_PACKAGE}" "${GITLAB_ASSETS_PACKAGE_URL}"
+}
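Note: a typical producer job would source this file and guard the expensive work with the existence check, along the lines of `if gitlab_assets_archive_doesnt_exist; then create_gitlab_assets_package && upload_gitlab_assets_package; fi` (a sketch; the actual .gitlab-ci.yml wiring is outside this diff), while consumer jobs simply call download_and_extract_gitlab_assets.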
diff --git a/scripts/gitlab_workhorse_component_helpers.sh b/scripts/gitlab_workhorse_component_helpers.sh
deleted file mode 100644
index ebd43a125b9..00000000000
--- a/scripts/gitlab_workhorse_component_helpers.sh
+++ /dev/null
@@ -1,74 +0,0 @@
-#!/usr/bin/env bash
-
-set -euo pipefail
-
-export CURL_TOKEN_HEADER="${CURL_TOKEN_HEADER:-"JOB-TOKEN"}"
-export GITLAB_WORKHORSE_BINARIES_LIST="gitlab-resize-image gitlab-zip-cat gitlab-zip-metadata gitlab-workhorse"
-export GITLAB_WORKHORSE_PACKAGE_FILES_LIST="${GITLAB_WORKHORSE_BINARIES_LIST} WORKHORSE_TREE"
-export GITLAB_WORKHORSE_TREE=${GITLAB_WORKHORSE_TREE:-$(git rev-parse HEAD:workhorse)}
-export GITLAB_WORKHORSE_PACKAGE="workhorse-${GITLAB_WORKHORSE_TREE}.tar.gz"
-export GITLAB_WORKHORSE_PACKAGE_URL="${CI_API_V4_URL}/projects/${CI_PROJECT_ID}/packages/generic/${GITLAB_WORKHORSE_FOLDER}/${GITLAB_WORKHORSE_TREE}/${GITLAB_WORKHORSE_PACKAGE}"
-
-function gitlab_workhorse_archive_doesnt_exist() {
- local package_url="${GITLAB_WORKHORSE_PACKAGE_URL}"
-
- status=$(curl -I --silent --retry 3 --output /dev/null -w "%{http_code}" "${package_url}")
-
- [[ "${status}" != "200" ]]
-}
-
-function create_gitlab_workhorse_package() {
- local archive_filename="${GITLAB_WORKHORSE_PACKAGE}"
- local folder_to_archive="${GITLAB_WORKHORSE_FOLDER}"
- local workhorse_folder_path="${TMP_TEST_GITLAB_WORKHORSE_PATH}"
- local tar_working_folder="${TMP_TEST_FOLDER}"
-
- echoinfo "Running 'tar -czvf ${archive_filename} -C ${tar_working_folder} ${folder_to_archive}'"
- tar -czvf ${archive_filename} -C ${tar_working_folder} ${folder_to_archive}
- du -h ${archive_filename}
-}
-
-function extract_gitlab_workhorse_package() {
- local tar_working_folder="${TMP_TEST_FOLDER}"
- mkdir -p "${tar_working_folder}"
-
- echoinfo "Extracting archive to ${tar_working_folder}"
-
- tar -xzv -C ${tar_working_folder} < /dev/stdin
-}
-
-function upload_gitlab_workhorse_package() {
- local archive_filename="${GITLAB_WORKHORSE_PACKAGE}"
- local package_url="${GITLAB_WORKHORSE_PACKAGE_URL}"
- local token_header="${CURL_TOKEN_HEADER}"
- local token="${CI_JOB_TOKEN}"
-
- echoinfo "Uploading ${archive_filename} to ${package_url} ..."
- curl --fail --silent --retry 3 --header "${token_header}: ${token}" --upload-file "${archive_filename}" "${package_url}"
-}
-
-function read_curl_gitlab_workhorse_package() {
- local package_url="${GITLAB_WORKHORSE_PACKAGE_URL}"
- local token_header="${CURL_TOKEN_HEADER}"
- local token="${CI_JOB_TOKEN}"
-
- echoinfo "Downloading from ${package_url} ..."
-
- curl --fail --silent --retry 3 --header "${token_header}: ${token}" "${package_url}"
-}
-
-function download_and_extract_gitlab_workhorse_package() {
- read_curl_gitlab_workhorse_package | extract_gitlab_workhorse_package
-}
-
-function select_gitlab_workhorse_essentials() {
- local tmp_path="${CI_PROJECT_DIR}/tmp/${GITLAB_WORKHORSE_FOLDER}"
- local original_gitlab_workhorse_path="${TMP_TEST_GITLAB_WORKHORSE_PATH}"
-
- mkdir -p ${tmp_path}
- cd ${original_gitlab_workhorse_path} && mv ${GITLAB_WORKHORSE_PACKAGE_FILES_LIST} ${tmp_path} && cd -
- rm -rf ${original_gitlab_workhorse_path}
-
- # Move the temp folder to its final destination
- mv ${tmp_path} ${TMP_TEST_FOLDER}
-}
diff --git a/scripts/glfm/verify-all-generated-files-are-up-to-date.rb b/scripts/glfm/verify-all-generated-files-are-up-to-date.rb
new file mode 100755
index 00000000000..7710997e3ed
--- /dev/null
+++ b/scripts/glfm/verify-all-generated-files-are-up-to-date.rb
@@ -0,0 +1,5 @@
+#!/usr/bin/env ruby
+# frozen_string_literal: true
+
+require_relative '../lib/glfm/verify_all_generated_files_are_up_to_date'
+Glfm::VerifyAllGeneratedFilesAreUpToDate.new.process
diff --git a/scripts/lib/glfm/constants.rb b/scripts/lib/glfm/constants.rb
index e5790bbdd88..d020d2fec5c 100644
--- a/scripts/lib/glfm/constants.rb
+++ b/scripts/lib/glfm/constants.rb
@@ -10,25 +10,29 @@ module Glfm
# GitHub Flavored Markdown specification file
GHFM_SPEC_TXT_URI = 'https://raw.githubusercontent.com/github/cmark-gfm/master/test/spec.txt'
GHFM_SPEC_VERSION = '0.29'
- GHFM_SPEC_TXT_FILENAME = "ghfm_spec_v_#{GHFM_SPEC_VERSION}.txt"
- GHFM_SPEC_TXT_PATH = specification_path.join('input/github_flavored_markdown', GHFM_SPEC_TXT_FILENAME)
+ GHFM_SPEC_MD_FILENAME = "ghfm_spec_v_#{GHFM_SPEC_VERSION}.md"
+ GHFM_SPEC_MD_PATH = specification_path.join('input/github_flavored_markdown', GHFM_SPEC_MD_FILENAME)
# GitLab Flavored Markdown specification files
specification_input_glfm_path = specification_path.join('input/gitlab_flavored_markdown')
- GLFM_INTRO_TXT_PATH = specification_input_glfm_path.join('glfm_intro.txt')
- GLFM_EXAMPLES_TXT_PATH = specification_input_glfm_path.join('glfm_canonical_examples.txt')
+ GLFM_INTRO_MD_PATH = specification_input_glfm_path.join('glfm_intro.md')
+ GLFM_OFFICIAL_SPECIFICATION_EXAMPLES_MD_PATH =
+ specification_input_glfm_path.join('glfm_official_specification_examples.md')
+ GLFM_INTERNAL_EXTENSION_EXAMPLES_MD_PATH = specification_input_glfm_path.join('glfm_internal_extension_examples.md')
GLFM_EXAMPLE_STATUS_YML_PATH = specification_input_glfm_path.join('glfm_example_status.yml')
GLFM_EXAMPLE_METADATA_YML_PATH =
specification_input_glfm_path.join('glfm_example_metadata.yml')
GLFM_EXAMPLE_NORMALIZATIONS_YML_PATH = specification_input_glfm_path.join('glfm_example_normalizations.yml')
- GLFM_SPEC_TXT_PATH = specification_path.join('output/spec.txt')
+ GLFM_SPEC_OUTPUT_PATH = specification_path.join('output')
+ GLFM_SPEC_TXT_PATH = GLFM_SPEC_OUTPUT_PATH.join('spec.txt')
+ GLFM_SPEC_HTML_PATH = GLFM_SPEC_OUTPUT_PATH.join('spec.html')
# Example Snapshot (ES) files
- es_fixtures_path = File.expand_path("../../../glfm_specification/example_snapshots", __dir__)
- ES_EXAMPLES_INDEX_YML_PATH = File.join(es_fixtures_path, 'examples_index.yml')
- ES_MARKDOWN_YML_PATH = File.join(es_fixtures_path, 'markdown.yml')
- ES_HTML_YML_PATH = File.join(es_fixtures_path, 'html.yml')
- ES_PROSEMIRROR_JSON_YML_PATH = File.join(es_fixtures_path, 'prosemirror_json.yml')
+ EXAMPLE_SNAPSHOTS_PATH = File.expand_path("../../../glfm_specification/example_snapshots", __dir__)
+ ES_EXAMPLES_INDEX_YML_PATH = File.join(EXAMPLE_SNAPSHOTS_PATH, 'examples_index.yml')
+ ES_MARKDOWN_YML_PATH = File.join(EXAMPLE_SNAPSHOTS_PATH, 'markdown.yml')
+ ES_HTML_YML_PATH = File.join(EXAMPLE_SNAPSHOTS_PATH, 'html.yml')
+ ES_PROSEMIRROR_JSON_YML_PATH = File.join(EXAMPLE_SNAPSHOTS_PATH, 'prosemirror_json.yml')
# Other constants used for processing files
GLFM_SPEC_TXT_HEADER = <<~MARKDOWN
diff --git a/scripts/lib/glfm/render_static_html.rb b/scripts/lib/glfm/render_static_html.rb
index 8d72aec7c3b..6af73cd845d 100644
--- a/scripts/lib/glfm/render_static_html.rb
+++ b/scripts/lib/glfm/render_static_html.rb
@@ -20,21 +20,26 @@ require_relative 'shared'
# Factorybot factory methods to create persisted model objects with stable
# and consistent data values, to ensure consistent example snapshot HTML
# across various machines and environments. RSpec also makes it easy to invoke
-# the API # and obtain the response.
+# the API and obtain the response.
#
# It is intended to be invoked as a helper subprocess from the `update_example_snapshots.rb`
# script class. It's not intended to be run or used directly. This usage is also reinforced
# by not naming the file with a `_spec.rb` ending.
-RSpec.describe 'Render Static HTML', :api, type: :request do # rubocop:disable RSpec/TopLevelDescribePath
+RSpec.describe 'Render Static HTML', :api, type: :request do
include Glfm::Constants
include Glfm::Shared
# noinspection RailsParamDefResolve (RubyMine can't find the shared context from this file location)
include_context 'with GLFM example snapshot fixtures'
- it 'can create a project dependency graph using factories' do
+ it do
markdown_hash = YAML.safe_load(File.open(ENV.fetch('INPUT_MARKDOWN_YML_PATH')), symbolize_names: true)
- metadata_hash = YAML.safe_load(File.open(ENV.fetch('INPUT_METADATA_YML_PATH')), symbolize_names: true)
+ metadata_hash =
+ if input_metadata_yml_path = ENV['INPUT_METADATA_YML_PATH']
+ YAML.safe_load(File.open(input_metadata_yml_path), symbolize_names: true) || {}
+ else
+ {}
+ end
# NOTE: We cannot parallelize this loop like the Javascript WYSIWYG example generation does,
# because the rspec `post` API cannot be parallelized (it is not thread-safe, it can't find
@@ -66,8 +71,7 @@ RSpec.describe 'Render Static HTML', :api, type: :request do # rubocop:disable R
private
def write_output_file(static_html_hash)
- tmpfile = File.open(ENV.fetch('OUTPUT_STATIC_HTML_TEMPFILE_PATH'), 'w')
yaml_string = dump_yaml_with_formatting(static_html_hash)
- write_file(tmpfile, yaml_string)
+ write_file(ENV.fetch('OUTPUT_STATIC_HTML_TEMPFILE_PATH'), yaml_string)
end
end
diff --git a/scripts/lib/glfm/update_example_snapshots.rb b/scripts/lib/glfm/update_example_snapshots.rb
index 7dc0d0f7c4b..9075260e748 100644
--- a/scripts/lib/glfm/update_example_snapshots.rb
+++ b/scripts/lib/glfm/update_example_snapshots.rb
@@ -5,11 +5,12 @@ require 'yaml'
require 'psych'
require 'tempfile'
require 'open3'
+require 'active_support/core_ext/enumerable'
require_relative 'constants'
require_relative 'shared'
require_relative 'parse_examples'
-# IMPORTANT NOTE: See https://docs.gitlab.com/ee/development/gitlab_flavored_markdown/specification_guide/
+# IMPORTANT NOTE: See https://docs.gitlab.com/ee/development/gitlab_flavored_markdown/specification_guide/#update-example-snapshotsrb-script
# for details on the implementation and usage of this script. This developers guide
# contains diagrams and documentation of this script,
# including explanations and examples of all files it reads and writes.
@@ -29,8 +30,6 @@ module Glfm
def process(skip_static_and_wysiwyg: false)
output('Updating example snapshots...')
- output('(Skipping static HTML generation)') if skip_static_and_wysiwyg
-
output("Reading #{GLFM_SPEC_TXT_PATH}...")
glfm_spec_txt_lines = File.open(GLFM_SPEC_TXT_PATH).readlines
@@ -115,11 +114,13 @@ module Glfm
def write_snapshot_example_files(all_examples, skip_static_and_wysiwyg:)
output("Reading #{GLFM_EXAMPLE_STATUS_YML_PATH}...")
- glfm_examples_statuses = YAML.safe_load(File.open(GLFM_EXAMPLE_STATUS_YML_PATH), symbolize_names: true)
+ glfm_examples_statuses = YAML.safe_load(File.open(GLFM_EXAMPLE_STATUS_YML_PATH), symbolize_names: true) || {}
validate_glfm_example_status_yml(glfm_examples_statuses)
write_examples_index_yml(all_examples)
+ validate_glfm_config_file_example_names(all_examples)
+
write_markdown_yml(all_examples)
if skip_static_and_wysiwyg
@@ -151,6 +152,50 @@ module Glfm
end
end
+ def validate_glfm_config_file_example_names(all_examples)
+ valid_example_names = all_examples.pluck(:name).map(&:to_sym) # rubocop:disable CodeReuse/ActiveRecord
+
+ # We are re-reading GLFM_EXAMPLE_STATUS_YML_PATH here, but that's OK, it's a small file, and rereading it
+ # allows us to handle it in the same loop as the other manually-curated config files.
+ [
+ GLFM_EXAMPLE_STATUS_YML_PATH,
+ GLFM_EXAMPLE_METADATA_YML_PATH,
+ GLFM_EXAMPLE_NORMALIZATIONS_YML_PATH
+ ].each do |path|
+ output("Reading #{path}...")
+ io = File.open(path)
+ config_file_examples = YAML.safe_load(io, symbolize_names: true, aliases: true)
+
+ # Skip validation if the config file is empty
+ next unless config_file_examples
+
+ config_file_example_names = config_file_examples.keys
+
+ # Validate that all example names in the config file refer to an existing example in `examples_index.yml`,
+ # unless the name starts with the special prefix `00_`, which is reserved for use as YAML anchors.
+ invalid_name = config_file_example_names.detect do |name|
+ !name.start_with?('00_') && valid_example_names.exclude?(name)
+ end
+ next unless invalid_name
+
+ # NOTE: The extra spaces before punctuation in the error message allow for easier copy/pasting of the paths.
+ err_msg =
+ <<~TXT
+
+ Error in input specification config file #{path} :
+
+ Config file entry named #{invalid_name}
+ does not have a corresponding example entry in
+ #{ES_EXAMPLES_INDEX_YML_PATH} .
+
+ Please delete or rename this config file entry.
+
+ If this entry is being used as a YAML anchor, please rename it to start with '00_'.
+ TXT
+ raise err_msg
+ end
+ end
+
def write_examples_index_yml(all_examples)
generate_and_write_for_all_examples(
all_examples, ES_EXAMPLES_INDEX_YML_PATH, literal_scalars: false
@@ -219,7 +264,7 @@ module Glfm
# NOTE 2: We run this as an RSpec process, for the same reasons we run via Jest process below:
# because that's the easiest way to ensure a reliable, fully-configured environment in which
- # to execute the markdown-generation logic. Also, in the static/backend case, Rspec
+ # to execute the markdown-processing logic. Also, in the static/backend case, Rspec
# provides the easiest and most reliable way to generate example data via Factorybot
# creation of stable model records. This ensures consistent snapshot values across
# machines/environments.
@@ -244,7 +289,7 @@ module Glfm
wysiwyg_html_and_json_tempfile_path = Dir::Tmpname.create(WYSIWYG_HTML_AND_JSON_TEMPFILE_BASENAME) {}
ENV['OUTPUT_WYSIWYG_HTML_AND_JSON_TEMPFILE_PATH'] = wysiwyg_html_and_json_tempfile_path
- cmd = %(yarn jest --testMatch '**/render_wysiwyg_html_and_json.js' #{__dir__}/render_wysiwyg_html_and_json.js)
+ cmd = "yarn jest --testMatch '**/render_wysiwyg_html_and_json.js' #{__dir__}/render_wysiwyg_html_and_json.js"
run_external_cmd(cmd)
output("Reading generated WYSIWYG HTML and prosemirror JSON from tempfile " \
diff --git a/scripts/lib/glfm/update_specification.rb b/scripts/lib/glfm/update_specification.rb
index 73c23d40de5..c7264547e44 100644
--- a/scripts/lib/glfm/update_specification.rb
+++ b/scripts/lib/glfm/update_specification.rb
@@ -2,26 +2,45 @@
require 'fileutils'
require 'open-uri'
require 'pathname'
+require 'tempfile'
+require 'yaml'
require_relative 'constants'
require_relative 'shared'
+# IMPORTANT NOTE: See https://docs.gitlab.com/ee/development/gitlab_flavored_markdown/specification_guide/#update-specificationrb-script
+# for details on the implementation and usage of this script. This developers guide
+# contains diagrams and documentation of this script,
+# including explanations and examples of all files it reads and writes.
+#
+# Also note that this script is intentionally written in a pure-functional (not OO) style,
+# with no dependencies on Rails or the GitLab libraries. These choices are intended to make
+# it faster and easier to test and debug.
module Glfm
class UpdateSpecification
include Constants
include Shared
- def process
+ def process(skip_spec_html_generation: false)
output('Updating specification...')
- ghfm_spec_txt_lines = load_ghfm_spec_txt
- glfm_spec_txt_string = build_glfm_spec_txt(ghfm_spec_txt_lines)
+
+ ghfm_spec_lines = load_ghfm_spec
+ glfm_spec_txt_string = build_glfm_spec_txt(ghfm_spec_lines)
write_glfm_spec_txt(glfm_spec_txt_string)
+
+ if skip_spec_html_generation
+ output("Skipping GLFM spec.html generation...")
+ return
+ end
+
+ glfm_spec_html_string = generate_glfm_spec_html(glfm_spec_txt_string)
+ write_glfm_spec_html(glfm_spec_html_string)
end
private
- def load_ghfm_spec_txt
+ def load_ghfm_spec
# We only re-download the GitHub Flavored Markdown specification if the
- # UPDATE_GHFM_SPEC_TXT environment variable is set to true, which should only
+ # UPDATE_GHFM_SPEC_MD environment variable is set to true, which should only
# ever be done manually and locally, never in CI. This provides some security
# protection against a possible injection attack vector, if the GitHub-hosted
# version of the spec is ever temporarily compromised with an injection attack.
@@ -29,40 +48,44 @@ module Glfm
# This also avoids doing external network access to download the file
# in CI jobs, which can avoid potentially flaky builds if the GitHub-hosted
# version of the file is temporarily unavailable.
- if ENV['UPDATE_GHFM_SPEC_TXT'] == 'true'
- download_and_write_ghfm_spec_txt
+ if ENV['UPDATE_GHFM_SPEC_MD'] == 'true'
+ update_ghfm_spec_md
else
- read_existing_ghfm_spec_txt
+ read_existing_ghfm_spec_md
end
end
- def read_existing_ghfm_spec_txt
- output("Reading existing #{GHFM_SPEC_TXT_PATH}...")
- File.open(GHFM_SPEC_TXT_PATH).readlines
+ def read_existing_ghfm_spec_md
+ output("Reading existing #{GHFM_SPEC_MD_PATH}...")
+ File.open(GHFM_SPEC_MD_PATH).readlines
end
- def download_and_write_ghfm_spec_txt
+ def update_ghfm_spec_md
output("Downloading #{GHFM_SPEC_TXT_URI}...")
- ghfm_spec_txt_uri_io = URI.open(GHFM_SPEC_TXT_URI)
+ # NOTE: We use `URI.parse` to avoid RuboCop warning "Security/Open",
+ # even though we are using a trusted URI from a string literal constant.
+ # See https://gitlab.com/gitlab-org/gitlab/-/merge_requests/98656#note_1138595002 for details.
+ ghfm_spec_txt_uri_parsed = URI.parse(GHFM_SPEC_TXT_URI)
+ ghfm_spec_txt_uri_io = ghfm_spec_txt_uri_parsed.open
# Read IO stream into an array of lines for easy processing later
- ghfm_spec_txt_lines = ghfm_spec_txt_uri_io.readlines
- raise "Unable to read lines from #{GHFM_SPEC_TXT_URI}" if ghfm_spec_txt_lines.empty?
+ ghfm_spec_lines = ghfm_spec_txt_uri_io.readlines
+ raise "Unable to read lines from #{GHFM_SPEC_TXT_URI}" if ghfm_spec_lines.empty?
# Make sure the GHFM spec version has not changed
- validate_expected_spec_version!(ghfm_spec_txt_lines[2])
+ validate_expected_spec_version!(ghfm_spec_lines[2])
# Reset IO stream and re-read into a single string for easy writing
# noinspection RubyNilAnalysis
ghfm_spec_txt_uri_io.seek(0)
- ghfm_spec_txt_string = ghfm_spec_txt_uri_io.read
- raise "Unable to read string from #{GHFM_SPEC_TXT_URI}" unless ghfm_spec_txt_string
+ ghfm_spec_string = ghfm_spec_txt_uri_io.read
+ raise "Unable to read string from #{GHFM_SPEC_TXT_URI}" unless ghfm_spec_string
- output("Writing #{GHFM_SPEC_TXT_PATH}...")
- GHFM_SPEC_TXT_PATH.dirname.mkpath
- write_file(GHFM_SPEC_TXT_PATH, ghfm_spec_txt_string)
+ output("Writing #{GHFM_SPEC_MD_PATH}...")
+ GHFM_SPEC_MD_PATH.dirname.mkpath
+ write_file(GHFM_SPEC_MD_PATH, ghfm_spec_string)
- ghfm_spec_txt_lines
+ ghfm_spec_lines
end
def validate_expected_spec_version!(version_line)
@@ -76,7 +99,7 @@ module Glfm
glfm_spec_txt_lines = ghfm_spec_txt_lines.dup
replace_header(glfm_spec_txt_lines)
replace_intro_section(glfm_spec_txt_lines)
- insert_examples_txt(glfm_spec_txt_lines)
+ insert_examples(glfm_spec_txt_lines)
glfm_spec_txt_lines.join('')
end
@@ -85,13 +108,13 @@ module Glfm
end
def replace_intro_section(spec_txt_lines)
- glfm_intro_txt_lines = File.open(GLFM_INTRO_TXT_PATH).readlines
- raise "Unable to read lines from #{GLFM_INTRO_TXT_PATH}" if glfm_intro_txt_lines.empty?
+ glfm_intro_md_lines = File.open(GLFM_INTRO_MD_PATH).readlines
+ raise "Unable to read lines from #{GLFM_INTRO_MD_PATH}" if glfm_intro_md_lines.empty?
ghfm_intro_header_begin_index = spec_txt_lines.index do |line|
line =~ INTRODUCTION_HEADER_LINE_TEXT
end
- raise "Unable to locate introduction header line in #{GHFM_SPEC_TXT_PATH}" if ghfm_intro_header_begin_index.nil?
+ raise "Unable to locate introduction header line in #{GHFM_SPEC_MD_PATH}" if ghfm_intro_header_begin_index.nil?
# Find the index of the next header after the introduction header, starting from the index
# of the introduction header; this is the length of the intro section
@@ -100,20 +123,29 @@ module Glfm
end
# Replace the intro section with the GitLab flavored Markdown intro section
- spec_txt_lines[ghfm_intro_header_begin_index, ghfm_intro_section_length] = glfm_intro_txt_lines
+ spec_txt_lines[ghfm_intro_header_begin_index, ghfm_intro_section_length] = glfm_intro_md_lines
end
- def insert_examples_txt(spec_txt_lines)
- glfm_examples_txt_lines = File.open(GLFM_EXAMPLES_TXT_PATH).readlines
- raise "Unable to read lines from #{GLFM_EXAMPLES_TXT_PATH}" if glfm_examples_txt_lines.empty?
+ def insert_examples(spec_txt_lines)
+ official_spec_lines = File.open(GLFM_OFFICIAL_SPECIFICATION_EXAMPLES_MD_PATH).readlines
+ raise "Unable to read lines from #{GLFM_OFFICIAL_SPECIFICATION_EXAMPLES_MD_PATH}" if official_spec_lines.empty?
+
+ internal_extension_lines = File.open(GLFM_INTERNAL_EXTENSION_EXAMPLES_MD_PATH).readlines
+ raise "Unable to read lines from #{GLFM_INTERNAL_EXTENSION_EXAMPLES_MD_PATH}" if internal_extension_lines.empty?
ghfm_end_tests_comment_index = spec_txt_lines.index do |line|
line =~ END_TESTS_COMMENT_LINE_TEXT
end
- raise "Unable to locate 'END TESTS' comment line in #{GHFM_SPEC_TXT_PATH}" if ghfm_end_tests_comment_index.nil?
+ raise "Unable to locate 'END TESTS' comment line in #{GHFM_SPEC_MD_PATH}" if ghfm_end_tests_comment_index.nil?
# Insert the GLFM examples before the 'END TESTS' comment line
- spec_txt_lines[ghfm_end_tests_comment_index - 1] = ["\n", glfm_examples_txt_lines, "\n"].flatten
+ spec_txt_lines[ghfm_end_tests_comment_index - 1] = [
+ "\n",
+ official_spec_lines,
+ "\n",
+ internal_extension_lines,
+ "\n"
+ ].flatten
spec_txt_lines
end
@@ -123,5 +155,49 @@ module Glfm
FileUtils.mkdir_p(Pathname.new(GLFM_SPEC_TXT_PATH).dirname)
write_file(GLFM_SPEC_TXT_PATH, glfm_spec_txt_string)
end
+
+ def generate_glfm_spec_html(glfm_spec_txt_string)
+ output("Generating spec.html from spec.txt markdown...")
+
+ input_markdown_yml_string = <<~MARKDOWN
+ ---
+ spec_txt: |
+ #{glfm_spec_txt_string.gsub(/^/, ' ')}
+ MARKDOWN
+
+ # NOTE: We must copy the input YAML file used by the `render_static_html.rb`
+ # to a separate temporary file in order for the script to read them, because it is run in
+ # a separate subprocess, and during unit testing we are unable to substitute the mock
+ # StringIO when reading the input files in the subprocess.
+ ENV['INPUT_MARKDOWN_YML_PATH'] = Dir::Tmpname.create(MARKDOWN_TEMPFILE_BASENAME) do |path|
+ write_file(path, input_markdown_yml_string)
+ end
+
+ # NOTE 1: We shell out to perform the conversion of markdown to static HTML by invoking a
+ # separate subprocess. This allows us to avoid using the Rails API or environment in this
+ # script, which makes developing and running the unit tests for this script much faster,
+ # because they can use 'fast_spec_helper' which does not require the entire Rails environment.
+
+ # NOTE 2: We run this as an RSpec process, for the same reasons we run the Jest process in
+ # `update_example_snapshots.rb`: because that's the easiest way to ensure a reliable, fully-configured
+ # environment in which to execute the markdown-processing logic. Also, in the static/backend case,
+ # RSpec provides the easiest and most reliable way to generate consistent HTML via Factorybot
+ # creation of stable model records.
+
+ # Dir::Tmpname.create requires a block, but we are using the non-block form to get the path
+ # via the return value, so we pass an empty block to avoid an error.
+ static_html_tempfile_path = Dir::Tmpname.create(STATIC_HTML_TEMPFILE_BASENAME) {}
+ ENV['OUTPUT_STATIC_HTML_TEMPFILE_PATH'] = static_html_tempfile_path
+
+ cmd = %(bin/rspec #{__dir__}/render_static_html.rb)
+ run_external_cmd(cmd)
+
+ output("Reading generated spec.html from tempfile #{static_html_tempfile_path}...")
+ YAML.safe_load(File.open(static_html_tempfile_path), symbolize_names: true).fetch(:spec_txt)
+ end
+
+ def write_glfm_spec_html(glfm_spec_html_string)
+ output("Writing #{GLFM_SPEC_TXT_PATH}...")
+ FileUtils.mkdir_p(Pathname.new(GLFM_SPEC_HTML_PATH).dirname)
+ write_file(GLFM_SPEC_HTML_PATH, "#{glfm_spec_html_string}\n")
+ end
end
end
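Note: generate_glfm_spec_html wraps the entire spec.txt markdown in a one-key YAML document; the gsub(/^/, ...) call prefixes every line with indentation so it nests under the spec_txt: | block scalar, roughly:

    ---
    spec_txt: |
      # Introduction
      ...

render_static_html.rb reads that tempfile, renders the markdown through the full Rails pipeline, and writes the resulting HTML back to another YAML tempfile under the same :spec_txt key, which this script then fetches.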
diff --git a/scripts/lib/glfm/verify_all_generated_files_are_up_to_date.rb b/scripts/lib/glfm/verify_all_generated_files_are_up_to_date.rb
new file mode 100644
index 00000000000..0b824fc589d
--- /dev/null
+++ b/scripts/lib/glfm/verify_all_generated_files_are_up_to_date.rb
@@ -0,0 +1,48 @@
+# frozen_string_literal: true
+require_relative 'constants'
+require_relative 'shared'
+
+# IMPORTANT NOTE: See https://docs.gitlab.com/ee/development/gitlab_flavored_markdown/specification_guide/#verify-all-generated-files-are-up-to-daterb-script
+# for details on the implementation and usage of this script. This developers guide
+# contains diagrams and documentation of this script,
+# including explanations and examples of all files it reads and writes.
+module Glfm
+ class VerifyAllGeneratedFilesAreUpToDate
+ include Constants
+ include Shared
+
+ def process
+ verify_cmd = "git status --porcelain #{GLFM_SPEC_OUTPUT_PATH} #{EXAMPLE_SNAPSHOTS_PATH}"
+ verify_cmd_output = run_external_cmd(verify_cmd)
+ unless verify_cmd_output.empty?
+ msg = "ERROR: Cannot run `#{__FILE__}` because `#{verify_cmd}` shows the following uncommitted changes:\n" \
+ "#{verify_cmd_output}"
+ raise(msg)
+ end
+
+ output('Verifying all generated files are up to date after running GLFM scripts...')
+
+ output("Running `yarn install --frozen-lockfile` to ensure `yarn check-dependencies` doesn't fail...")
+ run_external_cmd('yarn install --frozen-lockfile')
+
+ # noinspection RubyMismatchedArgumentType
+ update_specification_script = File.expand_path('../../glfm/update-specification.rb', __dir__)
+ # noinspection RubyMismatchedArgumentType
+ update_example_snapshots_script = File.expand_path('../../glfm/update-example-snapshots.rb', __dir__)
+
+ output("Running `#{update_specification_script}`...")
+ run_external_cmd(update_specification_script)
+
+ output("Running `#{update_example_snapshots_script}`...")
+ run_external_cmd(update_example_snapshots_script)
+
+ output("Running `#{verify_cmd}` to check that no modifications to generated files have occurred...")
+ verify_cmd_output = run_external_cmd(verify_cmd)
+
+ return if verify_cmd_output.empty?
+
+ raise "The following files were modified by running GLFM scripts. Please review, verify, and commit " \
+ "the changes:\n#{verify_cmd_output}"
+ end
+ end
+end
diff --git a/scripts/lint-json.sh b/scripts/lint-json.sh
new file mode 100755
index 00000000000..685661c789a
--- /dev/null
+++ b/scripts/lint-json.sh
@@ -0,0 +1,8 @@
+#!/usr/bin/env bash
+
+set -euo pipefail
+
+for file in "$@"
+do
+ yarn run -s jsonlint -p "$file" | perl -pe 'chomp if eof' | diff "$file" -
+done
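Note: for each file, jsonlint -p pretty-prints to stdout, perl -pe 'chomp if eof' drops the final newline from that output, and diff compares the result against the original file; any difference makes diff exit non-zero, which aborts the script via set -e, so linting passes only for files already stored in canonical pretty-printed form.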
diff --git a/scripts/migration_schema_validator.rb b/scripts/migration_schema_validator.rb
new file mode 100644
index 00000000000..08b904ce46c
--- /dev/null
+++ b/scripts/migration_schema_validator.rb
@@ -0,0 +1,117 @@
+# frozen_string_literal: true
+
+require 'open3'
+
+class MigrationSchemaValidator
+ FILENAME = 'db/structure.sql'
+
+ MIGRATION_DIRS = %w[db/migrate db/post_migrate].freeze
+
+ SCHEMA_VERSION_DIR = 'db/schema_migrations'
+
+ VERSION_DIGITS = 14
+
+ def validate!
+ if committed_migrations.empty?
+ puts "\e[32m No migrations found, skipping schema validation\e[0m"
+ return
+ end
+
+ validate_schema_on_rollback!
+ validate_schema_on_migrate!
+ validate_schema_version_files!
+ end
+
+ private
+
+ def validate_schema_on_rollback!
+ committed_migrations.reverse_each do |filename|
+ version = find_migration_version(filename)
+
+ run("scripts/db_tasks db:migrate:down VERSION=#{version}")
+ run("scripts/db_tasks db:schema:dump")
+ end
+
+ git_command = "git diff #{diff_target} -- #{FILENAME}"
+ base_message = "rollback of added migrations does not revert #{FILENAME} to previous state"
+
+ validate_clean_output!(git_command, base_message)
+ end
+
+ def validate_schema_on_migrate!
+ run("scripts/db_tasks db:migrate")
+ run("scripts/db_tasks db:schema:dump")
+
+ git_command = "git diff -- #{FILENAME}"
+ base_message = "the committed #{FILENAME} does not match the one generated by running added migrations"
+
+ validate_clean_output!(git_command, base_message)
+ end
+
+ def validate_schema_version_files!
+ git_command = "git add -A -n #{SCHEMA_VERSION_DIR}"
+ base_message = "the committed files in #{SCHEMA_VERSION_DIR} do not match those expected by the added migrations"
+
+ validate_clean_output!(git_command, base_message)
+ end
+
+ def committed_migrations
+ @committed_migrations ||= begin
+ git_command = "git diff --name-only --diff-filter=A #{diff_target} -- #{MIGRATION_DIRS.join(' ')}"
+
+ run(git_command).split("\n")
+ end
+ end
+
+ def diff_target
+ @diff_target ||= pipeline_for_merged_results? ? target_branch : merge_base
+ end
+
+ def merge_base
+ run("git merge-base #{target_branch} #{source_ref}")
+ end
+
+ def target_branch
+ ENV['CI_MERGE_REQUEST_TARGET_BRANCH_NAME'] || ENV['TARGET'] || ENV['CI_DEFAULT_BRANCH'] || 'master'
+ end
+
+ def source_ref
+ ENV['CI_COMMIT_SHA'] || 'HEAD'
+ end
+
+ def pipeline_for_merged_results?
+ ENV.key?('CI_MERGE_REQUEST_SOURCE_BRANCH_SHA')
+ end
+
+ def find_migration_version(filename)
+ file_basename = File.basename(filename)
+ version_match = /\A(?<version>\d{#{VERSION_DIGITS}})_/o.match(file_basename)
+
+ die "#{filename} has an invalid migration version" if version_match.nil?
+
+ version_match[:version]
+ end
+
+ def validate_clean_output!(command, base_message)
+ command_output = run(command)
+
+ return if command_output.empty?
+
+ die "#{base_message}:\n#{command_output}"
+ end
+
+ def die(message, error_code: 1)
+ puts "\e[31mError: #{message}\e[0m"
+ exit error_code
+ end
+
+ def run(cmd)
+ puts "\e[32m$ #{cmd}\e[37m"
+ stdout_str, stderr_str, status = Open3.capture3(cmd)
+ puts "#{stdout_str}#{stderr_str}\e[0m"
+
+ die "command failed: #{stderr_str}" unless status.success?
+
+ stdout_str.chomp
+ end
+end
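Note: a small illustration of the version extraction (the filenames are hypothetical):

    validator = MigrationSchemaValidator.new
    validator.send(:find_migration_version, 'db/migrate/20221020124042_add_widgets.rb')
    # => "20221020124042" (the leading VERSION_DIGITS = 14 digits)
    validator.send(:find_migration_version, 'db/migrate/bad_name.rb')
    # prints "Error: db/migrate/bad_name.rb has an invalid migration version" and exits 1 via #die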
diff --git a/scripts/packages/automated_cleanup.rb b/scripts/packages/automated_cleanup.rb
new file mode 100755
index 00000000000..2b5a0011079
--- /dev/null
+++ b/scripts/packages/automated_cleanup.rb
@@ -0,0 +1,126 @@
+#!/usr/bin/env ruby
+
+# frozen_string_literal: true
+
+require 'optparse'
+require 'gitlab'
+
+module Packages
+ class AutomatedCleanup
+ PACKAGES_PER_PAGE = 100
+
+ # $GITLAB_PROJECT_PACKAGES_CLEANUP_API_TOKEN => `Packages Cleanup` project token
+ def initialize(
+ project_path: ENV['CI_PROJECT_PATH'],
+ gitlab_token: ENV['GITLAB_PROJECT_PACKAGES_CLEANUP_API_TOKEN'],
+ api_endpoint: ENV['CI_API_V4_URL'],
+ options: {}
+ )
+ @project_path = project_path
+ @gitlab_token = gitlab_token
+ @api_endpoint = api_endpoint
+ @dry_run = options[:dry_run]
+
+ puts "Dry-run mode." if dry_run
+ end
+
+ def gitlab
+ @gitlab ||= begin
+ Gitlab.configure do |config|
+ config.endpoint = api_endpoint
+ config.private_token = gitlab_token
+ end
+
+ Gitlab
+ end
+ end
+
+ def perform_gitlab_package_cleanup!(package_name:, days_for_delete:)
+ puts "Checking for '#{package_name}' packages created at least #{days_for_delete} days ago..."
+
+ gitlab.project_packages(project_path,
+ package_type: 'generic',
+ package_name: package_name,
+ per_page: PACKAGES_PER_PAGE).auto_paginate do |package|
+ next unless package.name == package_name # the search is fuzzy, so we'd better check the actual package name
+
+ if old_enough(package, days_for_delete) && not_recently_downloaded(package, days_for_delete)
+ delete_package(package)
+ end
+ end
+ end
+
+ private
+
+ attr_reader :project_path, :gitlab_token, :api_endpoint, :dry_run
+
+ def delete_package(package)
+ print_package_state(package)
+ gitlab.delete_project_package(project_path, package.id) unless dry_run
+ rescue Gitlab::Error::Forbidden
+ puts "Package #{package_full_name(package)} is forbidden: skipping it"
+ end
+
+ def time_ago(days:)
+ Time.now - days * 24 * 3600
+ end
+
+ def old_enough(package, days_for_delete)
+ Time.parse(package.created_at) < time_ago(days: days_for_delete)
+ end
+
+ def not_recently_downloaded(package, days_for_delete)
+ package.last_downloaded_at.nil? ||
+ Time.parse(package.last_downloaded_at) < time_ago(days: days_for_delete)
+ end
+
+ def print_package_state(package)
+ download_text =
+ if package.last_downloaded_at
+ "last downloaded on #{package.last_downloaded_at}"
+ else
+ "never downloaded"
+ end
+
+ puts "\nPackage #{package_full_name(package)} (created on #{package.created_at}) was " \
+ "#{download_text}: deleting it.\n"
+ end
+
+ def package_full_name(package)
+ "'#{package.name}/#{package.version}'"
+ end
+ end
+end
+
+def timed(task)
+ start = Time.now
+ yield(self)
+ puts "#{task} finished in #{Time.now - start} seconds.\n"
+end
+
+if $PROGRAM_NAME == __FILE__
+ options = {
+ dry_run: false
+ }
+
+ OptionParser.new do |opts|
+ opts.on("-d", "--dry-run", "Whether to perform a dry-run or not.") do |value|
+ options[:dry_run] = true
+ end
+
+ opts.on("-h", "--help", "Prints this help") do
+ puts opts
+ exit
+ end
+ end.parse!
+
+ automated_cleanup = Packages::AutomatedCleanup.new(options: options)
+
+ timed('"gitlab-workhorse" packages cleanup') do
+ automated_cleanup.perform_gitlab_package_cleanup!(package_name: 'gitlab-workhorse', days_for_delete: 30)
+ end
+
+ timed('"assets" packages cleanup') do
+ automated_cleanup.perform_gitlab_package_cleanup!(package_name: 'assets', days_for_delete: 7)
+ end
+end
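Note: the script is directly executable; running scripts/packages/automated_cleanup.rb --dry-run prints which generic packages would be deleted (gitlab-workhorse packages older than 30 days and assets packages older than 7 days, skipping any downloaded within that window) without calling the delete API.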
diff --git a/scripts/perf/query_limiting_report.rb b/scripts/perf/query_limiting_report.rb
index 89abc1b301b..364cd6fc5d4 100755
--- a/scripts/perf/query_limiting_report.rb
+++ b/scripts/perf/query_limiting_report.rb
@@ -149,7 +149,7 @@ class QueryLimitingReport
end
end
-if $0 == __FILE__
+if $PROGRAM_NAME == __FILE__
options = QueryLimitingReport::DEFAULT_OPTIONS.dup
OptionParser.new do |opts|
diff --git a/scripts/pipeline_test_report_builder.rb b/scripts/pipeline_test_report_builder.rb
index 649b68427ea..90af0451864 100755
--- a/scripts/pipeline_test_report_builder.rb
+++ b/scripts/pipeline_test_report_builder.rb
@@ -128,7 +128,7 @@ class PipelineTestReportBuilder
end
end
-if $0 == __FILE__
+if $PROGRAM_NAME == __FILE__
options = Host::DEFAULT_OPTIONS.dup
OptionParser.new do |opts|
diff --git a/scripts/post_deployment_migrations_validator b/scripts/post_deployment_migrations_validator
new file mode 100755
index 00000000000..3df2f772197
--- /dev/null
+++ b/scripts/post_deployment_migrations_validator
@@ -0,0 +1,31 @@
+#!/usr/bin/env ruby
+
+# frozen_string_literal: true
+
+require_relative 'migration_schema_validator'
+
+class PostDeploymentMigrationsValidator < MigrationSchemaValidator
+ def validate!
+ if committed_migrations.empty?
+ puts "\e[32m No migrations found, skipping post-deployment migrations validation\e[0m"
+ return
+ end
+
+ rollback_committed_migrations
+
+ run("SKIP_POST_DEPLOYMENT_MIGRATIONS=true scripts/db_tasks db:migrate")
+ run("scripts/db_tasks db:migrate")
+ end
+
+ private
+
+ def rollback_committed_migrations
+ committed_migrations.reverse_each do |filename|
+ version = find_migration_version(filename)
+
+ run("scripts/db_tasks db:migrate:down VERSION=#{version}")
+ end
+ end
+end
+
+PostDeploymentMigrationsValidator.new.validate!
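Note: the overall sequence rolls the newly committed migrations back, replays db:migrate with SKIP_POST_DEPLOYMENT_MIGRATIONS=true, and then runs the full db:migrate; this checks that the regular migrations apply cleanly before the post-deployment ones, mirroring the two-phase way they are executed during deployments.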
diff --git a/scripts/review_apps/automated_cleanup.rb b/scripts/review_apps/automated_cleanup.rb
index e6efbca9e86..2440df6958d 100755
--- a/scripts/review_apps/automated_cleanup.rb
+++ b/scripts/review_apps/automated_cleanup.rb
@@ -1,252 +1,263 @@
+#!/usr/bin/env ruby
+
# frozen_string_literal: true
+require 'optparse'
require 'gitlab'
require_relative File.expand_path('../../tooling/lib/tooling/helm3_client.rb', __dir__)
require_relative File.expand_path('../../tooling/lib/tooling/kubernetes_client.rb', __dir__)
-class AutomatedCleanup
- attr_reader :project_path, :gitlab_token
-
- DEPLOYMENTS_PER_PAGE = 100
- ENVIRONMENT_PREFIX = {
- review_app: 'review/',
- docs_review_app: 'review-docs/'
- }.freeze
- IGNORED_HELM_ERRORS = [
- 'transport is closing',
- 'error upgrading connection',
- 'not found'
- ].freeze
- IGNORED_KUBERNETES_ERRORS = [
- 'NotFound'
- ].freeze
-
- def self.ee?
- # Support former project name for `dev`
- %w[gitlab gitlab-ee].include?(ENV['CI_PROJECT_NAME'])
- end
+module ReviewApps
+ class AutomatedCleanup
+ DEPLOYMENTS_PER_PAGE = 100
+ ENVIRONMENT_PREFIX = {
+ review_app: 'review/',
+ docs_review_app: 'review-docs/'
+ }.freeze
+ IGNORED_HELM_ERRORS = [
+ 'transport is closing',
+ 'error upgrading connection',
+ 'not found'
+ ].freeze
+ IGNORED_KUBERNETES_ERRORS = [
+ 'NotFound'
+ ].freeze
+
+ # $GITLAB_PROJECT_REVIEW_APP_CLEANUP_API_TOKEN => `Automated Review App Cleanup` project token
+ def initialize(
+ project_path: ENV['CI_PROJECT_PATH'],
+ gitlab_token: ENV['GITLAB_PROJECT_REVIEW_APP_CLEANUP_API_TOKEN'],
+ api_endpoint: ENV['CI_API_V4_URL'],
+ options: {}
+ )
+ @project_path = project_path
+ @gitlab_token = gitlab_token
+ @api_endpoint = api_endpoint
+ @dry_run = options[:dry_run]
+
+ puts "Dry-run mode." if dry_run
+ end
- # $GITLAB_PROJECT_REVIEW_APP_CLEANUP_API_TOKEN => `Automated Review App Cleanup` project token
- def initialize(project_path: ENV['CI_PROJECT_PATH'], gitlab_token: ENV['GITLAB_PROJECT_REVIEW_APP_CLEANUP_API_TOKEN'])
- @project_path = project_path
- @gitlab_token = gitlab_token
- end
+ def gitlab
+ @gitlab ||= begin
+ Gitlab.configure do |config|
+ config.endpoint = api_endpoint
+ # gitlab-bot's token "GitLab review apps cleanup"
+ config.private_token = gitlab_token
+ end
- def gitlab
- @gitlab ||= begin
- Gitlab.configure do |config|
- config.endpoint = 'https://gitlab.com/api/v4'
- # gitlab-bot's token "GitLab review apps cleanup"
- config.private_token = gitlab_token
+ Gitlab
end
-
- Gitlab
end
- end
- def review_apps_namespace
- 'review-apps'
- end
+ def review_apps_namespace
+ 'review-apps'
+ end
- def helm
- @helm ||= Tooling::Helm3Client.new(namespace: review_apps_namespace)
- end
+ def helm
+ @helm ||= Tooling::Helm3Client.new(namespace: review_apps_namespace)
+ end
- def kubernetes
- @kubernetes ||= Tooling::KubernetesClient.new(namespace: review_apps_namespace)
- end
+ def kubernetes
+ @kubernetes ||= Tooling::KubernetesClient.new(namespace: review_apps_namespace)
+ end
- def perform_gitlab_environment_cleanup!(days_for_stop:, days_for_delete:)
- puts "Checking for Review Apps not updated in the last #{days_for_stop} days..."
+ def perform_gitlab_environment_cleanup!(days_for_stop:, days_for_delete:)
+ puts "Checking for Review Apps not updated in the last #{days_for_stop} days..."
- checked_environments = []
- delete_threshold = threshold_time(days: days_for_delete)
- stop_threshold = threshold_time(days: days_for_stop)
- deployments_look_back_threshold = threshold_time(days: days_for_delete * 5)
+ checked_environments = []
+ delete_threshold = threshold_time(days: days_for_delete)
+ stop_threshold = threshold_time(days: days_for_stop)
+ deployments_look_back_threshold = threshold_time(days: days_for_delete * 5)
- releases_to_delete = []
+ releases_to_delete = []
- # Delete environments via deployments
- gitlab.deployments(project_path, per_page: DEPLOYMENTS_PER_PAGE, sort: 'desc').auto_paginate do |deployment|
- break if Time.parse(deployment.created_at) < deployments_look_back_threshold
+ # Delete environments via deployments
+ gitlab.deployments(project_path, per_page: DEPLOYMENTS_PER_PAGE, sort: 'desc').auto_paginate do |deployment|
+ break if Time.parse(deployment.created_at) < deployments_look_back_threshold
- environment = deployment.environment
+ environment = deployment.environment
- next unless environment
- next unless environment.name.start_with?(ENVIRONMENT_PREFIX[:review_app])
- next if checked_environments.include?(environment.slug)
+ next unless environment
+ next unless environment.name.start_with?(ENVIRONMENT_PREFIX[:review_app])
+ next if checked_environments.include?(environment.slug)
- last_deploy = deployment.created_at
- deployed_at = Time.parse(last_deploy)
+ last_deploy = deployment.created_at
+ deployed_at = Time.parse(last_deploy)
- if deployed_at < delete_threshold
- deleted_environment = delete_environment(environment, deployment)
- if deleted_environment
- release = Tooling::Helm3Client::Release.new(environment.slug, 1, deployed_at.to_s, nil, nil, review_apps_namespace)
- releases_to_delete << release
- end
- else
- if deployed_at >= stop_threshold
- print_release_state(subject: 'Review App', release_name: environment.slug, release_date: last_deploy, action: 'leaving')
+ if deployed_at < delete_threshold
+ deleted_environment = delete_environment(environment, deployment)
+ if deleted_environment
+ release = Tooling::Helm3Client::Release.new(environment.slug, 1, deployed_at.to_s, nil, nil, review_apps_namespace)
+ releases_to_delete << release
+ end
else
- environment_state = fetch_environment(environment)&.state
- stop_environment(environment, deployment) if environment_state && environment_state != 'stopped'
+ if deployed_at >= stop_threshold
+ print_release_state(subject: 'Review App', release_name: environment.slug, release_date: last_deploy, action: 'leaving')
+ else
+ environment_state = fetch_environment(environment)&.state
+ stop_environment(environment, deployment) if environment_state && environment_state != 'stopped'
+ end
end
+
+ checked_environments << environment.slug
end
- checked_environments << environment.slug
- end
+ delete_stopped_environments(environment_type: :review_app, checked_environments: checked_environments, last_updated_threshold: delete_threshold) do |environment|
+ releases_to_delete << Tooling::Helm3Client::Release.new(environment.slug, 1, environment.updated_at, nil, nil, review_apps_namespace)
+ end
- delete_stopped_environments(environment_type: :review_app, checked_environments: checked_environments, last_updated_threshold: delete_threshold) do |environment|
- releases_to_delete << Tooling::Helm3Client::Release.new(environment.slug, 1, environment.updated_at, nil, nil, review_apps_namespace)
+ delete_helm_releases(releases_to_delete)
end
- delete_helm_releases(releases_to_delete)
- end
+ def perform_gitlab_docs_environment_cleanup!(days_for_stop:, days_for_delete:)
+ puts "Checking for Docs Review Apps not updated in the last #{days_for_stop} days..."
- def perform_gitlab_docs_environment_cleanup!(days_for_stop:, days_for_delete:)
- puts "Checking for Docs Review Apps not updated in the last #{days_for_stop} days..."
+ checked_environments = []
+ stop_threshold = threshold_time(days: days_for_stop)
+ delete_threshold = threshold_time(days: days_for_delete)
- checked_environments = []
- stop_threshold = threshold_time(days: days_for_stop)
- delete_threshold = threshold_time(days: days_for_delete)
+ # Delete environments via deployments
+ gitlab.deployments(project_path, per_page: DEPLOYMENTS_PER_PAGE, sort: 'desc').auto_paginate do |deployment|
+ environment = deployment.environment
- # Delete environments via deployments
- gitlab.deployments(project_path, per_page: DEPLOYMENTS_PER_PAGE, sort: 'desc').auto_paginate do |deployment|
- environment = deployment.environment
+ next unless environment
+ next unless environment.name.start_with?(ENVIRONMENT_PREFIX[:docs_review_app])
+ next if checked_environments.include?(environment.slug)
- next unless environment
- next unless environment.name.start_with?(ENVIRONMENT_PREFIX[:docs_review_app])
- next if checked_environments.include?(environment.slug)
+ last_deploy = deployment.created_at
+ deployed_at = Time.parse(last_deploy)
- last_deploy = deployment.created_at
- deployed_at = Time.parse(last_deploy)
+ if deployed_at < stop_threshold
+ environment_state = fetch_environment(environment)&.state
+ stop_environment(environment, deployment) if environment_state && environment_state != 'stopped'
+ end
- if deployed_at < stop_threshold
- environment_state = fetch_environment(environment)&.state
- stop_environment(environment, deployment) if environment_state && environment_state != 'stopped'
- end
+ delete_environment(environment, deployment) if deployed_at < delete_threshold
- delete_environment(environment, deployment) if deployed_at < delete_threshold
+ checked_environments << environment.slug
+ end
- checked_environments << environment.slug
+ delete_stopped_environments(environment_type: :docs_review_app, checked_environments: checked_environments, last_updated_threshold: delete_threshold)
end
- delete_stopped_environments(environment_type: :docs_review_app, checked_environments: checked_environments, last_updated_threshold: delete_threshold)
- end
-
- def perform_helm_releases_cleanup!(days:)
- puts "Checking for Helm releases that are failed or not updated in the last #{days} days..."
+ def perform_helm_releases_cleanup!(days:)
+ puts "Checking for Helm releases that are failed or not updated in the last #{days} days..."
- threshold = threshold_time(days: days)
+ threshold = threshold_time(days: days)
- releases_to_delete = []
+ releases_to_delete = []
- helm_releases.each do |release|
- # Prevents deleting `dns-gitlab-review-app` releases or other unrelated releases
- next unless release.name.start_with?('review-')
+ helm_releases.each do |release|
+ # Prevents deleting `dns-gitlab-review-app` releases or other unrelated releases
+ next unless release.name.start_with?('review-')
- if release.status == 'failed' || release.last_update < threshold
- releases_to_delete << release
- else
- print_release_state(subject: 'Release', release_name: release.name, release_date: release.last_update, action: 'leaving')
+ if release.status == 'failed' || release.last_update < threshold
+ releases_to_delete << release
+ else
+ print_release_state(subject: 'Release', release_name: release.name, release_date: release.last_update, action: 'leaving')
+ end
end
+
+ delete_helm_releases(releases_to_delete)
end
- delete_helm_releases(releases_to_delete)
- end
+ def perform_stale_namespace_cleanup!(days:)
+ kubernetes_client = Tooling::KubernetesClient.new(namespace: nil)
- def perform_stale_namespace_cleanup!(days:)
- kubernetes_client = Tooling::KubernetesClient.new(namespace: nil)
+ kubernetes_client.cleanup_review_app_namespaces(created_before: threshold_time(days: days), wait: false) unless dry_run
+ end
- kubernetes_client.cleanup_review_app_namespaces(created_before: threshold_time(days: days), wait: false)
- end
+ def perform_stale_pvc_cleanup!(days:)
+ kubernetes.cleanup_by_created_at(resource_type: 'pvc', created_before: threshold_time(days: days), wait: false) unless dry_run
+ end
- def perform_stale_pvc_cleanup!(days:)
- kubernetes.cleanup_by_created_at(resource_type: 'pvc', created_before: threshold_time(days: days), wait: false)
- end
+ private
- private
+ attr_reader :project_path, :gitlab_token, :api_endpoint, :dry_run
- def fetch_environment(environment)
- gitlab.environment(project_path, environment.id)
- rescue Errno::ETIMEDOUT => ex
- puts "Failed to fetch '#{environment.name}' / '#{environment.slug}' (##{environment.id}):\n#{ex.message}"
- nil
- end
+ def fetch_environment(environment)
+ gitlab.environment(project_path, environment.id)
+ rescue Errno::ETIMEDOUT => ex
+ puts "Failed to fetch '#{environment.name}' / '#{environment.slug}' (##{environment.id}):\n#{ex.message}"
+ nil
+ end
- def delete_environment(environment, deployment = nil)
- release_date = deployment ? deployment.created_at : environment.updated_at
- print_release_state(subject: 'Review app', release_name: environment.slug, release_date: release_date, action: 'deleting')
- gitlab.delete_environment(project_path, environment.id)
+ def delete_environment(environment, deployment = nil)
+ release_date = deployment ? deployment.created_at : environment.updated_at
+ print_release_state(subject: 'Review app', release_name: environment.slug, release_date: release_date, action: 'deleting')
+ gitlab.delete_environment(project_path, environment.id) unless dry_run
- rescue Gitlab::Error::Forbidden
- puts "Review app '#{environment.name}' / '#{environment.slug}' (##{environment.id}) is forbidden: skipping it"
- end
+ rescue Gitlab::Error::Forbidden
+ puts "Review app '#{environment.name}' / '#{environment.slug}' (##{environment.id}) is forbidden: skipping it"
+ end
- def stop_environment(environment, deployment)
- print_release_state(subject: 'Review app', release_name: environment.slug, release_date: deployment.created_at, action: 'stopping')
- gitlab.stop_environment(project_path, environment.id)
+ def stop_environment(environment, deployment)
+ print_release_state(subject: 'Review app', release_name: environment.slug, release_date: deployment.created_at, action: 'stopping')
+ gitlab.stop_environment(project_path, environment.id) unless dry_run
- rescue Gitlab::Error::Forbidden
- puts "Review app '#{environment.name}' / '#{environment.slug}' (##{environment.id}) is forbidden: skipping it"
- end
+ rescue Gitlab::Error::Forbidden
+ puts "Review app '#{environment.name}' / '#{environment.slug}' (##{environment.id}) is forbidden: skipping it"
+ end
- def delete_stopped_environments(environment_type:, checked_environments:, last_updated_threshold:)
- gitlab.environments(project_path, per_page: DEPLOYMENTS_PER_PAGE, sort: 'desc', states: 'stopped', search: ENVIRONMENT_PREFIX[environment_type]).auto_paginate do |environment|
- next if skip_environment?(environment: environment, checked_environments: checked_environments, last_updated_threshold: last_updated_threshold, environment_type: environment_type)
+ def delete_stopped_environments(environment_type:, checked_environments:, last_updated_threshold:)
+ gitlab.environments(project_path, per_page: DEPLOYMENTS_PER_PAGE, sort: 'desc', states: 'stopped', search: ENVIRONMENT_PREFIX[environment_type]).auto_paginate do |environment|
+ next if skip_environment?(environment: environment, checked_environments: checked_environments, last_updated_threshold: last_updated_threshold, environment_type: environment_type)
- yield environment if delete_environment(environment)
+ yield environment if delete_environment(environment)
- checked_environments << environment.slug
+ checked_environments << environment.slug
+ end
end
- end
- def skip_environment?(environment:, checked_environments:, last_updated_threshold:, environment_type:)
- return true unless environment.name.start_with?(ENVIRONMENT_PREFIX[environment_type])
- return true if checked_environments.include?(environment.slug)
- return true if Time.parse(environment.updated_at) > last_updated_threshold
+ def skip_environment?(environment:, checked_environments:, last_updated_threshold:, environment_type:)
+ return true unless environment.name.start_with?(ENVIRONMENT_PREFIX[environment_type])
+ return true if checked_environments.include?(environment.slug)
+ return true if Time.parse(environment.updated_at) > last_updated_threshold
- false
- end
+ false
+ end
- def helm_releases
- args = ['--all', '--date']
+ def helm_releases
+ args = ['--all', '--date']
- helm.releases(args: args)
- end
+ helm.releases(args: args)
+ end
- def delete_helm_releases(releases)
- return if releases.empty?
+ def delete_helm_releases(releases)
+ return if releases.empty?
- releases.each do |release|
- print_release_state(subject: 'Release', release_name: release.name, release_status: release.status, release_date: release.last_update, action: 'cleaning')
- end
+ releases.each do |release|
+ print_release_state(subject: 'Release', release_name: release.name, release_status: release.status, release_date: release.last_update, action: 'cleaning')
+ end
- releases_names = releases.map(&:name)
- helm.delete(release_name: releases_names)
- kubernetes.cleanup_by_release(release_name: releases_names, wait: false)
+ releases_names = releases.map(&:name)
+ unless dry_run
+ helm.delete(release_name: releases_names)
+ kubernetes.cleanup_by_release(release_name: releases_names, wait: false)
+ end
- rescue Tooling::Helm3Client::CommandFailedError => ex
- raise ex unless ignore_exception?(ex.message, IGNORED_HELM_ERRORS)
+ rescue Tooling::Helm3Client::CommandFailedError => ex
+ raise ex unless ignore_exception?(ex.message, IGNORED_HELM_ERRORS)
- puts "Ignoring the following Helm error:\n#{ex}\n"
- rescue Tooling::KubernetesClient::CommandFailedError => ex
- raise ex unless ignore_exception?(ex.message, IGNORED_KUBERNETES_ERRORS)
+ puts "Ignoring the following Helm error:\n#{ex}\n"
+ rescue Tooling::KubernetesClient::CommandFailedError => ex
+ raise ex unless ignore_exception?(ex.message, IGNORED_KUBERNETES_ERRORS)
- puts "Ignoring the following Kubernetes error:\n#{ex}\n"
- end
+ puts "Ignoring the following Kubernetes error:\n#{ex}\n"
+ end
- def threshold_time(days:)
- Time.now - days * 24 * 3600
- end
+ def threshold_time(days:)
+ Time.now - days * 24 * 3600
+ end
- def ignore_exception?(exception_message, exceptions_ignored)
- exception_message.match?(/(#{exceptions_ignored})/)
- end
+ def ignore_exception?(exception_message, exceptions_ignored)
+ exception_message.match?(/(#{exceptions_ignored})/)
+ end
- def print_release_state(subject:, release_name:, release_date:, action:, release_status: nil)
- puts "\n#{subject} '#{release_name}' #{"(#{release_status}) " if release_status}was last deployed on #{release_date}: #{action} it.\n"
+ def print_release_state(subject:, release_name:, release_date:, action:, release_status: nil)
+ puts "\n#{subject} '#{release_name}' #{"(#{release_status}) " if release_status}was last deployed on #{release_date}: #{action} it.\n"
+ end
end
end
@@ -256,28 +267,43 @@ def timed(task)
puts "#{task} finished in #{Time.now - start} seconds.\n"
end
-automated_cleanup = AutomatedCleanup.new
+if $PROGRAM_NAME == __FILE__
+ options = {
+ dry_run: false
+ }
-timed('Review Apps cleanup') do
- automated_cleanup.perform_gitlab_environment_cleanup!(days_for_stop: 5, days_for_delete: 6)
-end
+ OptionParser.new do |opts|
+ opts.on("-d", "--dry-run", "Whether to perform a dry-run or not.") do |value|
+ options[:dry_run] = true
+ end
-timed('Docs Review Apps cleanup') do
- automated_cleanup.perform_gitlab_docs_environment_cleanup!(days_for_stop: 20, days_for_delete: 30)
-end
+ opts.on("-h", "--help", "Prints this help") do
+ puts opts
+ exit
+ end
+ end.parse!
-puts
+ automated_cleanup = ReviewApps::AutomatedCleanup.new(options: options)
-timed('Helm releases cleanup') do
- automated_cleanup.perform_helm_releases_cleanup!(days: 7)
-end
+ timed('Review Apps cleanup') do
+ automated_cleanup.perform_gitlab_environment_cleanup!(days_for_stop: 5, days_for_delete: 6)
+ end
-timed('Stale Namespace cleanup') do
- automated_cleanup.perform_stale_namespace_cleanup!(days: 14)
-end
+ timed('Docs Review Apps cleanup') do
+ automated_cleanup.perform_gitlab_docs_environment_cleanup!(days_for_stop: 20, days_for_delete: 30)
+ end
-timed('Stale PVC cleanup') do
- automated_cleanup.perform_stale_pvc_cleanup!(days: 30)
-end
+ puts
+
+ timed('Helm releases cleanup') do
+ automated_cleanup.perform_helm_releases_cleanup!(days: 7)
+ end
-exit(0)
+ timed('Stale Namespace cleanup') do
+ automated_cleanup.perform_stale_namespace_cleanup!(days: 14)
+ end
+
+ timed('Stale PVC cleanup') do
+ automated_cleanup.perform_stale_pvc_cleanup!(days: 30)
+ end
+end
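Every destructive call above is now guarded by "unless dry_run", so the selection logic can be exercised end to end without deleting anything. An invocation sketch (the token is a placeholder; the variable names are the ones read in initialize):

    GITLAB_PROJECT_REVIEW_APP_CLEANUP_API_TOKEN="<token>" \
      CI_PROJECT_PATH="gitlab-org/gitlab" \
      CI_API_V4_URL="https://gitlab.com/api/v4" \
      scripts/review_apps/automated_cleanup.rb --dry-run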
diff --git a/scripts/review_apps/review-apps.sh b/scripts/review_apps/review-apps.sh
index e979d0f75cf..0fc245a409f 100755
--- a/scripts/review_apps/review-apps.sh
+++ b/scripts/review_apps/review-apps.sh
@@ -154,12 +154,8 @@ function disable_sign_ups() {
true
fi
- # Create the root token
- local set_token_rb="token = User.find_by_username('root').personal_access_tokens.create(scopes: [:api], name: 'Token to disable sign-ups'); token.set_token('${REVIEW_APPS_ROOT_TOKEN}'); begin; token.save!; rescue(ActiveRecord::RecordNotUnique); end"
- retry "run_task \"${set_token_rb}\""
-
- # Disable sign-ups
- local disable_signup_rb="Gitlab::CurrentSettings.current_application_settings.update!(signup_enabled: false)"
+ # Create the root token and disable sign-ups in a single Rails task invocation
+ local disable_signup_rb="token = User.find_by_username('root').personal_access_tokens.create(scopes: [:api], name: 'Token to disable sign-ups'); token.set_token('${REVIEW_APPS_ROOT_TOKEN}'); begin; token.save!; rescue(ActiveRecord::RecordNotUnique); end; Gitlab::CurrentSettings.current_application_settings.update!(signup_enabled: false)"
if (retry "run_task \"${disable_signup_rb}\""); then
echoinfo "Sign-ups have been disabled successfully."
else
@@ -239,16 +235,21 @@ function create_application_secret() {
}
function download_chart() {
- echoinfo "Downloading the GitLab chart..." true
+ # If the requirements.lock is present, it means we got everything we need from the cache.
+ if [[ -f "gitlab-${GITLAB_HELM_CHART_REF}/requirements.lock" ]]; then
+ echosuccess "Downloading/Building chart dependencies skipped. Using the chart ${gitlab-${GITLAB_HELM_CHART_REF}} local folder'..."
+ else
+ echoinfo "Downloading the GitLab chart..." true
- curl --location -o gitlab.tar.bz2 "https://gitlab.com/gitlab-org/charts/gitlab/-/archive/${GITLAB_HELM_CHART_REF}/gitlab-${GITLAB_HELM_CHART_REF}.tar.bz2"
- tar -xjf gitlab.tar.bz2
+ curl --location -o gitlab.tar.bz2 "https://gitlab.com/gitlab-org/charts/gitlab/-/archive/${GITLAB_HELM_CHART_REF}/gitlab-${GITLAB_HELM_CHART_REF}.tar.bz2"
+ tar -xjf gitlab.tar.bz2
- echoinfo "Adding the gitlab repo to Helm..."
- helm repo add gitlab https://charts.gitlab.io
+ echoinfo "Adding the gitlab repo to Helm..."
+ helm repo add gitlab https://charts.gitlab.io
- echoinfo "Building the gitlab chart's dependencies..."
- helm dependency build "gitlab-${GITLAB_HELM_CHART_REF}"
+ echoinfo "Building the gitlab chart's dependencies..."
+ helm dependency build "gitlab-${GITLAB_HELM_CHART_REF}"
+ fi
}
function base_config_changed() {
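The requirements.lock check above lets a restored CI cache short-circuit the download and dependency build entirely. A sketch of a cache stanza that would satisfy the guard (the key is an assumption; the path is the one the function tests):

    # .gitlab-ci.yml fragment (hypothetical):
    #   cache:
    #     key: "gitlab-helm-chart-${GITLAB_HELM_CHART_REF}"
    #     paths:
    #       - "gitlab-${GITLAB_HELM_CHART_REF}/"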
diff --git a/scripts/rspec_helpers.sh b/scripts/rspec_helpers.sh
index 5d7bd844c2c..73030d2ad6c 100644
--- a/scripts/rspec_helpers.sh
+++ b/scripts/rspec_helpers.sh
@@ -247,7 +247,12 @@ function rspec_paralellized_job() {
cp "${KNAPSACK_RSPEC_SUITE_REPORT_PATH}" "${KNAPSACK_REPORT_PATH}"
- export KNAPSACK_TEST_FILE_PATTERN=$(ruby -r./tooling/quality/test_level.rb -e "puts Quality::TestLevel.new(${spec_folder_prefixes}).pattern(:${test_level})")
+ export KNAPSACK_TEST_FILE_PATTERN="spec/{,**/}*_spec.rb"
+
+ if [[ "${test_level}" != "foss-impact" ]]; then
+ export KNAPSACK_TEST_FILE_PATTERN=$(ruby -r./tooling/quality/test_level.rb -e "puts Quality::TestLevel.new(${spec_folder_prefixes}).pattern(:${test_level})")
+ fi
+
export FLAKY_RSPEC_REPORT_PATH="${rspec_flaky_folder_path}all_${report_name}_report.json"
export NEW_FLAKY_RSPEC_REPORT_PATH="${rspec_flaky_folder_path}new_${report_name}_report.json"
export SKIPPED_FLAKY_TESTS_REPORT_PATH="${rspec_flaky_folder_path}skipped_flaky_tests_${report_name}_report.txt"
@@ -268,8 +273,8 @@ function rspec_paralellized_job() {
debug_rspec_variables
- if [[ -n $RSPEC_TESTS_MAPPING_ENABLED ]]; then
- tooling/bin/parallel_rspec --rspec_args "$(rspec_args "${rspec_opts}")" --filter "${RSPEC_MATCHING_TESTS_PATH}" || rspec_run_status=$?
+ if [[ -n "${RSPEC_TESTS_MAPPING_ENABLED}" ]]; then
+ tooling/bin/parallel_rspec --rspec_args "$(rspec_args "${rspec_opts}")" --filter "${RSPEC_TESTS_FILTER_FILE}" || rspec_run_status=$?
else
tooling/bin/parallel_rspec --rspec_args "$(rspec_args "${rspec_opts}")" || rspec_run_status=$?
fi
@@ -292,6 +297,12 @@ function rspec_paralellized_job() {
function retry_failed_rspec_examples() {
local rspec_run_status=0
+ # Sometimes the file isn't created or is empty. In that case we exit(1) ourselves; otherwise RSpec would
+ # not run any examples and exit successfully, hiding the failed tests!
+ if [[ ! -f "${RSPEC_LAST_RUN_RESULTS_FILE}" ]] || [[ ! -s "${RSPEC_LAST_RUN_RESULTS_FILE}" ]]; then
+ exit 1
+ fi
+
# Keep track of the tests that are retried, later consolidated in a single file by the `rspec:flaky-tests-report` job
local failed_examples=$(grep " failed" ${RSPEC_LAST_RUN_RESULTS_FILE})
echo "${CI_JOB_URL}" > "${RETRIED_TESTS_REPORT_PATH}"
@@ -357,41 +368,12 @@ function rspec_fail_fast() {
fi
}
-function rspec_matched_foss_tests() {
- local test_file_count_threshold=20
- local matching_tests_file=${1}
- local foss_matching_tests_file="${matching_tests_file}-foss"
+function filter_rspec_matched_foss_tests() {
+ local matching_tests_file="${1}"
+ local foss_matching_tests_file="${2}"
# Keep only files that exist (i.e. exclude EE-specific files)
- cat ${matching_tests_file} | ruby -e 'puts $stdin.read.split(" ").select { |f| File.exist?(f) && f.include?("spec/") }.join(" ")' > "${foss_matching_tests_file}"
-
- echo "Matching tests file:"
- cat ${matching_tests_file}
- echo -e "\n\n"
-
- echo "FOSS matching tests file:"
- cat ${foss_matching_tests_file}
- echo -e "\n\n"
-
- local rspec_opts=${2}
- local test_files="$(cat ${foss_matching_tests_file})"
- local test_file_count=$(wc -w "${foss_matching_tests_file}" | awk {'print $1'})
-
- if [[ "${test_file_count}" -gt "${test_file_count_threshold}" ]]; then
- echo "This job is intentionally failed because there are more than ${test_file_count_threshold} FOSS test files matched,"
- echo "which would take too long to run in this job."
- echo "To reduce the likelihood of breaking FOSS pipelines,"
- echo "please add ~\"pipeline:run-as-if-foss\" label to the merge request and trigger a new pipeline."
- echo "This would run all as-if-foss jobs in this merge request"
- echo "and remove this failing job from the pipeline."
- exit 1
- fi
-
- if [[ -n $test_files ]]; then
- rspec_simple_job "${rspec_opts} ${test_files}"
- else
- echo "No impacted FOSS rspec tests to run"
- fi
+ cat ${matching_tests_file} | ruby -e 'puts $stdin.read.split(" ").select { |f| f.start_with?("spec/") && File.exist?(f) }.join(" ")' > "${foss_matching_tests_file}"
}
function generate_frontend_fixtures_mapping() {
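Where the old rspec_matched_foss_tests filtered, enforced a 20-file threshold, and ran the tests in one step, the new filter_rspec_matched_foss_tests only writes the FOSS subset; running that subset is now handled elsewhere in the pipeline. A usage sketch with hypothetical paths:

    filter_rspec_matched_foss_tests "tmp/matching_tests.txt" "tmp/matching_foss_tests.txt"
    # tmp/matching_foss_tests.txt now lists only paths that start with spec/ and exist on disk.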
diff --git a/scripts/rubocop-max-files-in-cache-check b/scripts/rubocop-max-files-in-cache-check
index 5b422d0a0f4..34caa0e197c 100755
--- a/scripts/rubocop-max-files-in-cache-check
+++ b/scripts/rubocop-max-files-in-cache-check
@@ -1,7 +1,8 @@
#!/usr/bin/env ruby
# frozen_string_literal: true
-require 'yaml'
+require_relative '../config/bundler_setup'
+require 'rubocop'
MINIMUM_MAX_FILES_IN_CACHE_MARGIN = 1.05
RECOMMENDED_MAX_FILES_IN_CACHE_MARGIN = 1.25
@@ -14,7 +15,7 @@ rubocop_target_files_count = `#{RUBOCOP_LIST_TARGET_FILES_COMMAND}`.strip.to_i
raise Error, "#{RUBOCOP_LIST_TARGET_FILES_COMMAND} failed with status #{$?}!" if rubocop_target_files_count == 0
rubocop_target_files_count = rubocop_target_files_count.to_i
-rubocop_current_max_files_in_cache = YAML.load_file(File.expand_path('../.rubocop.yml', __dir__)).dig('AllCops', 'MaxFilesInCache').to_i
+rubocop_current_max_files_in_cache = RuboCop::ConfigLoader.load_yaml_configuration(File.expand_path('../.rubocop.yml', __dir__)).dig('AllCops', 'MaxFilesInCache').to_i
minimum_max_files_in_cache = (rubocop_target_files_count * MINIMUM_MAX_FILES_IN_CACHE_MARGIN).round(-3)
# We want AllCops.MaxFilesInCache to be at least 5% above the actual files count at any time to give us enough time to increase it accordingly
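A worked example of the two margins, assuming a hypothetical count of 30,000 RuboCop target files:

    # minimum (hard floor):  (30000 * 1.05).round(-3)  # => 32000
    # recommended:           (30000 * 1.25).round(-3)  # => 38000 (assuming the same rounding)
    # AllCops.MaxFilesInCache in .rubocop.yml must stay at or above the floor.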
diff --git a/scripts/rubocop-parse b/scripts/rubocop-parse
index 4c82be5934b..0a234df81cd 100755
--- a/scripts/rubocop-parse
+++ b/scripts/rubocop-parse
@@ -30,23 +30,57 @@ require_relative '../config/bundler_setup'
require 'rubocop'
require 'optparse'
-def print_ast(file, source, version)
- version ||= RuboCop::ConfigStore.new.for_file(file).target_ruby_version
- puts RuboCop::AST::ProcessedSource.new(source, version).ast.to_s
+module Helper
+ extend self
+
+ class << self
+ attr_writer :ruby_version
+ end
+
+ def ast(source, file: '', version: nil)
+ version ||= ruby_version
+ puts RuboCop::AST::ProcessedSource.new(source, version).ast.to_s
+ end
+
+ def ruby_version
+ @ruby_version ||= rubocop_target_ruby_version
+ end
+
+ def rubocop_target_ruby_version
+ @rubocop_target_ruby_version ||= RuboCop::ConfigStore.new.for_file('.').target_ruby_version
+ end
end
-options = Struct.new(:eval, :ruby_version, :print_help, keyword_init: true).new
+def start_irb
+ require 'irb'
+
+ include Helper # rubocop:disable Style/MixinUsage
+
+ puts "Ruby version: #{ruby_version}"
+ puts
+ puts "Use `ast(source_string, version: nil)` method to parse code and output AST. For example:"
+ puts " ast('puts :hello')"
+ puts
+
+ IRB.start
+end
+
+options = Struct.new(:eval, :interactive, :print_help, keyword_init: true).new
parser = OptionParser.new do |opts|
- opts.banner = "Usage: #{$0} [-e code] [FILE...]"
+ opts.banner = "Usage: #{$PROGRAM_NAME} [-e code] [FILE...]"
opts.on('-e FRAGMENT', '--eval FRAGMENT', 'Process a fragment of Ruby code') do |code|
options.eval = code
end
+ opts.on('-i', '--interactive', 'Start an interactive session to parse code and print ASTs') do
+ options.interactive = true
+ end
+
opts.on('-v RUBY_VERSION', '--ruby-version RUBY_VERSION',
'Parse as Ruby would. Defaults to RuboCop TargetRubyVersion setting.') do |ruby_version|
- options.ruby_version = Float(ruby_version)
+ Helper.ruby_version = Float(ruby_version)
end
opts.on('-h', '--help') do
@@ -54,20 +88,31 @@ parser = OptionParser.new do |opts|
end
end
-args = parser.parse!
+files = parser.parse!
if options.print_help
puts parser
- exit
-end
-
-print_ast('', options.eval, options.ruby_version) if options.eval
+elsif options.interactive
+ if options.eval || files.any?
+ puts "Cannot combine `--interactive` with `--eval` or passing files. Aborting..."
+ puts
-args.each do |arg|
- if File.file?(arg)
- source = File.read(arg)
- print_ast(arg, source, options.ruby_version)
+ puts parser
+ exit 1
else
- warn "Skipping non-file #{arg.inspect}"
+ start_irb
end
+elsif options.eval
+ Helper.ast(options.eval)
+elsif files.any?
+ files.each do |file|
+ if File.file?(file)
+ source = File.read(file)
+ Helper.ast(source, file: file)
+ else
+ warn "Skipping non-file #{file.inspect}"
+ end
+ end
+else
+ puts parser
end
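Invocation sketches for the reworked script (the AST output shape comes from RuboCop's pretty-printer and may vary slightly between versions):

    # Parse a fragment:
    scripts/rubocop-parse -e 'puts :hello'
    # (send nil :puts
    #   (sym :hello))

    # Parse one or more files:
    scripts/rubocop-parse path/to/file.rb

    # Explore interactively; cannot be combined with -e or file arguments:
    scripts/rubocop-parse -i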
diff --git a/scripts/setup/find-jh-branch.rb b/scripts/setup/find-jh-branch.rb
index a7c1cafd74c..5b36aa7a1f4 100755
--- a/scripts/setup/find-jh-branch.rb
+++ b/scripts/setup/find-jh-branch.rb
@@ -97,6 +97,6 @@ class FindJhBranch
end
end
-if $0 == __FILE__
+if $PROGRAM_NAME == __FILE__
puts FindJhBranch.new.run
end
diff --git a/scripts/slack b/scripts/slack
index 293f8070504..3ce2b4553ee 100755
--- a/scripts/slack
+++ b/scripts/slack
@@ -1,4 +1,4 @@
-#!/bin/bash
+#!/bin/sh
# This is based on https://gitlab.com/gitlab-org/gitlab-qa/-/blob/master/bin/slack
#
# Sends Slack notification MSG to CI_SLACK_WEBHOOK_URL (which needs to be set).
diff --git a/scripts/static-analysis b/scripts/static-analysis
index 53f84c19ac6..c6cf09e056b 100755
--- a/scripts/static-analysis
+++ b/scripts/static-analysis
@@ -191,7 +191,7 @@ class StaticAnalysis
end
end
-if $0 == __FILE__
+if $PROGRAM_NAME == __FILE__
options = {}
if ARGV.include?('--dry-run')
diff --git a/scripts/trigger-build.rb b/scripts/trigger-build.rb
index b368bbdb1f1..897ca9f473e 100755
--- a/scripts/trigger-build.rb
+++ b/scripts/trigger-build.rb
@@ -427,7 +427,7 @@ module Trigger
Job = Class.new(Pipeline)
end
-if $0 == __FILE__
+if $PROGRAM_NAME == __FILE__
case ARGV[0]
when 'cng'
Trigger::CNG.new.invoke!.wait!
diff --git a/scripts/utils.sh b/scripts/utils.sh
index 10b7f856ee6..ea2b390f249 100644
--- a/scripts/utils.sh
+++ b/scripts/utils.sh
@@ -62,6 +62,22 @@ function bundle_install_script() {
echo -e "section_end:`date +%s`:bundle-install\r\e[0K"
}
+function yarn_install_script() {
+ echo -e "section_start:`date +%s`:yarn-install[collapsed=true]\r\e[0KInstalling Yarn packages"
+
+ retry yarn install --frozen-lockfile
+
+ echo -e "section_end:`date +%s`:yarn-install\r\e[0K"
+}
+
+function assets_compile_script() {
+ echo -e "section_start:`date +%s`:assets-compile[collapsed=true]\r\e[0KCompiling frontend assets"
+
+ bin/rake gitlab:assets:compile
+
+ echo -e "section_end:`date +%s`:assets-compile\r\e[0K"
+}
+
function setup_db_user_only() {
source scripts/create_postgres_user.sh
}
@@ -77,12 +93,12 @@ function setup_db() {
}
function install_gitlab_gem() {
- run_timed_command "gem install httparty --no-document --version 0.18.1"
- run_timed_command "gem install gitlab --no-document --version 4.17.0"
+ run_timed_command "gem install httparty --no-document --version 0.20.0"
+ run_timed_command "gem install gitlab --no-document --version 4.19.0"
}
function install_tff_gem() {
- run_timed_command "gem install test_file_finder --no-document --version 0.1.1"
+ run_timed_command "gem install test_file_finder --no-document --version 0.1.4"
}
function install_junit_merge_gem() {
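Both new helpers wrap their work in GitLab CI's collapsible-section markers, the same pattern bundle_install_script already uses. A sketch of the pattern for an arbitrary section (name and command are placeholders):

    function my_section_script() {
      echo -e "section_start:`date +%s`:my-section[collapsed=true]\r\e[0KDoing my thing"

      run_timed_command "echo doing-my-thing"

      echo -e "section_end:`date +%s`:my-section\r\e[0K"
    }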
diff --git a/scripts/validate_migration_schema b/scripts/validate_migration_schema
index 5c389851844..c6f93b855ec 100755
--- a/scripts/validate_migration_schema
+++ b/scripts/validate_migration_schema
@@ -2,120 +2,6 @@
# frozen_string_literal: true
-require 'open3'
-
-class MigrationSchemaValidator
- FILENAME = 'db/structure.sql'
-
- MIGRATION_DIRS = %w[db/migrate db/post_migrate].freeze
-
- SCHEMA_VERSION_DIR = 'db/schema_migrations'
-
- VERSION_DIGITS = 14
-
- def validate!
- if committed_migrations.empty?
- puts "\e[32m No migrations found, skipping schema validation\e[0m"
- return
- end
-
- validate_schema_on_rollback!
- validate_schema_on_migrate!
- validate_schema_version_files!
- end
-
- private
-
- def validate_schema_on_rollback!
- committed_migrations.reverse_each do |filename|
- version = find_migration_version(filename)
-
- run("scripts/db_tasks db:migrate:down VERSION=#{version}")
- run("scripts/db_tasks db:schema:dump")
- end
-
- git_command = "git diff #{diff_target} -- #{FILENAME}"
- base_message = "rollback of added migrations does not revert #{FILENAME} to previous state"
-
- validate_clean_output!(git_command, base_message)
- end
-
- def validate_schema_on_migrate!
- run("scripts/db_tasks db:migrate")
- run("scripts/db_tasks db:schema:dump")
-
- git_command = "git diff -- #{FILENAME}"
- base_message = "the committed #{FILENAME} does not match the one generated by running added migrations"
-
- validate_clean_output!(git_command, base_message)
- end
-
- def validate_schema_version_files!
- git_command = "git add -A -n #{SCHEMA_VERSION_DIR}"
- base_message = "the committed files in #{SCHEMA_VERSION_DIR} do not match those expected by the added migrations"
-
- validate_clean_output!(git_command, base_message)
- end
-
- def committed_migrations
- @committed_migrations ||= begin
- git_command = "git diff --name-only --diff-filter=A #{diff_target} -- #{MIGRATION_DIRS.join(' ')}"
-
- run(git_command).split("\n")
- end
- end
-
- def diff_target
- @diff_target ||= pipeline_for_merged_results? ? target_branch : merge_base
- end
-
- def merge_base
- run("git merge-base #{target_branch} #{source_ref}")
- end
-
- def target_branch
- ENV['CI_MERGE_REQUEST_TARGET_BRANCH_NAME'] || ENV['TARGET'] || ENV['CI_DEFAULT_BRANCH'] || 'master'
- end
-
- def source_ref
- ENV['CI_COMMIT_SHA'] || 'HEAD'
- end
-
- def pipeline_for_merged_results?
- ENV.key?('CI_MERGE_REQUEST_SOURCE_BRANCH_SHA')
- end
-
- def find_migration_version(filename)
- file_basename = File.basename(filename)
- version_match = /\A(?<version>\d{#{VERSION_DIGITS}})_/o.match(file_basename)
-
- die "#{filename} has an invalid migration version" if version_match.nil?
-
- version_match[:version]
- end
-
- def validate_clean_output!(command, base_message)
- command_output = run(command)
-
- return if command_output.empty?
-
- die "#{base_message}:\n#{command_output}"
- end
-
- def die(message, error_code: 1)
- puts "\e[31mError: #{message}\e[0m"
- exit error_code
- end
-
- def run(cmd)
- puts "\e[32m$ #{cmd}\e[37m"
- stdout_str, stderr_str, status = Open3.capture3(cmd)
- puts "#{stdout_str}#{stderr_str}\e[0m"
-
- die "command failed: #{stderr_str}" unless status.success?
-
- stdout_str.chomp
- end
-end
+require_relative 'migration_schema_validator'
MigrationSchemaValidator.new.validate!
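With the class body extracted to scripts/migration_schema_validator.rb, this entry point and the new post-deployment validator share a single implementation and differ only in their validate! strategy:

    scripts/validate_migration_schema             # rollback + migrate + schema/version-file checks
    scripts/post_deployment_migrations_validator  # rollback, then migrate with and without post-deployment migrations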