gitlab.com/gitlab-org/gitlab-foss.git
author    GitLab Bot <gitlab-bot@gitlab.com> 2024-01-11 12:08:22 +0300
committer GitLab Bot <gitlab-bot@gitlab.com> 2024-01-11 12:08:22 +0300
commit    6f5be4b446db2f17fc0307c4fce8ae285b35d89a (patch)
tree      2d7c4a648066342b0cc12c89d9e186f3a16b8bad
parent    826d6628ca045013b9d19ec5cb4d02ac81b76c68 (diff)

Add latest changes from gitlab-org/gitlab@master
-rw-r--r--  .gitlab/ci/qa.gitlab-ci.yml | 39
-rw-r--r--  .gitlab/ci/rules.gitlab-ci.yml | 25
-rw-r--r--  app/models/concerns/atomic_internal_id.rb | 4
-rw-r--r--  app/models/project.rb | 1
-rw-r--r--  config/feature_flags/development/manage_project_access_tokens.yml | 9
-rw-r--r--  data/deprecations/16-9-verify-after-script-canceled-status.yml | 13
-rw-r--r--  db/docs/batched_background_migrations/backfill_branch_protection_namespace_setting.yml | 2
-rw-r--r--  db/migrate/20240108121335_copy_internal_ids_for_epics_and_issues_usage_on_groups.rb | 74
-rw-r--r--  db/post_migrate/20240104155616_finalize_backfill_default_branch_protection_namespace_settings.rb | 24
-rw-r--r--  db/post_migrate/20240108125135_remove_internal_ids_triggers.rb | 74
-rw-r--r--  db/post_migrate/20240108125335_add_temporary_index_internal_ids_on_id_and_usage.rb | 17
-rw-r--r--  db/post_migrate/20240108185335_backfill_internal_ids_with_issues_usage_for_epics.rb | 56
-rw-r--r--  db/post_migrate/20240108215335_remove_internal_ids_tmp_index.rb | 17
-rw-r--r--  db/schema_migrations/20240104155616 | 1
-rw-r--r--  db/schema_migrations/20240108121335 | 1
-rw-r--r--  db/schema_migrations/20240108125135 | 1
-rw-r--r--  db/schema_migrations/20240108125335 | 1
-rw-r--r--  db/schema_migrations/20240108185335 | 1
-rw-r--r--  db/schema_migrations/20240108215335 | 1
-rw-r--r--  doc/api/graphql/getting_started.md | 3
-rw-r--r--  doc/development/cicd/components.md | 80
-rw-r--r--  doc/development/cicd/img/avatar_component_project.png | bin 0 -> 2545 bytes
-rw-r--r--  doc/development/cicd/templates.md | 2
-rw-r--r--  doc/development/pipelines/internals.md | 1
-rw-r--r--  doc/update/deprecations.md | 14
-rw-r--r--  locale/gitlab.pot | 3
-rw-r--r--  package.json | 4
-rwxr-xr-x  scripts/download-downstream-artifact.rb | 121
-rw-r--r--  spec/migrations/20240108121335_copy_internal_ids_for_epics_and_issues_usage_on_groups_spec.rb | 69
-rw-r--r--  spec/migrations/20240108185335_backfill_internal_ids_with_issues_usage_for_epics_spec.rb | 52
-rw-r--r--  spec/scripts/download_downstream_artifact_spec.rb | 180
-rw-r--r--  workhorse/config_test.go | 389
-rw-r--r--  workhorse/internal/config/config.go | 86
-rw-r--r--  workhorse/internal/testhelper/testhelper.go | 74
-rw-r--r--  yarn.lock | 16
35 files changed, 1402 insertions(+), 53 deletions(-)
diff --git a/.gitlab/ci/qa.gitlab-ci.yml b/.gitlab/ci/qa.gitlab-ci.yml
index 2aea64583da..c2393e5e0d3 100644
--- a/.gitlab/ci/qa.gitlab-ci.yml
+++ b/.gitlab/ci/qa.gitlab-ci.yml
@@ -212,3 +212,42 @@ e2e:test-on-gdk:
DYNAMIC_PIPELINE_YML: test-on-gdk-pipeline.yml
SKIP_MESSAGE: Skipping test-on-gdk due to mr containing only quarantine changes!
GDK_IMAGE: "${CI_REGISTRY_IMAGE}/gitlab-qa-gdk:${CI_COMMIT_SHA}"
+
+e2e:code-suggestions-eval:
+ extends:
+ - .qa:rules:code-suggestions-eval
+ stage: qa
+ needs: ["build-gdk-image"]
+ variables:
+ CS_EVAL_DOWNSTREAM_BRANCH: main
+ GITLAB_SHA: $CI_COMMIT_SHA
+ trigger:
+ strategy: depend
+ forward:
+ yaml_variables: true
+ pipeline_variables: true
+ project: gitlab-com/create-stage/code-creation/code-suggestion-scenarios
+ branch: $CS_EVAL_DOWNSTREAM_BRANCH
+
+e2e:code-suggestions-eval-results:
+ extends:
+ - .default-retry
+ - .qa:rules:code-suggestions-eval-results
+ stage: post-qa
+ needs:
+ - e2e:code-suggestions-eval
+ variables:
+ TRIGGER_JOB_NAME: "e2e:code-suggestions-eval"
+ DOWNSTREAM_PROJECT: gitlab-com/create-stage/code-creation/code-suggestion-scenarios
+ DOWNSTREAM_JOB_NAME: run_scenarios
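+    # DOWNSTREAM_JOB_ID below is a literal placeholder; scripts/download-downstream-artifact.rb
+    # substitutes it with the ID of the downstream job at runtime.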
+ DOWNSTREAM_JOB_ARTIFACT_PATH: scores-DOWNSTREAM_JOB_ID.csv
+ OUTPUT_ARTIFACT_PATH: scores.csv
+ before_script:
+ - source scripts/utils.sh
+ - install_gitlab_gem
+ script:
+ - scripts/download-downstream-artifact.rb
+ artifacts:
+ expose_as: 'Code Suggestions evaluation results'
+ paths:
+ - scores.csv
diff --git a/.gitlab/ci/rules.gitlab-ci.yml b/.gitlab/ci/rules.gitlab-ci.yml
index 28d077a3d5d..7db2c15db9b 100644
--- a/.gitlab/ci/rules.gitlab-ci.yml
+++ b/.gitlab/ci/rules.gitlab-ci.yml
@@ -92,6 +92,9 @@
.if-merge-request-labels-run-review-app: &if-merge-request-labels-run-review-app
if: '$CI_MERGE_REQUEST_LABELS =~ /pipeline:run-review-app/'
+.if-merge-request-labels-run-cs-evaluation: &if-merge-request-labels-run-cs-evaluation
+ if: '$CI_MERGE_REQUEST_LABELS =~ /pipeline:run-CS-evaluation/'
+
.if-merge-request-labels-skip-undercoverage: &if-merge-request-labels-skip-undercoverage
if: '$CI_MERGE_REQUEST_LABELS =~ /pipeline:skip-undercoverage/'
@@ -950,6 +953,7 @@
- <<: *if-merge-request
changes: *dependency-patterns
- <<: *if-merge-request-labels-run-all-e2e
+ - <<: *if-merge-request-labels-run-cs-evaluation
- <<: *if-merge-request
changes: *feature-flag-development-config-patterns
- <<: *if-merge-request
@@ -1684,6 +1688,27 @@
rules:
- <<: [*if-dot-com-gitlab-org-schedule, *qa-e2e-test-schedule-variables]
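+# Shared base for the Code Suggestions evaluation jobs: EE-only, never run on
+# fork merge requests, and opted into via the pipeline:run-CS-evaluation label.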
+.qa:rules:code-suggestions-eval-base:
+ rules:
+ - !reference [".strict-ee-only-rules", rules]
+ - <<: *if-fork-merge-request
+ when: never
+ - <<: *if-merge-request-labels-run-cs-evaluation
+
+.qa:rules:code-suggestions-eval:
+ rules:
+ - !reference [".qa:rules:code-suggestions-eval-base", rules]
+ - <<: *if-merge-request
+ changes: *code-patterns
+ when: manual
+ allow_failure: true
+
+.qa:rules:code-suggestions-eval-results:
+ rules:
+ - !reference [".qa:rules:code-suggestions-eval-base", rules]
+ - <<: *if-merge-request
+ changes: *code-patterns
+
# Note: If any changes are made to this rule, the following should also be updated:
# 1) .qa:rules:manual-omnibus-and-follow-up-e2e
# 2) .qa:rules:follow-up-e2e
diff --git a/app/models/concerns/atomic_internal_id.rb b/app/models/concerns/atomic_internal_id.rb
index ec4ee7985fe..f51b0967968 100644
--- a/app/models/concerns/atomic_internal_id.rb
+++ b/app/models/concerns/atomic_internal_id.rb
@@ -219,8 +219,8 @@ module AtomicInternalId
::AtomicInternalId.scope_usage(self.class)
end
- def self.scope_usage(including_class)
- including_class.table_name.to_sym
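+  # Allow the class to override its usage scope by defining an
+  # internal_id_scope_usage class method, instead of always deriving
+  # the usage from the table name.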
+ def self.scope_usage(klass)
+ klass.respond_to?(:internal_id_scope_usage) ? klass.internal_id_scope_usage : klass.table_name.to_sym
end
def self.project_init(klass, column_name = :iid)
diff --git a/app/models/project.rb b/app/models/project.rb
index 2b79dee5b45..8f82a947ba6 100644
--- a/app/models/project.rb
+++ b/app/models/project.rb
@@ -751,6 +751,7 @@ class Project < ApplicationRecord
preload(:project_feature, :route, namespace: [:route, :owner])
}
+ scope :with_name, -> (name) { where(name: name) }
scope :created_by, -> (user) { where(creator: user) }
scope :imported_from, -> (type) { where(import_type: type) }
scope :imported, -> { where.not(import_type: nil) }
diff --git a/config/feature_flags/development/manage_project_access_tokens.yml b/config/feature_flags/development/manage_project_access_tokens.yml
deleted file mode 100644
index 7c979257515..00000000000
--- a/config/feature_flags/development/manage_project_access_tokens.yml
+++ /dev/null
@@ -1,9 +0,0 @@
----
-name: manage_project_access_tokens
-introduced_by_url: https://gitlab.com/gitlab-org/gitlab/-/merge_requests/132342
-rollout_issue_url: https://gitlab.com/gitlab-org/gitlab/-/issues/430353
-milestone: '16.5'
-type: development
-group: group::authorization
-default_enabled: false
-feature_flag: manage_project_access_tokens
diff --git a/data/deprecations/16-9-verify-after-script-canceled-status.yml b/data/deprecations/16-9-verify-after-script-canceled-status.yml
new file mode 100644
index 00000000000..c34ee4337af
--- /dev/null
+++ b/data/deprecations/16-9-verify-after-script-canceled-status.yml
@@ -0,0 +1,13 @@
+- title: "`after_script` keyword will run for cancelled jobs"
+ # The milestones for the deprecation announcement, and the removal.
+ removal_milestone: "17.0"
+ announcement_milestone: "16.8"
+ # Change breaking_change to false if needed.
+ breaking_change: true
+ # The stage and GitLab username of the person reporting the change,
+ # and a link to the deprecation issue
+ reporter: jreporter
+ stage: verify
+ issue_url: https://gitlab.com/gitlab-org/gitlab/-/issues/437789
+ body: | # (required) Don't change this line.
+    The [`after_script`](https://docs.gitlab.com/ee/ci/yaml/#after_script) CI/CD keyword is used to run additional commands after the main `script` section of a job. This is often used for cleaning up environments or other resources that were used by the job. For many users, the fact that the `after_script` commands do not run if a job is cancelled was unexpected and undesired. In 17.0, the keyword will be updated to also run commands after job cancellation. Make sure that any CI/CD configuration using the `after_script` keyword can handle running for cancelled jobs as well.
diff --git a/db/docs/batched_background_migrations/backfill_branch_protection_namespace_setting.yml b/db/docs/batched_background_migrations/backfill_branch_protection_namespace_setting.yml
index 9a596cb056e..88e012d8d08 100644
--- a/db/docs/batched_background_migrations/backfill_branch_protection_namespace_setting.yml
+++ b/db/docs/batched_background_migrations/backfill_branch_protection_namespace_setting.yml
@@ -6,4 +6,4 @@ introduced_by_url: https://gitlab.com/gitlab-org/gitlab/-/merge_requests/136181
milestone: '16.7'
queued_migration_version: 20231107092912
finalize_after: '2024-01-23'
-finalized_by: # version of the migration that ensured this bbm
+finalized_by: 20240104155616
diff --git a/db/migrate/20240108121335_copy_internal_ids_for_epics_and_issues_usage_on_groups.rb b/db/migrate/20240108121335_copy_internal_ids_for_epics_and_issues_usage_on_groups.rb
new file mode 100644
index 00000000000..205f7b88026
--- /dev/null
+++ b/db/migrate/20240108121335_copy_internal_ids_for_epics_and_issues_usage_on_groups.rb
@@ -0,0 +1,74 @@
+# frozen_string_literal: true
+
+class CopyInternalIdsForEpicsAndIssuesUsageOnGroups < Gitlab::Database::Migration[2.2]
+ include Gitlab::Database::SchemaHelpers
+
+ milestone '16.8'
+ disable_ddl_transaction!
+
+ TRIGGER_ON_INSERT = 'trigger_copy_usage_on_internal_ids_on_insert'
+ TRIGGER_ON_UPDATE = 'trigger_copy_usage_on_internal_ids_on_update'
+ INSERT_OR_UPDATE_FUNCTION_NAME = 'insert_or_update_internal_ids_usage'
+
+ def up
+ execute(<<~SQL)
+ CREATE OR REPLACE FUNCTION #{INSERT_OR_UPDATE_FUNCTION_NAME}()
+ RETURNS trigger
+ LANGUAGE plpgsql
+ AS $$
+ DECLARE
+ namespace_type varchar;
+ copy_usage smallint;
+ BEGIN
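+        -- usage values: 0 = issues, 4 = epics (see Enums::InternalId#usage_resources)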
+ IF (NEW.usage = 0) THEN
+ copy_usage = 4;
+
+ -- we only care about group level internal_ids so we check namespace type here
+ namespace_type = (SELECT type FROM namespaces WHERE id = NEW.namespace_id);
+ IF (namespace_type <> 'Group') THEN
+ RETURN NULL;
+ END IF;
+ ELSIF (NEW.usage = 4) THEN
+ copy_usage = 0;
+ ELSE
+ RETURN NULL;
+ END IF;
+
+ -- if value is the same there is nothing to update
+ IF (OLD.last_value = NEW.last_value AND (TG_OP = 'INSERT' OR TG_OP = 'UPDATE')) THEN
+ RETURN NULL;
+ END IF;
+
+ INSERT INTO internal_ids (usage, last_value, namespace_id)
+ VALUES (copy_usage, NEW.last_value, NEW.namespace_id)
+ ON CONFLICT (usage, namespace_id) WHERE namespace_id IS NOT NULL
+ DO UPDATE SET last_value = NEW.last_value;
+
+ RETURN NULL;
+ END
+ $$
+ SQL
+
+ execute(<<~SQL)
+ CREATE TRIGGER #{TRIGGER_ON_INSERT}
+ AFTER INSERT ON internal_ids
+ FOR EACH ROW
+ WHEN (((NEW.usage = 0) OR (NEW.usage = 4)) AND NEW.namespace_id IS NOT NULL)
+ EXECUTE FUNCTION #{INSERT_OR_UPDATE_FUNCTION_NAME}();
+ SQL
+
+ execute(<<~SQL)
+ CREATE TRIGGER #{TRIGGER_ON_UPDATE}
+ AFTER UPDATE ON internal_ids
+ FOR EACH ROW
+ WHEN (((NEW.usage = 0) OR (NEW.usage = 4)) AND NEW.namespace_id IS NOT NULL)
+ EXECUTE FUNCTION #{INSERT_OR_UPDATE_FUNCTION_NAME}();
+ SQL
+ end
+
+ def down
+ drop_trigger(:internal_ids, TRIGGER_ON_INSERT)
+ drop_trigger(:internal_ids, TRIGGER_ON_UPDATE)
+ drop_function(INSERT_OR_UPDATE_FUNCTION_NAME)
+ end
+end
diff --git a/db/post_migrate/20240104155616_finalize_backfill_default_branch_protection_namespace_settings.rb b/db/post_migrate/20240104155616_finalize_backfill_default_branch_protection_namespace_settings.rb
new file mode 100644
index 00000000000..b4a23028f21
--- /dev/null
+++ b/db/post_migrate/20240104155616_finalize_backfill_default_branch_protection_namespace_settings.rb
@@ -0,0 +1,24 @@
+# frozen_string_literal: true
+
+class FinalizeBackfillDefaultBranchProtectionNamespaceSettings < Gitlab::Database::Migration[2.2]
+ disable_ddl_transaction!
+
+ restrict_gitlab_migration gitlab_schema: :gitlab_main
+
+ MIGRATION = 'BackfillBranchProtectionNamespaceSetting'
+
+ milestone '16.8'
+ def up
+ ensure_batched_background_migration_is_finished(
+ job_class_name: MIGRATION,
+ table_name: :namespace_settings,
+ column_name: :namespace_id,
+ job_arguments: [],
+ finalize: true
+ )
+ end
+
+ def down
+ # noop
+ end
+end
diff --git a/db/post_migrate/20240108125135_remove_internal_ids_triggers.rb b/db/post_migrate/20240108125135_remove_internal_ids_triggers.rb
new file mode 100644
index 00000000000..861aad2b288
--- /dev/null
+++ b/db/post_migrate/20240108125135_remove_internal_ids_triggers.rb
@@ -0,0 +1,74 @@
+# frozen_string_literal: true
+
+class RemoveInternalIdsTriggers < Gitlab::Database::Migration[2.2]
+ include Gitlab::Database::SchemaHelpers
+
+ milestone '16.8'
+ disable_ddl_transaction!
+
+ TRIGGER_ON_INSERT = 'trigger_copy_usage_on_internal_ids_on_insert'
+ TRIGGER_ON_UPDATE = 'trigger_copy_usage_on_internal_ids_on_update'
+ INSERT_OR_UPDATE_FUNCTION_NAME = 'insert_or_update_internal_ids_usage'
+
+ def up
+ drop_trigger(:internal_ids, TRIGGER_ON_INSERT)
+ drop_trigger(:internal_ids, TRIGGER_ON_UPDATE)
+ drop_function(INSERT_OR_UPDATE_FUNCTION_NAME)
+ end
+
+ def down
+ execute(<<~SQL)
+ CREATE OR REPLACE FUNCTION #{INSERT_OR_UPDATE_FUNCTION_NAME}()
+ RETURNS trigger
+ LANGUAGE plpgsql
+ AS $$
+ DECLARE
+ namespace_type varchar;
+ copy_usage smallint;
+ BEGIN
+ IF (NEW.usage = 0) THEN
+ copy_usage = 4;
+
+ -- we only care about group level internal_ids so we check namespace type here
+ namespace_type = (SELECT type FROM namespaces WHERE id = NEW.namespace_id);
+ IF (namespace_type <> 'Group') THEN
+ RETURN NULL;
+ END IF;
+ ELSIF (NEW.usage = 4) THEN
+ copy_usage = 0;
+ ELSE
+ RETURN NULL;
+ END IF;
+
+ -- if value is the same there is nothing to update
+ IF (OLD.last_value = NEW.last_value AND (TG_OP = 'INSERT' OR TG_OP = 'UPDATE')) THEN
+ RETURN NULL;
+ END IF;
+
+ INSERT INTO internal_ids (usage, last_value, namespace_id)
+ VALUES (copy_usage, NEW.last_value, NEW.namespace_id)
+ ON CONFLICT (usage, namespace_id) WHERE namespace_id IS NOT NULL
+ DO UPDATE SET last_value = NEW.last_value;
+
+ RETURN NULL;
+ END
+ $$
+ SQL
+
+ execute(<<~SQL)
+ CREATE TRIGGER #{TRIGGER_ON_INSERT}
+ AFTER INSERT ON internal_ids
+ FOR EACH ROW
+ WHEN (((NEW.usage = 0) OR (NEW.usage = 4)) AND NEW.namespace_id IS NOT NULL)
+ EXECUTE FUNCTION #{INSERT_OR_UPDATE_FUNCTION_NAME}();
+ SQL
+
+ execute(<<~SQL)
+ CREATE TRIGGER #{TRIGGER_ON_UPDATE}
+ AFTER UPDATE ON internal_ids
+ FOR EACH ROW
+ WHEN (((NEW.usage = 0) OR (NEW.usage = 4)) AND NEW.namespace_id IS NOT NULL)
+ EXECUTE FUNCTION #{INSERT_OR_UPDATE_FUNCTION_NAME}();
+ SQL
+ end
+end
diff --git a/db/post_migrate/20240108125335_add_temporary_index_internal_ids_on_id_and_usage.rb b/db/post_migrate/20240108125335_add_temporary_index_internal_ids_on_id_and_usage.rb
new file mode 100644
index 00000000000..798a42d2a1d
--- /dev/null
+++ b/db/post_migrate/20240108125335_add_temporary_index_internal_ids_on_id_and_usage.rb
@@ -0,0 +1,17 @@
+# frozen_string_literal: true
+
+class AddTemporaryIndexInternalIdsOnIdAndUsage < Gitlab::Database::Migration[2.2]
+ milestone '16.8'
+ disable_ddl_transaction!
+
+ INDEX_NAME = "tmp_index_internal_ids_on_id_and_usage"
+ EPICS_USAGE = 4 # see Enums::InternalId#usage_resources[:epics]
+
+ def up
+ add_concurrent_index :internal_ids, :id, name: INDEX_NAME, where: "usage = #{EPICS_USAGE}"
+ end
+
+ def down
+ remove_concurrent_index_by_name :internal_ids, name: INDEX_NAME
+ end
+end
diff --git a/db/post_migrate/20240108185335_backfill_internal_ids_with_issues_usage_for_epics.rb b/db/post_migrate/20240108185335_backfill_internal_ids_with_issues_usage_for_epics.rb
new file mode 100644
index 00000000000..4131f403212
--- /dev/null
+++ b/db/post_migrate/20240108185335_backfill_internal_ids_with_issues_usage_for_epics.rb
@@ -0,0 +1,56 @@
+# frozen_string_literal: true
+
+class BackfillInternalIdsWithIssuesUsageForEpics < Gitlab::Database::Migration[2.2]
+ milestone '16.8'
+ restrict_gitlab_migration gitlab_schema: :gitlab_main
+ disable_ddl_transaction!
+
+ BATCH_SIZE = 500
+ ISSUES_USAGE = 0 # see Enums::InternalId#usage_resources[:issues]
+ EPICS_USAGE = 4 # see Enums::InternalId#usage_resources[:epics]
+
+ def up
+ internal_id_model = define_batchable_model('internal_ids')
+ epic_model = define_batchable_model('epics')
+
+ internal_id_model.where(usage: EPICS_USAGE).each_batch(of: BATCH_SIZE) do |relation|
+ # Creates a corresponding `usage: :issues` record for every `epics` usage.
+      # On conflict, the record was already created when a new epic was created with the new issues usage.
+      # In that case we keep the higher of the two values (GREATEST of the existing and the copied value).
+ connection.execute(
+ <<~SQL
+ INSERT INTO internal_ids (usage, last_value, namespace_id)
+ SELECT #{ISSUES_USAGE}, last_value, namespace_id
+ FROM internal_ids
+ WHERE internal_ids.id IN(#{relation.select(:id).to_sql})
+ ON CONFLICT (usage, namespace_id) WHERE namespace_id IS NOT NULL
+ DO UPDATE SET last_value = GREATEST(EXCLUDED.last_value, internal_ids.last_value)
+ RETURNING id;
+ SQL
+ )
+
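+      # The epics usage rows are superseded by the copied issues usage rows, so drop them.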
+ relation.delete_all
+ end
+
+    # A few records in the epics table reference namespaces without a corresponding entry in
+    # internal_ids, so this statement backfills those from the maximum epic iid per group.
+ epic_model.distinct_each_batch(column: :group_id, of: BATCH_SIZE) do |relation|
+ connection.execute(
+ <<~SQL
+ INSERT INTO internal_ids (usage, last_value, namespace_id)
+ SELECT #{ISSUES_USAGE}, max(iid) as last_value, group_id
+ FROM epics
+ WHERE group_id IN(#{relation.to_sql})
+ GROUP BY group_id
+ ON CONFLICT (usage, namespace_id) WHERE namespace_id IS NOT NULL
+ DO NOTHING
+ RETURNING id;
+ SQL
+ )
+ end
+ end
+
+ def down
+ # noop
+ end
+end
diff --git a/db/post_migrate/20240108215335_remove_internal_ids_tmp_index.rb b/db/post_migrate/20240108215335_remove_internal_ids_tmp_index.rb
new file mode 100644
index 00000000000..e6ed936afc3
--- /dev/null
+++ b/db/post_migrate/20240108215335_remove_internal_ids_tmp_index.rb
@@ -0,0 +1,17 @@
+# frozen_string_literal: true
+
+class RemoveInternalIdsTmpIndex < Gitlab::Database::Migration[2.2]
+ milestone '16.8'
+ disable_ddl_transaction!
+
+ INDEX_NAME = "tmp_index_internal_ids_on_id_and_usage"
+ EPICS_USAGE = 4 # see Enums::InternalId#usage_resources[:epics]
+
+ def up
+ remove_concurrent_index_by_name :internal_ids, name: INDEX_NAME
+ end
+
+ def down
+ add_concurrent_index :internal_ids, :id, name: INDEX_NAME, where: "usage = #{EPICS_USAGE}"
+ end
+end
diff --git a/db/schema_migrations/20240104155616 b/db/schema_migrations/20240104155616
new file mode 100644
index 00000000000..b30a2a3aba4
--- /dev/null
+++ b/db/schema_migrations/20240104155616
@@ -0,0 +1 @@
+167d6118d2c7ccd83662af4304e543163043a962c66d67d338d3852ba36b53a8 \ No newline at end of file
diff --git a/db/schema_migrations/20240108121335 b/db/schema_migrations/20240108121335
new file mode 100644
index 00000000000..a8a63cc44c9
--- /dev/null
+++ b/db/schema_migrations/20240108121335
@@ -0,0 +1 @@
+42975addff21ed60838f893a447055317aa08aad7b792205385dc4e06c7c523a \ No newline at end of file
diff --git a/db/schema_migrations/20240108125135 b/db/schema_migrations/20240108125135
new file mode 100644
index 00000000000..c69a67409ee
--- /dev/null
+++ b/db/schema_migrations/20240108125135
@@ -0,0 +1 @@
+31083622ee26968a55a6cf84608e2f13ada6a142a8bd302b5f39bbd6facb911a \ No newline at end of file
diff --git a/db/schema_migrations/20240108125335 b/db/schema_migrations/20240108125335
new file mode 100644
index 00000000000..194113c1076
--- /dev/null
+++ b/db/schema_migrations/20240108125335
@@ -0,0 +1 @@
+37a95779658d4c814f64f6f6b1909b79fa3b55f90d93581fa09a2f5d7a600c99 \ No newline at end of file
diff --git a/db/schema_migrations/20240108185335 b/db/schema_migrations/20240108185335
new file mode 100644
index 00000000000..824b9fc59a4
--- /dev/null
+++ b/db/schema_migrations/20240108185335
@@ -0,0 +1 @@
+522d57a8594965ee9756ec4f0fe6f66515348a4b99606607b781ffd177982427 \ No newline at end of file
diff --git a/db/schema_migrations/20240108215335 b/db/schema_migrations/20240108215335
new file mode 100644
index 00000000000..8f9df917d12
--- /dev/null
+++ b/db/schema_migrations/20240108215335
@@ -0,0 +1 @@
+57d6347cfbd9169fe7a104b493632492dc1bb32729a761354feed79a886eecf6 \ No newline at end of file
diff --git a/doc/api/graphql/getting_started.md b/doc/api/graphql/getting_started.md
index cf756027e01..5dea829b91a 100644
--- a/doc/api/graphql/getting_started.md
+++ b/doc/api/graphql/getting_started.md
@@ -285,10 +285,9 @@ in `CI_JOB_TOKEN` scoping behavior.
```graphql
mutation DisableCI_JOB_TOKENscope {
- projectCiCdSettingsUpdate(input:{fullPath: "<namespace>/<project-name>", inboundJobTokenScopeEnabled: false, jobTokenScopeEnabled: false}) {
+ projectCiCdSettingsUpdate(input:{fullPath: "<namespace>/<project-name>", inboundJobTokenScopeEnabled: false}) {
ciCdSettings {
inboundJobTokenScopeEnabled
- jobTokenScopeEnabled
}
errors
}
diff --git a/doc/development/cicd/components.md b/doc/development/cicd/components.md
new file mode 100644
index 00000000000..56ab5a24bd1
--- /dev/null
+++ b/doc/development/cicd/components.md
@@ -0,0 +1,80 @@
+---
+stage: Verify
+group: Pipeline Authoring
+info: Any user with at least the Maintainer role can merge updates to this content. For details, see https://docs.gitlab.com/ee/development/development_processes.html#development-guidelines-review.
+---
+
+# Development guide for GitLab CI/CD components
+
+This document explains how to develop [CI/CD components](../../ci/components/index.md) that are maintained by GitLab.
+
+The official location for all GitLab-maintained component projects is the [`gitlab.com/components`](https://gitlab.com/components) group.
+This group contains all components that are designed to be generic, served to all GitLab users, and maintained by GitLab.
+
+A component project can initially be created under a different group (for example `gitlab-org`)
+but it must be moved into the `components` group before the first version gets published to the catalog.
+
+Components that are for GitLab internal use only, for example those specific to the `gitlab-org/gitlab` project, should be
+implemented under the `gitlab-org` group.
+
+Component projects that are expected to be published in the [CI/CD catalog](../../ci/components/index.md#cicd-catalog)
+should first be dogfooded to ensure we stay on top of the project quality and have first-hand
+experience with them.
+
+## Define ownership
+
+GitLab-maintained components are trusted by the community and require a high degree of quality and timely maintenance.
+Components must be kept up to date and monitored for security vulnerabilities, and bugs must be fixed.
+
+Each component project must have a set of owners and maintainers that are also domain experts.
+Experts can be from any department in GitLab, from Engineering to Support, Customer Success, and Developer Relations.
+
+If a component is related to a GitLab feature (for example Secret Detection), the team that owns the
+feature category or is most closely related to it should maintain the project.
+
+The component project can be created by a separate team or individual initially, but it must be transitioned
+to a set of owners before the first version gets published to the catalog.
+
+The `README.md` file in the project repository must indicate the main owners of the project so that
+they can be contacted by the wider community if needed.
+
+NOTE:
+If a set of project owners cannot be guaranteed or the components cannot be dogfooded, we strongly recommend
+not creating a GitLab-maintained component project and instead letting the wider community fulfill the demand
+in the catalog.
+
+## Development process
+
+1. Create a project under [`gitlab.com/components`](https://gitlab.com/components)
+ or ask one of the group owners to create an empty project for you.
+1. Follow the [standard guide for creating components](../../ci/components/index.md).
+1. Add a concise project description that clearly describes the capabilities offered by the component project.
+1. Ensure that the [best practices](../../ci/components/index.md#best-practices) are followed.
+1. Use [semantic versioning](https://semver.org) in the form `MAJOR.MINOR` or `MAJOR.MINOR.PATCH`.
+1. Add a `LICENSE.md` file with the MIT license.
+1. The project must have a `.gitlab-ci.yml` file that:
+ - Validates all the components in the project correctly.
+   - Contains a `release` job to publish newly released tags to the catalog (see the sketch after this list).
+1. Ensure that the `README.md` contains at least the sections below (for example, see the [Code quality component](https://gitlab.com/components/code-quality)):
+ - **Overview**: The capabilities offered by the component project.
+ - **Components**: Sub-sections for each component, each with:
+ - **Usage**: Examples with and without inputs (when optional).
+ - **Inputs**: A table showing the input names, types, default values (if any) and descriptions.
+ - **Variables** (when applicable): The variable names, possible values, and descriptions.
+ - **Contribute**: Notes and how to get in touch with the maintainers.
+ Usually the contribution process should follow the [official guide](../../ci/components/index.md).
+1. Upload the [official avatar image](img/avatar_component_project.png) to the component project.
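+
+A minimal sketch of the `.gitlab-ci.yml` described above, assuming the standard `release` keyword and
+the `release-cli` image (the job names and validation script here are illustrative, not prescriptive):
+
+```yaml
+# Validate the components on every pipeline, for example by including them
+# in a child pipeline or running them against a sample project.
+validate-components:
+  stage: test
+  script: echo "run component validation here"
+
+# Publish each tagged version to the CI/CD catalog by creating a release.
+create-release:
+  stage: deploy
+  image: registry.gitlab.com/gitlab-org/release-cli:latest
+  script: echo "Publishing $CI_COMMIT_TAG"
+  rules:
+    - if: $CI_COMMIT_TAG
+  release:
+    tag_name: $CI_COMMIT_TAG
+    description: "Release $CI_COMMIT_TAG of components in $CI_PROJECT_PATH"
+```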
+
+## Review and contribution process
+
+It's possible that components in the project have a related [CI/CD template](templates.md) in the GitLab codebase.
+In that case we need to cross-link the component project and the CI/CD template:
+
+- Add a comment in the CI/CD template with the location of the related component project.
+- Add a section in the `README.md` of the component project with the location of the existing CI/CD template.
+
+When changes are applied to these components, check whether we can integrate the changes into the CI/CD template too.
+This might not be possible due to the rigidity of versioning in CI/CD templates.
+
+Ping [`@gitlab-org/maintainers/ci-components`](https://gitlab.com/groups/gitlab-org/maintainers/ci-components/-/group_members?with_inherited_permissions=exclude)
+for reviews to ensure that the components are written in consistent style and follow the best practices.
diff --git a/doc/development/cicd/img/avatar_component_project.png b/doc/development/cicd/img/avatar_component_project.png
new file mode 100644
index 00000000000..e5c20d108fa
--- /dev/null
+++ b/doc/development/cicd/img/avatar_component_project.png
Binary files differ
diff --git a/doc/development/cicd/templates.md b/doc/development/cicd/templates.md
index a2b490b9106..bd3023ebf8d 100644
--- a/doc/development/cicd/templates.md
+++ b/doc/development/cicd/templates.md
@@ -13,7 +13,7 @@ we encourage team members to create [CI/CD components](../../ci/components/index
for the catalog. This transition enhances the modularity and maintainability of our
shared CI/CD resources, and avoids the complexities of contributing new CI/CD templates.
If you need to update an existing template, you must also update the matching CI/CD component.
-If no component exists that matches the CI/CD template yet, consider creating the matching component.
+If no component exists that matches the CI/CD template yet, consider [creating the matching component](components.md).
This ensures that template and component functionality remain in sync, aligning with
our new development practices.
diff --git a/doc/development/pipelines/internals.md b/doc/development/pipelines/internals.md
index 04c1d1f32e8..df9a9d9c4ad 100644
--- a/doc/development/pipelines/internals.md
+++ b/doc/development/pipelines/internals.md
@@ -219,6 +219,7 @@ and included in `rules` definitions via [YAML anchors](../../ci/yaml/yaml_optimi
| `if-merge-request-title-as-if-foss` | Matches if the pipeline is for a merge request and the MR has label ~"pipeline:run-as-if-foss" | |
| `if-merge-request-title-update-caches` | Matches if the pipeline is for a merge request and the MR has label ~"pipeline:update-cache". | |
| `if-merge-request-labels-run-all-rspec` | Matches if the pipeline is for a merge request and the MR has label ~"pipeline:run-all-rspec". | |
+| `if-merge-request-labels-run-cs-evaluation` | Matches if the pipeline is for a merge request and the MR has label ~"pipeline:run-CS-evaluation". | |
| `if-security-merge-request` | Matches if the pipeline is for a security merge request. | |
| `if-security-schedule` | Matches if the pipeline is for a security scheduled pipeline. | |
| `if-nightly-master-schedule` | Matches if the pipeline is for a `master` scheduled pipeline with `$NIGHTLY` set. | |
diff --git a/doc/update/deprecations.md b/doc/update/deprecations.md
index c980e1c160f..b4694bdf0d9 100644
--- a/doc/update/deprecations.md
+++ b/doc/update/deprecations.md
@@ -1337,6 +1337,20 @@ removed in 17.0.
<div class="deprecation breaking-change" data-milestone="17.0">
+### `after_script` keyword will run for cancelled jobs
+
+<div class="deprecation-notes">
+- Announced in GitLab <span class="milestone">16.8</span>
+- Removal in GitLab <span class="milestone">17.0</span> ([breaking change](https://docs.gitlab.com/ee/update/terminology.html#breaking-change))
+- To discuss this change or learn more, see the [deprecation issue](https://gitlab.com/gitlab-org/gitlab/-/issues/437789).
+</div>
+
+The [`after_script`](https://docs.gitlab.com/ee/ci/yaml/#after_script) CI/CD keyword is used to run additional commands after the main `script` section of a job. This is often used for cleaning up environments or other resources that were used by the job. For many users, the fact that the `after_script` commands do not run if a job is cancelled was unexpected and undesired. In 17.0, the keyword will be updated to also run commands after job cancellation. Make sure that any CI/CD configuration using the `after_script` keyword can handle running for cancelled jobs as well.
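+
+For example, a job can check the predefined `CI_JOB_STATUS` variable in `after_script` to adjust its
+cleanup behavior. A minimal sketch (the script contents are illustrative):
+
+```yaml
+integration-test:
+  script:
+    - ./run-tests.sh
+  after_script:
+    # From 17.0 this section also runs when the job is cancelled.
+    - if [ "$CI_JOB_STATUS" = "canceled" ]; then echo "Cleaning up after cancellation"; fi
+    - ./teardown-environment.sh
+```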
+
+</div>
+
+<div class="deprecation breaking-change" data-milestone="17.0">
+
### `metric` filter and `value` field for DORA API
<div class="deprecation-notes">
diff --git a/locale/gitlab.pot b/locale/gitlab.pot
index 70425f7ff1a..f2f93a2a6c8 100644
--- a/locale/gitlab.pot
+++ b/locale/gitlab.pot
@@ -43588,6 +43588,9 @@ msgstr ""
msgid "Security Finding not found"
msgstr ""
+msgid "Security Policy project already exists, but is not linked."
+msgstr ""
+
msgid "Security Policy project already exists."
msgstr ""
diff --git a/package.json b/package.json
index a7b8340890d..2ae77d2c25d 100644
--- a/package.json
+++ b/package.json
@@ -60,7 +60,7 @@
"@gitlab/favicon-overlay": "2.0.0",
"@gitlab/fonts": "^1.3.0",
"@gitlab/svgs": "3.74.0",
- "@gitlab/ui": "^72.5.0",
+ "@gitlab/ui": "^72.5.1",
"@gitlab/visual-review-tools": "1.7.3",
"@gitlab/web-ide": "^0.0.1-dev-20231211152737",
"@mattiasbuelens/web-streams-adapter": "^0.1.0",
@@ -231,7 +231,7 @@
"yaml": "^2.0.0-10"
},
"devDependencies": {
- "@gitlab/eslint-plugin": "19.3.0",
+ "@gitlab/eslint-plugin": "19.4.0",
"@gitlab/stylelint-config": "5.0.1",
"@graphql-eslint/eslint-plugin": "3.20.1",
"@originjs/vite-plugin-commonjs": "^1.0.3",
diff --git a/scripts/download-downstream-artifact.rb b/scripts/download-downstream-artifact.rb
new file mode 100755
index 00000000000..23c400a9add
--- /dev/null
+++ b/scripts/download-downstream-artifact.rb
@@ -0,0 +1,121 @@
+#!/usr/bin/env ruby
+
+# frozen_string_literal: true
+
+require 'gitlab'
+
+require_relative 'api/default_options'
+
+# This class allows an upstream job to fetch an artifact from a job in a downstream pipeline.
+#
+# Until https://gitlab.com/gitlab-org/gitlab/-/issues/285100 is resolved, it's not straightforward for an upstream
+# pipeline to use artifacts from a downstream pipeline. There is a workaround for parent-child pipelines (see the issue)
+# but it relies on CI_MERGE_REQUEST_REF_PATH so it doesn't work for multi-project pipelines.
+#
+# This uses the Jobs API to get pipeline bridges (trigger jobs) and the Job artifacts API to download artifacts.
+# - https://docs.gitlab.com/ee/api/jobs.html#list-pipeline-trigger-jobs
+# - https://docs.gitlab.com/ee/api/job_artifacts.html
+#
+# Note: This class also works for parent-child pipelines within the same project; it's just not necessary in that case.
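+#
+# Example invocation from CI, using the variables set in .gitlab/ci/qa.gitlab-ci.yml (the
+# PROJECT_TOKEN_FOR_CI_SCRIPTS_API_USAGE token and the usual CI_* variables must also be set):
+#
+#   TRIGGER_JOB_NAME="e2e:code-suggestions-eval" \
+#   DOWNSTREAM_PROJECT="gitlab-com/create-stage/code-creation/code-suggestion-scenarios" \
+#   DOWNSTREAM_JOB_NAME="run_scenarios" \
+#   DOWNSTREAM_JOB_ARTIFACT_PATH="scores-DOWNSTREAM_JOB_ID.csv" \
+#   OUTPUT_ARTIFACT_PATH="scores.csv" \
+#   scripts/download-downstream-artifact.rb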
+class DownloadDownstreamArtifact
+ def initialize(options)
+ @upstream_project = options.fetch(:upstream_project, API::DEFAULT_OPTIONS[:project])
+ @upstream_pipeline_id = options.fetch(:upstream_pipeline_id, API::DEFAULT_OPTIONS[:pipeline_id])
+ @downstream_project = options.fetch(:downstream_project, API::DEFAULT_OPTIONS[:project])
+ @downstream_job_name = options.fetch(:downstream_job_name)
+ @trigger_job_name = options.fetch(:trigger_job_name)
+ @downstream_artifact_path = options.fetch(:downstream_artifact_path)
+ @output_artifact_path = options.fetch(:output_artifact_path)
+
+ unless options.key?(:api_token)
+ raise ArgumentError, 'PROJECT_TOKEN_FOR_CI_SCRIPTS_API_USAGE is required to access downstream pipelines'
+ end
+
+ api_token = options.fetch(:api_token)
+
+ @client = Gitlab.client(
+ endpoint: options.fetch(:endpoint, API::DEFAULT_OPTIONS[:endpoint]),
+ private_token: api_token
+ )
+ end
+
+ def execute
+ unless downstream_pipeline
+ abort("Could not find downstream pipeline triggered via #{trigger_job_name} in project #{downstream_project}")
+ end
+
+ unless downstream_job
+ abort("Could not find job with name '#{downstream_job_name}' in #{downstream_pipeline['web_url']}")
+ end
+
+ puts "Fetching scores artifact from downstream pipeline triggered via #{trigger_job_name}..."
+ puts "Downstream pipeline is #{downstream_pipeline['web_url']}."
+ puts %(Downstream job "#{downstream_job_name}": #{downstream_job['web_url']}.)
+
+ path = downstream_artifact_path.sub('DOWNSTREAM_JOB_ID', downstream_job.id.to_s)
+ puts %(Fetching artifact "#{path}" from #{downstream_job_name}...)
+
+ download_and_save_artifact(path)
+
+ puts "Artifact saved as #{output_artifact_path} ..."
+ end
+
+ def self.options_from_env
+ API::DEFAULT_OPTIONS.merge({
+ upstream_project: API::DEFAULT_OPTIONS[:project],
+ upstream_pipeline_id: API::DEFAULT_OPTIONS[:pipeline_id],
+ downstream_project: ENV.fetch('DOWNSTREAM_PROJECT', API::DEFAULT_OPTIONS[:project]),
+ downstream_job_name: ENV['DOWNSTREAM_JOB_NAME'],
+ trigger_job_name: ENV['TRIGGER_JOB_NAME'],
+ downstream_artifact_path: ENV['DOWNSTREAM_JOB_ARTIFACT_PATH'],
+ output_artifact_path: ENV['OUTPUT_ARTIFACT_PATH']
+ }).except(:project, :pipeline_id)
+ end
+
+ private
+
+ attr_reader :downstream_artifact_path,
+ :output_artifact_path,
+ :downstream_job_name,
+ :trigger_job_name,
+ :upstream_project,
+ :downstream_project,
+ :upstream_pipeline_id,
+ :client
+
+ def bridge
+ @bridge ||= client
+ .pipeline_bridges(upstream_project, upstream_pipeline_id, per_page: 100)
+ .auto_paginate
+ .find { |job| job.name.include?(trigger_job_name) }
+ end
+
+ def downstream_pipeline
+ @downstream_pipeline ||=
+ if bridge&.downstream_pipeline.nil?
+ nil
+ else
+ client.pipeline(downstream_project, bridge.downstream_pipeline.id)
+ end
+ end
+
+ def downstream_job
+ @downstream_job ||= client
+ .pipeline_jobs(downstream_project, downstream_pipeline.id)
+ .find { |job| job.name.include?(downstream_job_name) }
+ end
+
+ def download_and_save_artifact(job_artifact_path)
+ file_response = client.download_job_artifact_file(downstream_project, downstream_job.id, job_artifact_path)
+
+ file_response.respond_to?(:read) || abort("Could not download artifact. Request returned: #{file_response}")
+
+ File.write(output_artifact_path, file_response.read)
+ end
+end
+
+if $PROGRAM_NAME == __FILE__
+ options = DownloadDownstreamArtifact.options_from_env
+
+ DownloadDownstreamArtifact.new(options).execute
+end
diff --git a/spec/migrations/20240108121335_copy_internal_ids_for_epics_and_issues_usage_on_groups_spec.rb b/spec/migrations/20240108121335_copy_internal_ids_for_epics_and_issues_usage_on_groups_spec.rb
new file mode 100644
index 00000000000..c54f1f06d43
--- /dev/null
+++ b/spec/migrations/20240108121335_copy_internal_ids_for_epics_and_issues_usage_on_groups_spec.rb
@@ -0,0 +1,69 @@
+# frozen_string_literal: true
+
+require 'spec_helper'
+require_migration!
+
+RSpec.describe CopyInternalIdsForEpicsAndIssuesUsageOnGroups, feature_category: :team_planning do
+ let(:internal_ids) { table(:internal_ids) }
+ let(:namespaces) { table(:namespaces) }
+
+ let!(:group1) { namespaces.create!(name: 'group1', path: 'group1', type: 'Group') }
+ let!(:group2) { namespaces.create!(name: 'group2', path: 'group2', type: 'Group') }
+ let!(:group3) { namespaces.create!(name: 'group3', path: 'group3', type: 'Group') }
+ let!(:project_namespace) { namespaces.create!(name: 'project1', path: 'project1', type: 'Project') }
+
+ let!(:project_issue_iid) { internal_ids.create!(namespace_id: project_namespace.id, usage: 0, last_value: 100) }
+ let!(:group1_epic_iid) { internal_ids.create!(namespace_id: group1.id, usage: 4, last_value: 101) }
+ let!(:group2_issue_iid) { internal_ids.create!(namespace_id: group2.id, usage: 0, last_value: 102) }
+ let!(:group2_epic_iid) { internal_ids.create!(namespace_id: group2.id, usage: 4, last_value: 103) }
+
+ describe '#on_update' do
+ it 'updates corresponding usage record between epics and issues' do
+ # create the triggers
+ migrate!
+
+ # initially there is no record for issues usage for group1
+ expect(internal_ids.where(usage: 0, namespace_id: group1.id).count).to eq(0)
+      # when the epics usage record is updated, the issues usage record is created and last_value is copied
+ group1_epic_iid.update!(last_value: 1000)
+ expect(internal_ids.where(usage: 0, namespace_id: group1.id).first.last_value).to eq(1000)
+
+ # when there is an issues usage record:
+ expect(internal_ids.where(usage: 0, namespace_id: group2.id).first.last_value).to eq(102)
+      # updates the issues usage record when the epics usage record is updated
+ group2_epic_iid.update!(last_value: 1000)
+ expect(internal_ids.where(usage: 0, namespace_id: group2.id).first.last_value).to eq(1000)
+
+ expect(internal_ids.where(usage: 4, namespace_id: group2.id).first.last_value).to eq(1000)
+ group2_issue_iid.update!(last_value: 2000)
+ expect(internal_ids.where(usage: 4, namespace_id: group2.id).first.last_value).to eq(2000)
+ end
+ end
+
+ describe '#on_insert' do
+ it 'inserts corresponding usage record between epics and issues' do
+ migrate!
+
+ expect(internal_ids.where(usage: 0, namespace_id: group3.id).count).to eq(0)
+ expect(internal_ids.where(usage: 4, namespace_id: group3.id).count).to eq(0)
+
+ # create record for epics usage
+ internal_ids.create!(namespace_id: group3.id, usage: 4, last_value: 1000)
+
+ expect(internal_ids.where(usage: 0, namespace_id: group3.id).first.last_value).to eq(1000)
+ expect(internal_ids.where(usage: 4, namespace_id: group3.id).first.last_value).to eq(1000)
+
+ # cleanup records for group3
+ internal_ids.where(namespace_id: group3.id).delete_all
+
+ expect(internal_ids.where(usage: 0, namespace_id: group3.id).count).to eq(0)
+ expect(internal_ids.where(usage: 4, namespace_id: group3.id).count).to eq(0)
+
+ # create record for issues usage
+ internal_ids.create!(namespace_id: group3.id, usage: 0, last_value: 1000)
+
+ expect(internal_ids.where(usage: 0, namespace_id: group3.id).first.last_value).to eq(1000)
+ expect(internal_ids.where(usage: 4, namespace_id: group3.id).first.last_value).to eq(1000)
+ end
+ end
+end
diff --git a/spec/migrations/20240108185335_backfill_internal_ids_with_issues_usage_for_epics_spec.rb b/spec/migrations/20240108185335_backfill_internal_ids_with_issues_usage_for_epics_spec.rb
new file mode 100644
index 00000000000..31cc9c3dd39
--- /dev/null
+++ b/spec/migrations/20240108185335_backfill_internal_ids_with_issues_usage_for_epics_spec.rb
@@ -0,0 +1,52 @@
+# frozen_string_literal: true
+
+require 'spec_helper'
+require_migration!
+
+RSpec.describe BackfillInternalIdsWithIssuesUsageForEpics, feature_category: :team_planning do
+ let(:internal_ids) { table(:internal_ids) }
+ let(:users) { table(:users) }
+ let(:namespaces) { table(:namespaces) }
+ let(:epics) { table(:epics) }
+
+ let!(:author) { users.create!(projects_limit: 0, email: 'human@example.com') }
+
+ let!(:group1) { namespaces.create!(name: 'group1', path: 'group1', type: 'Group') }
+ let!(:group2) { namespaces.create!(name: 'group2', path: 'group2', type: 'Group') }
+ let!(:group3) { namespaces.create!(name: 'group3', path: 'group3', type: 'Group') }
+ let!(:group4) { namespaces.create!(name: 'group4', path: 'group4', type: 'Group') }
+ let!(:project_namespace) { namespaces.create!(name: 'project1', path: 'project1', type: 'Project') }
+
+ let!(:project_issue_iid) { internal_ids.create!(namespace_id: project_namespace.id, usage: 0, last_value: 100) }
+ let!(:group1_epic_iid) { internal_ids.create!(namespace_id: group1.id, usage: 4, last_value: 100) }
+
+  # when there are issues and epics usage records for the same namespace and the EPICS usage last_value is higher
+ let!(:group2_issue_iid) { internal_ids.create!(namespace_id: group2.id, usage: 0, last_value: 100) }
+ let!(:group2_epic_iid) { internal_ids.create!(namespace_id: group2.id, usage: 4, last_value: 110) }
+
+  # when there are issues and epics usage records for the same namespace and the ISSUES usage last_value is higher
+  let!(:group3_issue_iid) { internal_ids.create!(namespace_id: group3.id, usage: 0, last_value: 110) }
+  let!(:group3_epic_iid) { internal_ids.create!(namespace_id: group3.id, usage: 4, last_value: 100) }
+
+ let!(:group4_epic) do
+ epics.create!(title: 'Epic99', title_html: 'Epic99', group_id: group4.id, iid: 99, author_id: author.id)
+ end
+
+ describe '#up' do
+ it 'backfills internal_ids for epics as group level issues' do
+ issues_iid_namespaces = [group1.id, group2.id, group3.id, group4.id, project_namespace.id]
+ # project, group2, group3
+ expect(internal_ids.where(usage: 0).count).to eq(3)
+ # group1, group2, group3
+ expect(internal_ids.where(usage: 4).count).to eq(3)
+ migrate!
+
+      # project1, group1, group2, group3, group4 (group4 only had an epics record, not an internal_ids record)
+ expect(internal_ids.where(usage: 0).count).to eq(5)
+ expect(internal_ids.where(usage: 0).pluck(:namespace_id)).to match_array(issues_iid_namespaces)
+ expect(internal_ids.where(usage: 0, namespace_id: group2.id).first.last_value).to eq(110)
+ expect(internal_ids.where(usage: 0, namespace_id: group3.id).first.last_value).to eq(110)
+ expect(internal_ids.where(usage: 4).count).to eq(0)
+ end
+ end
+end
diff --git a/spec/scripts/download_downstream_artifact_spec.rb b/spec/scripts/download_downstream_artifact_spec.rb
new file mode 100644
index 00000000000..05d1dc9933f
--- /dev/null
+++ b/spec/scripts/download_downstream_artifact_spec.rb
@@ -0,0 +1,180 @@
+# frozen_string_literal: true
+
+require 'fast_spec_helper'
+require 'gitlab/rspec/all'
+require_relative '../../scripts/download-downstream-artifact'
+
+# rubocop:disable RSpec/VerifiedDoubles -- doubles are simple mocks of a few methods from external code
+
+RSpec.describe DownloadDownstreamArtifact, feature_category: :tooling do
+ include StubENV
+
+ subject(:execute) { described_class.new(options).execute }
+
+ before do
+ stub_env('PROJECT_TOKEN_FOR_CI_SCRIPTS_API_USAGE', nil)
+ stub_env('CI_PROJECT_ID', nil)
+ stub_env('CI_PIPELINE_ID', nil)
+ stub_env('CI_API_V4_URL', nil)
+ stub_env('DOWNSTREAM_PROJECT', nil)
+ stub_env('DOWNSTREAM_JOB_NAME', nil)
+ stub_env('TRIGGER_JOB_NAME', nil)
+ stub_env('DOWNSTREAM_JOB_ARTIFACT_PATH', nil)
+ stub_env('OUTPUT_ARTIFACT_PATH', nil)
+ end
+
+ describe '#execute' do
+ let(:options) do
+ {
+ api_token: 'asdf1234',
+ endpoint: 'https://gitlab.com/api/v4',
+ upstream_project: 'upstream/project',
+ upstream_pipeline_id: 123,
+ downstream_project: 'downstream/project',
+ downstream_job_name: 'test-job',
+ trigger_job_name: 'trigger-job',
+ downstream_artifact_path: 'scores-DOWNSTREAM_JOB_ID.csv',
+ output_artifact_path: 'scores.csv'
+ }
+ end
+
+ let(:client) { double('Gitlab::Client') }
+ let(:artifact_response) { double('io', read: 'artifact content') }
+
+ let(:job) do
+ Struct.new(:id, :name, :web_url).new(789, 'test-job', 'https://example.com/jobs/789')
+ end
+
+ let(:downstream_pipeline) do
+ Struct.new(:id, :web_url).new(111, 'https://example.com/pipelines/111')
+ end
+
+ let(:pipeline_bridges) do
+ double('pipeline_bridges', auto_paginate: [double(name: 'trigger-job', downstream_pipeline: downstream_pipeline)])
+ end
+
+ let(:expected_output) do
+ <<~OUTPUT
+ Fetching scores artifact from downstream pipeline triggered via trigger-job...
+ Downstream pipeline is https://example.com/pipelines/111.
+ Downstream job "test-job": https://example.com/jobs/789.
+ Fetching artifact "scores-789.csv" from test-job...
+ Artifact saved as scores.csv ...
+ OUTPUT
+ end
+
+ before do
+ allow(Gitlab).to receive(:client)
+ .with(endpoint: options[:endpoint], private_token: options[:api_token])
+ .and_return(client)
+
+ allow(client).to receive(:pipeline_bridges).and_return(pipeline_bridges)
+ allow(client).to receive(:pipeline).and_return(downstream_pipeline)
+ allow(client).to receive(:pipeline_jobs).and_return([job])
+ allow(client).to receive(:download_job_artifact_file).and_return(artifact_response)
+ allow(File).to receive(:write)
+ end
+
+ it 'downloads artifact from downstream pipeline' do
+ expect(client).to receive(:download_job_artifact_file).with('downstream/project', 789, 'scores-789.csv')
+
+ expect { execute }.to output(expected_output).to_stdout
+ end
+
+ it 'saves artifact to output path' do
+ expect(File).to receive(:write).with('scores.csv', 'artifact content')
+
+ expect { execute }.to output(expected_output).to_stdout
+ end
+
+ context 'when options come from environment variables' do
+ before do
+ stub_env('PROJECT_TOKEN_FOR_CI_SCRIPTS_API_USAGE', 'asdf1234')
+ stub_env('CI_PROJECT_ID', 'upstream/project')
+ stub_env('CI_PIPELINE_ID', '123')
+ stub_env('CI_API_V4_URL', 'https://gitlab.com/api/v4')
+ stub_env('DOWNSTREAM_PROJECT', 'downstream/project')
+ stub_env('DOWNSTREAM_JOB_NAME', 'test-job')
+ stub_env('TRIGGER_JOB_NAME', 'trigger-job')
+ stub_env('DOWNSTREAM_JOB_ARTIFACT_PATH', 'scores-DOWNSTREAM_JOB_ID.csv')
+ stub_env('OUTPUT_ARTIFACT_PATH', 'scores.csv')
+
+ stub_const('API::DEFAULT_OPTIONS', {
+ project: ENV['CI_PROJECT_ID'],
+ pipeline_id: ENV['CI_PIPELINE_ID'],
+ api_token: ENV['PROJECT_TOKEN_FOR_CI_SCRIPTS_API_USAGE'],
+ endpoint: ENV['CI_API_V4_URL']
+ })
+ end
+
+ it 'uses the environment variable values' do
+ options = described_class.options_from_env
+
+ expect(File).to receive(:write)
+ expect { described_class.new(options).execute }.to output(expected_output).to_stdout
+ end
+ end
+
+ context 'when the downstream pipeline cannot be found' do
+ let(:pipeline_bridges) do
+ double('pipeline_bridges', auto_paginate: [double(name: 'trigger-job', downstream_pipeline: nil)])
+ end
+
+ it 'aborts' do
+ expect(File).not_to receive(:write)
+ expect { described_class.new(options).execute }
+ .to output(
+ %r{Could not find downstream pipeline triggered via trigger-job in project downstream/project}
+ ).to_stderr
+ .and raise_error(SystemExit)
+ end
+ end
+
+ context 'when the downstream job cannot be found' do
+ let(:job) { double('job', name: 'foo') }
+
+ it 'aborts' do
+ expect(File).not_to receive(:write)
+ expect { described_class.new(options).execute }
+ .to output(
+ %r{Could not find job with name 'test-job' in https://example.com/pipelines/111}
+ ).to_stderr
+ .and raise_error(SystemExit)
+ end
+ end
+
+ context 'when the downstream artifact cannot be found' do
+ let(:artifact_response) { 'error' }
+
+ it 'aborts' do
+ expect(File).not_to receive(:write)
+ expect { described_class.new(options).execute }
+ .to output(
+ %r{Could not download artifact. Request returned: error}
+ ).to_stderr
+ .and raise_error(SystemExit)
+ end
+ end
+ end
+
+ context 'when called without an API token' do
+ let(:options) do
+ {
+ endpoint: 'https://gitlab.com/api/v4',
+ upstream_project: 'upstream/project',
+ upstream_pipeline_id: 123,
+ downstream_project: 'downstream/project',
+ downstream_job_name: 'test-job',
+ trigger_job_name: 'trigger-job',
+ downstream_artifact_path: 'scores-DOWNSTREAM_JOB_ID.csv',
+ output_artifact_path: 'scores.csv'
+ }
+ end
+
+ it 'raises an error' do
+ expect { described_class.new(options) }.to raise_error(ArgumentError)
+ end
+ end
+end
+
+# rubocop:enable RSpec/VerifiedDoubles
diff --git a/workhorse/config_test.go b/workhorse/config_test.go
index 64f0a24d148..c1fe1652a45 100644
--- a/workhorse/config_test.go
+++ b/workhorse/config_test.go
@@ -1,17 +1,22 @@
package main
import (
+ "bytes"
"flag"
+ "fmt"
"io"
"net/url"
"os"
+ "path/filepath"
"testing"
"time"
+ "github.com/BurntSushi/toml"
"github.com/stretchr/testify/require"
"gitlab.com/gitlab-org/gitlab/workhorse/internal/config"
"gitlab.com/gitlab-org/gitlab/workhorse/internal/queueing"
+ "gitlab.com/gitlab-org/gitlab/workhorse/internal/testhelper"
"gitlab.com/gitlab-org/gitlab/workhorse/internal/upstream"
)
@@ -284,3 +289,387 @@ func TestConfigFlagParsing(t *testing.T) {
}
require.Equal(t, expectedCfg, cfg)
}
+
+func TestLoadConfigCommand(t *testing.T) {
+ t.Parallel()
+
+ modifyDefaultConfig := func(modify func(cfg *config.Config)) config.Config {
+ f, err := os.CreateTemp("", "workhorse-config-test")
+ require.NoError(t, err)
+ t.Cleanup(func() {
+ defer os.Remove(f.Name())
+ })
+
+ cfg := &config.Config{}
+
+ modify(cfg)
+ return *cfg
+ }
+
+ writeScript := func(t *testing.T, script string) string {
+ return testhelper.WriteExecutable(t,
+ filepath.Join(testhelper.TempDir(t), "script"),
+ []byte("#!/bin/sh\n"+script),
+ )
+ }
+
+ type setupData struct {
+ cfg config.Config
+ expectedErr string
+ expectedCfg config.Config
+ }
+
+ for _, tc := range []struct {
+ desc string
+ setup func(t *testing.T) setupData
+ }{
+ {
+ desc: "nonexistent executable",
+ setup: func(t *testing.T) setupData {
+ return setupData{
+ cfg: config.Config{
+ ConfigCommand: "/does/not/exist",
+ },
+ expectedErr: "running config command: fork/exec /does/not/exist: no such file or directory",
+ }
+ },
+ },
+ {
+ desc: "command points to non-executable file",
+ setup: func(t *testing.T) setupData {
+ cmd := filepath.Join(testhelper.TempDir(t), "script")
+ require.NoError(t, os.WriteFile(cmd, nil, 0o600))
+
+ return setupData{
+ cfg: config.Config{
+ ConfigCommand: cmd,
+ },
+ expectedErr: fmt.Sprintf(
+ "running config command: fork/exec %s: permission denied", cmd,
+ ),
+ }
+ },
+ },
+ {
+ desc: "executable returns error",
+ setup: func(t *testing.T) setupData {
+ return setupData{
+ cfg: config.Config{
+ ConfigCommand: writeScript(t, "echo error >&2 && exit 1"),
+ },
+ expectedErr: "running config command: exit status 1, stderr: \"error\\n\"",
+ }
+ },
+ },
+ {
+ desc: "invalid JSON",
+ setup: func(t *testing.T) setupData {
+ return setupData{
+ cfg: config.Config{
+ ConfigCommand: writeScript(t, "echo 'this is not json'"),
+ },
+ expectedErr: "unmarshalling generated config: invalid character 'h' in literal true (expecting 'r')",
+ }
+ },
+ },
+ {
+ desc: "mixed stdout and stderr",
+ setup: func(t *testing.T) setupData {
+ // We want to verify that we're able to correctly parse the output
+ // even if the process writes to both its stdout and stderr.
+ cmd := writeScript(t, "echo error >&2 && echo '{}'")
+
+ return setupData{
+ cfg: config.Config{
+ ConfigCommand: cmd,
+ },
+ expectedCfg: modifyDefaultConfig(func(cfg *config.Config) {
+ cfg.ConfigCommand = cmd
+ }),
+ }
+ },
+ },
+ {
+ desc: "empty script",
+ setup: func(t *testing.T) setupData {
+ cmd := writeScript(t, "echo '{}'")
+
+ return setupData{
+ cfg: config.Config{
+ ConfigCommand: cmd,
+ },
+ expectedCfg: modifyDefaultConfig(func(cfg *config.Config) {
+ cfg.ConfigCommand = cmd
+ }),
+ }
+ },
+ },
+ {
+ desc: "unknown value",
+ setup: func(t *testing.T) setupData {
+ cmd := writeScript(t, `echo '{"key_does_not_exist":"value"}'`)
+
+ return setupData{
+ cfg: config.Config{
+ ConfigCommand: cmd,
+ },
+ expectedCfg: modifyDefaultConfig(func(cfg *config.Config) {
+ cfg.ConfigCommand = cmd
+ }),
+ }
+ },
+ },
+ {
+ desc: "generated value",
+ setup: func(t *testing.T) setupData {
+ cmd := writeScript(t, `echo '{"shutdown_timeout": "100s"}'`)
+
+ return setupData{
+ cfg: config.Config{
+ ConfigCommand: cmd,
+ },
+ expectedCfg: modifyDefaultConfig(func(cfg *config.Config) {
+ cfg.ConfigCommand = cmd
+ cfg.ShutdownTimeout = config.TomlDuration{Duration: 100 * time.Second}
+ }),
+ }
+ },
+ },
+ {
+ desc: "overridden value",
+ setup: func(t *testing.T) setupData {
+ cmd := writeScript(t, `echo '{"shutdown_timeout": "100s"}'`)
+
+ return setupData{
+ cfg: config.Config{
+ ConfigCommand: cmd,
+ ShutdownTimeout: config.TomlDuration{Duration: 1 * time.Second},
+ },
+ expectedCfg: modifyDefaultConfig(func(cfg *config.Config) {
+ cfg.ConfigCommand = cmd
+ cfg.ShutdownTimeout = config.TomlDuration{Duration: 100 * time.Second}
+ }),
+ }
+ },
+ },
+ {
+ desc: "mixed configuration",
+ setup: func(t *testing.T) setupData {
+ cmd := writeScript(t, `echo '{"redis": { "url": "redis://redis.example.com", "db": 1 } }'`)
+ redisURL, err := url.Parse("redis://redis.example.com")
+ require.NoError(t, err)
+ db := 1
+
+ return setupData{
+ cfg: config.Config{
+ ConfigCommand: cmd,
+ ImageResizerConfig: config.DefaultImageResizerConfig,
+ },
+ expectedCfg: modifyDefaultConfig(func(cfg *config.Config) {
+ cfg.ConfigCommand = cmd
+ cfg.Redis = &config.RedisConfig{
+ URL: config.TomlURL{URL: *redisURL},
+ DB: &db,
+ }
+ cfg.ImageResizerConfig = config.DefaultImageResizerConfig
+ }),
+ }
+ },
+ },
+ {
+ desc: "subsections are being merged",
+ setup: func(t *testing.T) setupData {
+ redisURL, err := url.Parse("redis://redis.example.com")
+ require.NoError(t, err)
+ origDB := 1
+ scriptDB := 5
+
+ cmd := writeScript(t, `cat <<-EOF
+ {
+ "redis": {
+ "url": "redis://redis.example.com",
+ "db": 5
+ }
+ }
+ EOF
+ `)
+
+ return setupData{
+ cfg: config.Config{
+ ConfigCommand: cmd,
+ Redis: &config.RedisConfig{
+ URL: config.TomlURL{URL: *redisURL},
+ DB: &origDB,
+ },
+ },
+ expectedCfg: modifyDefaultConfig(func(cfg *config.Config) {
+ cfg.ConfigCommand = cmd
+ cfg.Redis = &config.RedisConfig{
+ URL: config.TomlURL{URL: *redisURL},
+ DB: &scriptDB,
+ }
+ }),
+ }
+ },
+ },
+ {
+ desc: "listener config",
+ setup: func(t *testing.T) setupData {
+ cmd := writeScript(t, `cat <<-EOF
+ {
+ "listeners": [
+ {
+ "network": "tcp",
+ "addr": "127.0.0.1:3443",
+ "tls": {
+ "certificate": "/path/to/certificate",
+ "key": "/path/to/private/key"
+ }
+ }
+ ]
+ }
+ EOF
+ `)
+
+ return setupData{
+ cfg: config.Config{
+ ConfigCommand: cmd,
+ },
+ expectedCfg: modifyDefaultConfig(func(cfg *config.Config) {
+ cfg.ConfigCommand = cmd
+ cfg.Listeners = []config.ListenerConfig{
+ {
+ Network: "tcp",
+ Addr: "127.0.0.1:3443",
+ Tls: &config.TlsConfig{
+ Certificate: "/path/to/certificate",
+ Key: "/path/to/private/key",
+ },
+ },
+ }
+ }),
+ }
+ },
+ },
+ {
+ desc: "S3 object storage config",
+ setup: func(t *testing.T) setupData {
+ cmd := writeScript(t, `cat <<-EOF
+ {
+ "object_storage": {
+ "provider": "AWS",
+ "s3": {
+ "aws_access_key_id": "MY-AWS-ACCESS-KEY",
+ "aws_secret_access_key": "MY-AWS-SECRET-ACCESS-KEY"
+ }
+ }
+ }
+ EOF
+ `)
+
+ return setupData{
+ cfg: config.Config{
+ ConfigCommand: cmd,
+ },
+ expectedCfg: modifyDefaultConfig(func(cfg *config.Config) {
+ cfg.ConfigCommand = cmd
+ cfg.ObjectStorageCredentials = config.ObjectStorageCredentials{
+ Provider: "AWS",
+ S3Credentials: config.S3Credentials{
+ AwsAccessKeyID: "MY-AWS-ACCESS-KEY",
+ AwsSecretAccessKey: "MY-AWS-SECRET-ACCESS-KEY",
+ },
+ }
+ }),
+ }
+ },
+ },
+ {
+ desc: "Azure object storage config",
+ setup: func(t *testing.T) setupData {
+ cmd := writeScript(t, `cat <<-EOF
+ {
+ "object_storage": {
+ "provider": "AzureRM",
+ "azurerm": {
+ "azure_storage_account_name": "MY-STORAGE-ACCOUNT",
+ "azure_storage_access_key": "MY-STORAGE-ACCESS-KEY"
+ }
+ }
+ }
+ EOF
+ `)
+
+ return setupData{
+ cfg: config.Config{
+ ConfigCommand: cmd,
+ },
+ expectedCfg: modifyDefaultConfig(func(cfg *config.Config) {
+ cfg.ConfigCommand = cmd
+ cfg.ObjectStorageCredentials = config.ObjectStorageCredentials{
+ Provider: "AzureRM",
+ AzureCredentials: config.AzureCredentials{
+ AccountName: "MY-STORAGE-ACCOUNT",
+ AccountKey: "MY-STORAGE-ACCESS-KEY",
+ },
+ }
+ }),
+ }
+ },
+ },
+ {
+ desc: "Google Cloud object storage config",
+ setup: func(t *testing.T) setupData {
+ cmd := writeScript(t, `cat <<-EOF
+ {
+ "object_storage": {
+ "provider": "Google",
+ "google": {
+ "google_application_default": true,
+ "google_json_key_string": "MY-GOOGLE-JSON-KEY"
+ }
+ }
+ }
+ EOF
+ `)
+
+ return setupData{
+ cfg: config.Config{
+ ConfigCommand: cmd,
+ },
+ expectedCfg: modifyDefaultConfig(func(cfg *config.Config) {
+ cfg.ConfigCommand = cmd
+ cfg.ObjectStorageCredentials = config.ObjectStorageCredentials{
+ Provider: "Google",
+ GoogleCredentials: config.GoogleCredentials{
+ ApplicationDefault: true,
+ JSONKeyString: "MY-GOOGLE-JSON-KEY",
+ },
+ }
+ }),
+ }
+ },
+ },
+ } {
+ tc := tc
+
+ t.Run(tc.desc, func(t *testing.T) {
+ t.Parallel()
+
+ setup := tc.setup(t)
+
+ var cfgBuffer bytes.Buffer
+ require.NoError(t, toml.NewEncoder(&cfgBuffer).Encode(setup.cfg))
+
+ cfg, err := config.LoadConfig(cfgBuffer.String())
+ // We can't use `require.Equal()` for the error as it's basically impossible
+ // to reproduce the exact `exec.ExitError`.
+ if setup.expectedErr != "" {
+ require.EqualError(t, err, setup.expectedErr)
+ } else {
+ require.NoError(t, err)
+ require.Equal(t, setup.expectedCfg, *cfg)
+ }
+ })
+ }
+}
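
Note: the test cases above call a writeScript helper whose definition is not shown in this hunk. As a hedged sketch only — assuming it builds on the TempDir and WriteExecutable helpers added to the testhelper package further down in this commit — such a helper could look like:

    func writeScript(t *testing.T, script string) string {
        // Place a throwaway shell script in an auto-cleaned temp directory
        // and make it executable, so ConfigCommand can invoke it.
        return testhelper.WriteExecutable(
            t,
            filepath.Join(testhelper.TempDir(t), "script"),
            []byte("#!/bin/sh\n"+script),
        )
    }

Each table-driven case then TOML-encodes setup.cfg, feeds it through config.LoadConfig, and compares the parsed result against expectedCfg, as the t.Run loop above shows; the new MarshalText methods on TomlURL and TomlDuration below exist precisely so that this round-trip encoding works.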
diff --git a/workhorse/internal/config/config.go b/workhorse/internal/config/config.go
index 3b928d42fe1..d84bab16541 100644
--- a/workhorse/internal/config/config.go
+++ b/workhorse/internal/config/config.go
@@ -2,10 +2,13 @@ package config
import (
"context"
+ "encoding/json"
+ "errors"
"fmt"
"math"
"net/url"
"os"
+ "os/exec"
"runtime"
"strings"
"time"
@@ -30,6 +33,10 @@ func (u *TomlURL) UnmarshalText(text []byte) error {
return err
}
+func (u *TomlURL) MarshalText() ([]byte, error) {
+ return []byte(u.String()), nil
+}
+
type TomlDuration struct {
time.Duration
}
@@ -40,12 +47,16 @@ func (d *TomlDuration) UnmarshalText(text []byte) error {
return err
}
+func (d TomlDuration) MarshalText() ([]byte, error) {
+ return []byte(d.String()), nil
+}
+
type ObjectStorageCredentials struct {
Provider string
- S3Credentials S3Credentials `toml:"s3"`
- AzureCredentials AzureCredentials `toml:"azurerm"`
- GoogleCredentials GoogleCredentials `toml:"google"`
+ S3Credentials S3Credentials `toml:"s3" json:"s3"`
+ AzureCredentials AzureCredentials `toml:"azurerm" json:"azurerm"`
+ GoogleCredentials GoogleCredentials `toml:"google" json:"google"`
}
type ObjectStorageConfig struct {
@@ -53,8 +64,9 @@ type ObjectStorageConfig struct {
}
type S3Credentials struct {
- AwsAccessKeyID string `toml:"aws_access_key_id"`
- AwsSecretAccessKey string `toml:"aws_secret_access_key"`
+ AwsAccessKeyID string `toml:"aws_access_key_id" json:"aws_access_key_id"`
+ AwsSecretAccessKey string `toml:"aws_secret_access_key" json:"aws_secret_access_key"`
+ AwsSessionToken string `toml:"aws_session_token" json:"aws_session_token"`
}
type S3Config struct {
@@ -72,14 +84,14 @@ type GoCloudConfig struct {
}
type AzureCredentials struct {
- AccountName string `toml:"azure_storage_account_name"`
- AccountKey string `toml:"azure_storage_access_key"`
+ AccountName string `toml:"azure_storage_account_name" json:"azure_storage_account_name"`
+ AccountKey string `toml:"azure_storage_access_key" json:"azure_storage_access_key"`
}
type GoogleCredentials struct {
- ApplicationDefault bool `toml:"google_application_default"`
- JSONKeyString string `toml:"google_json_key_string"`
- JSONKeyLocation string `toml:"google_json_key_location"`
+ ApplicationDefault bool `toml:"google_application_default" json:"google_application_default"`
+ JSONKeyString string `toml:"google_json_key_string" json:"google_json_key_string"`
+ JSONKeyLocation string `toml:"google_json_key_location" json:"google_json_key_location"`
}
type RedisConfig struct {
@@ -94,25 +106,27 @@ type RedisConfig struct {
}
type ImageResizerConfig struct {
- MaxScalerProcs uint32 `toml:"max_scaler_procs"`
- MaxFilesize uint64 `toml:"max_filesize"`
+ MaxScalerProcs uint32 `toml:"max_scaler_procs" json:"max_scaler_procs"`
+ MaxScalerMem uint64 `toml:"max_scaler_mem" json:"max_scaler_mem"`
+ MaxFilesize uint64 `toml:"max_filesize" json:"max_filesize"`
}
type TlsConfig struct {
- Certificate string `toml:"certificate"`
- Key string `toml:"key"`
- MinVersion string `toml:"min_version"`
- MaxVersion string `toml:"max_version"`
+ Certificate string `toml:"certificate" json:"certificate"`
+ Key string `toml:"key" json:"key"`
+ MinVersion string `toml:"min_version" json:"min_version"`
+ MaxVersion string `toml:"max_version" json:"max_version"`
}
type ListenerConfig struct {
- Network string `toml:"network"`
- Addr string `toml:"addr"`
- Tls *TlsConfig `toml:"tls"`
+ Network string `toml:"network" json:"network"`
+ Addr string `toml:"addr" json:"addr"`
+ Tls *TlsConfig `toml:"tls" json:"tls"`
}
type Config struct {
- Redis *RedisConfig `toml:"redis"`
+ ConfigCommand string `toml:"config_command,omitempty" json:"config_command"`
+ Redis *RedisConfig `toml:"redis" json:"redis"`
Backend *url.URL `toml:"-"`
CableBackend *url.URL `toml:"-"`
Version string `toml:"-"`
@@ -126,15 +140,15 @@ type Config struct {
APIQueueTimeout time.Duration `toml:"-"`
APICILongPollingDuration time.Duration `toml:"-"`
ObjectStorageConfig ObjectStorageConfig `toml:"-"`
- ObjectStorageCredentials ObjectStorageCredentials `toml:"object_storage"`
+ ObjectStorageCredentials ObjectStorageCredentials `toml:"object_storage" json:"object_storage"`
PropagateCorrelationID bool `toml:"-"`
- ImageResizerConfig ImageResizerConfig `toml:"image_resizer"`
- AltDocumentRoot string `toml:"alt_document_root"`
- ShutdownTimeout TomlDuration `toml:"shutdown_timeout"`
- TrustedCIDRsForXForwardedFor []string `toml:"trusted_cidrs_for_x_forwarded_for"`
- TrustedCIDRsForPropagation []string `toml:"trusted_cidrs_for_propagation"`
- Listeners []ListenerConfig `toml:"listeners"`
- MetricsListener *ListenerConfig `toml:"metrics_listener"`
+ ImageResizerConfig ImageResizerConfig `toml:"image_resizer" json:"image_resizer"`
+ AltDocumentRoot string `toml:"alt_document_root" json:"alt_document_root"`
+ ShutdownTimeout TomlDuration `toml:"shutdown_timeout" json:"shutdown_timeout"`
+ TrustedCIDRsForXForwardedFor []string `toml:"trusted_cidrs_for_x_forwarded_for" json:"trusted_cidrs_for_x_forwarded_for"`
+ TrustedCIDRsForPropagation []string `toml:"trusted_cidrs_for_propagation" json:"trusted_cidrs_for_propagation"`
+ Listeners []ListenerConfig `toml:"listeners" json:"listeners"`
+ MetricsListener *ListenerConfig `toml:"metrics_listener" json:"metrics_listener"`
}
var DefaultImageResizerConfig = ImageResizerConfig{
@@ -149,6 +163,22 @@ func LoadConfig(data string) (*Config, error) {
return nil, err
}
+ if cfg.ConfigCommand != "" {
+ output, err := exec.Command(cfg.ConfigCommand).Output()
+ if err != nil {
+ var exitErr *exec.ExitError
+ if errors.As(err, &exitErr) {
+ return cfg, fmt.Errorf("running config command: %w, stderr: %q", err, string(exitErr.Stderr))
+ }
+
+ return cfg, fmt.Errorf("running config command: %w", err)
+ }
+
+ if err := json.Unmarshal(output, &cfg); err != nil {
+ return cfg, fmt.Errorf("unmarshalling generated config: %w", err)
+ }
+ }
+
return cfg, nil
}
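
The ConfigCommand hook added above makes LoadConfig shell out to an external command and unmarshal the command's JSON output on top of the TOML-parsed values, which is what the mixed-configuration and subsection-merge test cases exercise. A minimal usage sketch from inside the workhorse module (internal packages are not importable externally; the script path and its JSON payload are illustrative, not part of this change):

    package main

    import (
        "fmt"
        "log"

        "gitlab.com/gitlab-org/gitlab/workhorse/internal/config"
    )

    func main() {
        // Assumes /usr/local/bin/workhorse-secrets prints JSON such as
        // {"redis": {"url": "redis://redis.example.com", "db": 1}}.
        cfg, err := config.LoadConfig(`
    config_command = "/usr/local/bin/workhorse-secrets"
    shutdown_timeout = "30s"
    `)
        if err != nil {
            log.Fatalf("loading config: %v", err)
        }

        // TOML values are parsed first; the command's JSON output is then
        // unmarshalled over them, so the Redis settings come from the script
        // while shutdown_timeout keeps its TOML value.
        fmt.Println(cfg.Redis.URL.String(), cfg.ShutdownTimeout.Duration)
    }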
diff --git a/workhorse/internal/testhelper/testhelper.go b/workhorse/internal/testhelper/testhelper.go
index 6c944def92d..607c02d1da1 100644
--- a/workhorse/internal/testhelper/testhelper.go
+++ b/workhorse/internal/testhelper/testhelper.go
@@ -1,6 +1,7 @@
package testhelper
import (
+ "bytes"
"errors"
"fmt"
"io"
@@ -8,8 +9,10 @@ import (
"net/http/httptest"
"os"
"path"
+ "path/filepath"
"regexp"
"runtime"
+ "syscall"
"testing"
"github.com/golang-jwt/jwt/v5"
@@ -165,3 +168,74 @@ func SetupStaticFileHelper(t *testing.T, fpath, content, directory string) strin
return absDocumentRoot
}
+
+// TempDir is a wrapper around os.MkdirTemp that provides a cleanup function.
+func TempDir(tb testing.TB) string {
+ tmpDir, err := os.MkdirTemp("", "workhorse-tmp-*")
+ require.NoError(tb, err)
+ tb.Cleanup(func() {
+ require.NoError(tb, os.RemoveAll(tmpDir))
+ })
+
+ return tmpDir
+}
+
+// MustClose calls Close() on the Closer and fails the test in case it returns
+// an error. This function is useful when closing via `defer`: a plain
+// `defer require.NoError(tb, closer.Close())` would evaluate `closer.Close()`
+// immediately instead of deferring it.
+func MustClose(tb testing.TB, closer io.Closer) {
+ require.NoError(tb, closer.Close())
+}
+
+// WriteExecutable ensures that the parent directory exists and writes an executable with the
+// provided content. The executable must not already exist. Returns the path of the written
+// executable.
+func WriteExecutable(tb testing.TB, path string, content []byte) string {
+ dir := filepath.Dir(path)
+ require.NoError(tb, os.MkdirAll(dir, 0o755))
+ tb.Cleanup(func() {
+ require.NoError(tb, os.RemoveAll(dir))
+ })
+
+	// Open the file descriptor and write the script into it. Any other goroutine may cause the
+	// process to fork while we hold this writeable file descriptor, and as a consequence we
+	// leak it into the forked process. Subsequently, even if we close the file descriptor
+	// ourselves, that other process may still hold on to the writeable file descriptor. The
+	// result is that calls to execve(3P) on our just-written file will fail with ETXTBSY,
+	// which is raised when trying to execute a file that is still open for writing.
+ //
+ // We thus need to perform file locking to ensure that all writeable references to this
+ // file have been closed before returning.
+ executable, err := os.OpenFile(path, os.O_CREATE|os.O_EXCL|os.O_WRONLY, 0o700)
+ require.NoError(tb, err)
+ _, err = io.Copy(executable, bytes.NewReader(content))
+ require.NoError(tb, err)
+
+ // We now lock the file descriptor for exclusive access. If there was a forked process
+ // holding the writeable file descriptor at this point in time, then it would refer to the
+ // same file descriptor and thus be locked for exclusive access, as well. If we fork after
+ // creating the lock and before closing the writeable file descriptor, then the dup'd file
+ // descriptor would automatically inherit the lock.
+ //
+ // No matter what, after this step any file descriptors referring to this writeable file
+ // descriptor will be exclusively locked.
+ require.NoError(tb, syscall.Flock(int(executable.Fd()), syscall.LOCK_EX))
+
+ // We now close this file. The file will be automatically unlocked as soon as all
+ // references to this file descriptor are closed.
+ MustClose(tb, executable)
+
+ // We now open the file again, but this time only for reading.
+ executable, err = os.Open(path)
+ require.NoError(tb, err)
+
+	// And this time, we try to acquire a shared lock on this file. This call will block until
+	// the exclusive file lock on the above writeable file descriptor has been dropped. As soon
+	// as we're able to acquire the shared lock, we know that no open writeable file
+	// descriptors for this file remain, and thus execve(3P) can no longer fail with ETXTBSY.
+ require.NoError(tb, syscall.Flock(int(executable.Fd()), syscall.LOCK_SH))
+ MustClose(tb, executable)
+
+ return path
+}
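
A hedged usage sketch for the new helpers (the test name and script below are illustrative; assumed imports are os/exec, path/filepath, testify's require, and the testhelper package): WriteExecutable hands back a script path that is safe to execute immediately, because the flock handshake above guarantees no writeable descriptors are left open.

    func TestRunGeneratedScript(t *testing.T) {
        // The temp directory and the script are removed automatically via
        // t.Cleanup when the test finishes.
        script := testhelper.WriteExecutable(
            t,
            filepath.Join(testhelper.TempDir(t), "print-config"),
            []byte("#!/bin/sh\necho '{}'"),
        )

        // Without the lock handshake, this exec could race a concurrent fork
        // still holding the writeable descriptor and fail with ETXTBSY.
        out, err := exec.Command(script).Output()
        require.NoError(t, err)
        require.Equal(t, "{}\n", string(out))
    }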
diff --git a/yarn.lock b/yarn.lock
index aba17a95e1e..ba98c5b1c25 100644
--- a/yarn.lock
+++ b/yarn.lock
@@ -1283,10 +1283,10 @@
core-js "^3.29.1"
mitt "^3.0.1"
-"@gitlab/eslint-plugin@19.3.0":
- version "19.3.0"
- resolved "https://registry.yarnpkg.com/@gitlab/eslint-plugin/-/eslint-plugin-19.3.0.tgz#146ff28bd3817634261b2705838eeaec415b6ca4"
- integrity sha512-rWxmLRnJDymlc/AF9/UPQyVnjZeu6bNrhElD9nw0WDndSpWVTPR5RWoEcmmBE4OhG5M7Foe+fVVd3q5zhZU0HQ==
+"@gitlab/eslint-plugin@19.4.0":
+ version "19.4.0"
+ resolved "https://registry.yarnpkg.com/@gitlab/eslint-plugin/-/eslint-plugin-19.4.0.tgz#cffabc4a41a3e15491eaee87f282fef05d244b87"
+ integrity sha512-hFchl6UdzoOTFFiTx8myoJxe5SZ43LeC/+Er9tmIZ8HX3OZ6SFHTHaz61g2g4vvcwVHDMaAd5ncB6RCshlgv2A==
dependencies:
eslint-config-airbnb-base "^15.0.0"
eslint-config-prettier "^6.10.0"
@@ -1321,10 +1321,10 @@
resolved "https://registry.yarnpkg.com/@gitlab/svgs/-/svgs-3.74.0.tgz#b6b41be65b9e70378c0cef0435f96edd5467e759"
integrity sha512-eHoywPSLrYb+I/IYGapei2Tum5vLtgWkFxN0fxmUUAnBnxFSA+67aheI33kQVV3WjANuZGkglfPBX3QAmN8BLA==
-"@gitlab/ui@^72.5.0":
- version "72.5.0"
- resolved "https://registry.yarnpkg.com/@gitlab/ui/-/ui-72.5.0.tgz#ceb658391d171fac9a74089e01337a569fe15815"
- integrity sha512-UTEJUMzIt/jRDUmKL4yHBORFpNISffJn2beYIlP1LcYsY3J2ehOh1JaMQtnukgcEL3kIk0ijnTCjygN5Djatmw==
+"@gitlab/ui@^72.5.1":
+ version "72.5.1"
+ resolved "https://registry.yarnpkg.com/@gitlab/ui/-/ui-72.5.1.tgz#8f0ea0cabaa4be41dbf5f78b18f635117ebd41de"
+ integrity sha512-KhagcFu6RXDCweybecwBbx3Q3DvO26hkRj32jFsGA8H4TJ/5VLZC8Gwct7+IrtuYWijprzRKoAaoeXkrCcBdbw==
dependencies:
"@floating-ui/dom" "1.4.3"
bootstrap-vue "2.23.1"