gitlab.com/gitlab-org/gitlab-foss.git
author    GitLab Bot <gitlab-bot@gitlab.com>  2020-07-20 15:26:25 +0300
committer GitLab Bot <gitlab-bot@gitlab.com>  2020-07-20 15:26:25 +0300
commit    a09983ae35713f5a2bbb100981116d31ce99826e (patch)
tree      2ee2af7bd104d57086db360a7e6d8c9d5d43667a /config/initializers
parent    18c5ab32b738c0b6ecb4d0df3994000482f34bd8 (diff)
Add latest changes from gitlab-org/gitlab@13-2-stable-ee
Diffstat (limited to 'config/initializers')
-rw-r--r--  config/initializers/01_secret_token.rb                             12
-rw-r--r--  config/initializers/0_inject_feature_flags.rb                       5
-rw-r--r--  config/initializers/1_postgresql_only.rb                            2
-rw-r--r--  config/initializers/1_settings.rb                                  55
-rw-r--r--  config/initializers/action_cable.rb                                 8
-rw-r--r--  config/initializers/action_dispatch_journey_formatter.rb            4
-rw-r--r--  config/initializers/actionpack_generate_old_csrf_token.rb          33
-rw-r--r--  config/initializers/active_record_schema_ignore_tables.rb           3
-rw-r--r--  config/initializers/config_initializers_active_record_locking.rb   46
-rw-r--r--  config/initializers/doorkeeper_openid_connect.rb                    4
-rw-r--r--  config/initializers/flipper.rb                                      1
-rw-r--r--  config/initializers/grape_patch.rb                                 31
-rw-r--r--  config/initializers/lograge.rb                                      1
-rw-r--r--  config/initializers/multi_json.rb                                   5
-rw-r--r--  config/initializers/oj.rb                                           4
-rw-r--r--  config/initializers/postgres_partitioning.rb                       10
-rw-r--r--  config/initializers/rack_attack.rb                                  13
-rw-r--r--  config/initializers/rack_timeout.rb                                  2
-rw-r--r--  config/initializers/stackprof.rb                                   101
19 files changed, 219 insertions, 121 deletions
diff --git a/config/initializers/01_secret_token.rb b/config/initializers/01_secret_token.rb
index 8b96727a2a1..5949f463457 100644
--- a/config/initializers/01_secret_token.rb
+++ b/config/initializers/01_secret_token.rb
@@ -1,13 +1,5 @@
-# WARNING: If you add a new secret to this file, make sure you also
-# update Omnibus GitLab or updates will fail. Omnibus is responsible for
-# writing the `secrets.yml` file. If Omnibus doesn't know about a
-# secret, Rails will attempt to write to the file, but this will fail
-# because Rails doesn't have write access.
-#
-# As an example:
-# * https://gitlab.com/gitlab-org/gitlab-foss/merge_requests/27581
-# * https://gitlab.com/gitlab-org/omnibus-gitlab/merge_requests/3267
-#
+# WARNING: Before you make a change to secrets.yml, read the development guide for GitLab secrets
+# doc/development/application_secrets.md.
#
# This file needs to be loaded BEFORE any initializers that attempt to
# prepend modules that require access to secrets (e.g. EE's 0_as_concern.rb).
diff --git a/config/initializers/0_inject_feature_flags.rb b/config/initializers/0_inject_feature_flags.rb
new file mode 100644
index 00000000000..45e6546e294
--- /dev/null
+++ b/config/initializers/0_inject_feature_flags.rb
@@ -0,0 +1,5 @@
+# This needs to be loaded after
+# config/initializers/0_inject_enterprise_edition_module.rb
+
+Feature.register_feature_groups
+Feature.register_definitions
diff --git a/config/initializers/1_postgresql_only.rb b/config/initializers/1_postgresql_only.rb
index be771bebf47..415fc6f2cae 100644
--- a/config/initializers/1_postgresql_only.rb
+++ b/config/initializers/1_postgresql_only.rb
@@ -2,3 +2,5 @@
raise "PostgreSQL is the only supported database from GitLab 12.1" unless
Gitlab::Database.postgresql?
+
+Gitlab::Database.check_postgres_version_and_print_warning
diff --git a/config/initializers/1_settings.rb b/config/initializers/1_settings.rb
index 0afd43634c3..b7432c4cbe6 100644
--- a/config/initializers/1_settings.rb
+++ b/config/initializers/1_settings.rb
@@ -254,7 +254,7 @@ Settings.artifacts['storage_path'] = Settings.absolute(Settings.artifacts.values
# Settings.artifact['path'] is deprecated, use `storage_path` instead
Settings.artifacts['path'] = Settings.artifacts['storage_path']
Settings.artifacts['max_size'] ||= 100 # in megabytes
-Settings.artifacts['object_store'] = ObjectStoreSettings.parse(Settings.artifacts['object_store'])
+Settings.artifacts['object_store'] = ObjectStoreSettings.legacy_parse(Settings.artifacts['object_store'])
#
# Registry
@@ -325,7 +325,7 @@ Settings['external_diffs'] ||= Settingslogic.new({})
Settings.external_diffs['enabled'] = false if Settings.external_diffs['enabled'].nil?
Settings.external_diffs['when'] = 'always' if Settings.external_diffs['when'].nil?
Settings.external_diffs['storage_path'] = Settings.absolute(Settings.external_diffs['storage_path'] || File.join(Settings.shared['path'], 'external-diffs'))
-Settings.external_diffs['object_store'] = ObjectStoreSettings.parse(Settings.external_diffs['object_store'])
+Settings.external_diffs['object_store'] = ObjectStoreSettings.legacy_parse(Settings.external_diffs['object_store'])
#
# Git LFS
@@ -333,7 +333,7 @@ Settings.external_diffs['object_store'] = ObjectStoreSettings.parse(Settings.ext
Settings['lfs'] ||= Settingslogic.new({})
Settings.lfs['enabled'] = true if Settings.lfs['enabled'].nil?
Settings.lfs['storage_path'] = Settings.absolute(Settings.lfs['storage_path'] || File.join(Settings.shared['path'], "lfs-objects"))
-Settings.lfs['object_store'] = ObjectStoreSettings.parse(Settings.lfs['object_store'])
+Settings.lfs['object_store'] = ObjectStoreSettings.legacy_parse(Settings.lfs['object_store'])
#
# Uploads
@@ -341,18 +341,16 @@ Settings.lfs['object_store'] = ObjectStoreSettings.parse(Settings.lfs['object_st
Settings['uploads'] ||= Settingslogic.new({})
Settings.uploads['storage_path'] = Settings.absolute(Settings.uploads['storage_path'] || 'public')
Settings.uploads['base_dir'] = Settings.uploads['base_dir'] || 'uploads/-/system'
-Settings.uploads['object_store'] = ObjectStoreSettings.parse(Settings.uploads['object_store'])
+Settings.uploads['object_store'] = ObjectStoreSettings.legacy_parse(Settings.uploads['object_store'])
Settings.uploads['object_store']['remote_directory'] ||= 'uploads'
#
# Packages
#
-Gitlab.ee do
- Settings['packages'] ||= Settingslogic.new({})
- Settings.packages['enabled'] = true if Settings.packages['enabled'].nil?
- Settings.packages['storage_path'] = Settings.absolute(Settings.packages['storage_path'] || File.join(Settings.shared['path'], "packages"))
- Settings.packages['object_store'] = ObjectStoreSettings.parse(Settings.packages['object_store'])
-end
+Settings['packages'] ||= Settingslogic.new({})
+Settings.packages['enabled'] = true if Settings.packages['enabled'].nil?
+Settings.packages['storage_path'] = Settings.absolute(Settings.packages['storage_path'] || File.join(Settings.shared['path'], "packages"))
+Settings.packages['object_store'] = ObjectStoreSettings.legacy_parse(Settings.packages['object_store'])
#
# Dependency Proxy
@@ -361,7 +359,7 @@ Gitlab.ee do
Settings['dependency_proxy'] ||= Settingslogic.new({})
Settings.dependency_proxy['enabled'] = true if Settings.dependency_proxy['enabled'].nil?
Settings.dependency_proxy['storage_path'] = Settings.absolute(Settings.dependency_proxy['storage_path'] || File.join(Settings.shared['path'], "dependency_proxy"))
- Settings.dependency_proxy['object_store'] = ObjectStoreSettings.parse(Settings.dependency_proxy['object_store'])
+ Settings.dependency_proxy['object_store'] = ObjectStoreSettings.legacy_parse(Settings.dependency_proxy['object_store'])
# For first iteration dependency proxy uses Rails server to download blobs.
# To ensure acceptable performance we only allow feature to be used with
@@ -376,7 +374,7 @@ end
Settings['terraform_state'] ||= Settingslogic.new({})
Settings.terraform_state['enabled'] = true if Settings.terraform_state['enabled'].nil?
Settings.terraform_state['storage_path'] = Settings.absolute(Settings.terraform_state['storage_path'] || File.join(Settings.shared['path'], "terraform_state"))
-Settings.terraform_state['object_store'] = ObjectStoreSettings.parse(Settings.terraform_state['object_store'])
+Settings.terraform_state['object_store'] = ObjectStoreSettings.legacy_parse(Settings.terraform_state['object_store'])
#
# Mattermost
@@ -502,6 +500,12 @@ Settings.cron_jobs['users_create_statistics_worker']['job_class'] = 'Users::Crea
Settings.cron_jobs['authorized_project_update_periodic_recalculate_worker'] ||= Settingslogic.new({})
Settings.cron_jobs['authorized_project_update_periodic_recalculate_worker']['cron'] ||= '45 1 * * 6'
Settings.cron_jobs['authorized_project_update_periodic_recalculate_worker']['job_class'] = 'AuthorizedProjectUpdate::PeriodicRecalculateWorker'
+Settings.cron_jobs['update_container_registry_info_worker'] ||= Settingslogic.new({})
+Settings.cron_jobs['update_container_registry_info_worker']['cron'] ||= '0 0 * * *'
+Settings.cron_jobs['update_container_registry_info_worker']['job_class'] = 'UpdateContainerRegistryInfoWorker'
+Settings.cron_jobs['postgres_dynamic_partitions_creator'] ||= Settingslogic.new({})
+Settings.cron_jobs['postgres_dynamic_partitions_creator']['cron'] ||= '21 */6 * * *'
+Settings.cron_jobs['postgres_dynamic_partitions_creator']['job_class'] ||= 'PartitionCreationWorker'
Gitlab.ee do
Settings.cron_jobs['adjourned_group_deletion_worker'] ||= Settingslogic.new({})
@@ -522,9 +526,6 @@ Gitlab.ee do
Settings.cron_jobs['geo_metrics_update_worker'] ||= Settingslogic.new({})
Settings.cron_jobs['geo_metrics_update_worker']['cron'] ||= '*/1 * * * *'
Settings.cron_jobs['geo_metrics_update_worker']['job_class'] ||= 'Geo::MetricsUpdateWorker'
- Settings.cron_jobs['geo_migrated_local_files_clean_up_worker'] ||= Settingslogic.new({})
- Settings.cron_jobs['geo_migrated_local_files_clean_up_worker']['cron'] ||= '15 */6 * * *'
- Settings.cron_jobs['geo_migrated_local_files_clean_up_worker']['job_class'] ||= 'Geo::MigratedLocalFilesCleanUpWorker'
Settings.cron_jobs['geo_prune_event_log_worker'] ||= Settingslogic.new({})
Settings.cron_jobs['geo_prune_event_log_worker']['cron'] ||= '*/5 * * * *'
Settings.cron_jobs['geo_prune_event_log_worker']['job_class'] ||= 'Geo::PruneEventLogWorker'
@@ -567,12 +568,27 @@ Gitlab.ee do
Settings.cron_jobs['elastic_index_initial_bulk_cron_worker'] ||= Settingslogic.new({})
Settings.cron_jobs['elastic_index_initial_bulk_cron_worker']['cron'] ||= '*/1 * * * *'
Settings.cron_jobs['elastic_index_initial_bulk_cron_worker']['job_class'] ||= 'ElasticIndexInitialBulkCronWorker'
+ Settings.cron_jobs['elastic_cluster_reindexing_cron_worker'] ||= Settingslogic.new({})
+ Settings.cron_jobs['elastic_cluster_reindexing_cron_worker']['cron'] ||= '*/10 * * * *'
+ Settings.cron_jobs['elastic_cluster_reindexing_cron_worker']['job_class'] ||= 'ElasticClusterReindexingCronWorker'
Settings.cron_jobs['sync_seat_link_worker'] ||= Settingslogic.new({})
Settings.cron_jobs['sync_seat_link_worker']['cron'] ||= "#{rand(60)} 0 * * *"
Settings.cron_jobs['sync_seat_link_worker']['job_class'] = 'SyncSeatLinkWorker'
Settings.cron_jobs['web_application_firewall_metrics_worker'] ||= Settingslogic.new({})
Settings.cron_jobs['web_application_firewall_metrics_worker']['cron'] ||= '0 1 * * 0'
Settings.cron_jobs['web_application_firewall_metrics_worker']['job_class'] = 'IngressModsecurityCounterMetricsWorker'
+ Settings.cron_jobs['users_create_statistics_worker'] ||= Settingslogic.new({})
+ Settings.cron_jobs['users_create_statistics_worker']['cron'] ||= '2 15 * * *'
+ Settings.cron_jobs['users_create_statistics_worker']['job_class'] = 'Users::CreateStatisticsWorker'
+ Settings.cron_jobs['network_policy_metrics_worker'] ||= Settingslogic.new({})
+ Settings.cron_jobs['network_policy_metrics_worker']['cron'] ||= '0 3 * * 0'
+ Settings.cron_jobs['network_policy_metrics_worker']['job_class'] = 'NetworkPolicyMetricsWorker'
+ Settings.cron_jobs['iterations_update_status_worker'] ||= Settingslogic.new({})
+ Settings.cron_jobs['iterations_update_status_worker']['cron'] ||= '5 0 * * *'
+ Settings.cron_jobs['iterations_update_status_worker']['job_class'] = 'IterationsUpdateStatusWorker'
+ Settings.cron_jobs['vulnerability_statistics_schedule_worker'] ||= Settingslogic.new({})
+ Settings.cron_jobs['vulnerability_statistics_schedule_worker']['cron'] ||= '15 1 * * *'
+ Settings.cron_jobs['vulnerability_statistics_schedule_worker']['job_class'] = 'Vulnerabilities::Statistics::ScheduleWorker'
end
#
@@ -598,6 +614,9 @@ Settings.gitlab_shell['owner_group'] ||= Settings.gitlab.user
Settings.gitlab_shell['ssh_path_prefix'] ||= Settings.__send__(:build_gitlab_shell_ssh_path_prefix)
Settings.gitlab_shell['git_timeout'] ||= 10800
+# Object storage
+ObjectStoreSettings.new(Settings).parse!
+
#
# Workhorse
#
@@ -735,12 +754,6 @@ Settings.webpack.dev_server['host'] ||= 'localhost'
Settings.webpack.dev_server['port'] ||= 3808
#
-# ActionCable settings
-#
-Settings['action_cable'] ||= Settingslogic.new({})
-Settings.action_cable['worker_pool_size'] ||= 4
-
-#
# Monitoring settings
#
Settings['monitoring'] ||= Settingslogic.new({})
diff --git a/config/initializers/action_cable.rb b/config/initializers/action_cable.rb
index c549dd45ad9..5530e7d64a2 100644
--- a/config/initializers/action_cable.rb
+++ b/config/initializers/action_cable.rb
@@ -3,11 +3,11 @@
require 'action_cable/subscription_adapter/redis'
Rails.application.configure do
- # We only mount the ActionCable engine in tests where we run it in-app
- # For other environments, we run it on a standalone Puma server
- config.action_cable.mount_path = Rails.env.test? ? '/-/cable' : nil
+ # Mount the ActionCable engine when in-app mode is enabled
+ config.action_cable.mount_path = Gitlab::ActionCable::Config.in_app? ? '/-/cable' : nil
+
config.action_cable.url = Gitlab::Utils.append_path(Gitlab.config.gitlab.relative_url_root, '/-/cable')
- config.action_cable.worker_pool_size = Gitlab.config.action_cable.worker_pool_size
+ config.action_cable.worker_pool_size = Gitlab::ActionCable::Config.worker_pool_size
end
# https://github.com/rails/rails/blob/bb5ac1623e8de08c1b7b62b1368758f0d3bb6379/actioncable/lib/action_cable/subscription_adapter/redis.rb#L18
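The mount decision above now delegates to Gitlab::ActionCable::Config instead of hard-coding the test environment. As a minimal sketch, assuming the helper is driven by environment variables (names and defaults below are illustrative, not necessarily GitLab's actual implementation), such a config class could look like:

    module Gitlab
      module ActionCable
        class Config
          class << self
            # Run ActionCable inside the Rails/Puma process when enabled,
            # instead of on a standalone Puma server.
            def in_app?
              Gitlab::Utils.to_boolean(ENV.fetch('ACTION_CABLE_IN_APP', false))
            end

            # Number of threads ActionCable uses to dispatch messages.
            def worker_pool_size
              ENV.fetch('ACTION_CABLE_WORKER_POOL_SIZE', 4).to_i
            end
          end
        end
      end
    end
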
diff --git a/config/initializers/action_dispatch_journey_formatter.rb b/config/initializers/action_dispatch_journey_formatter.rb
index 93cf407c73c..108fb2e5012 100644
--- a/config/initializers/action_dispatch_journey_formatter.rb
+++ b/config/initializers/action_dispatch_journey_formatter.rb
@@ -9,8 +9,8 @@ module ActionDispatch
module Path
class Pattern
def requirements_for_missing_keys_check
- @requirements_for_missing_keys_check ||= requirements.each_with_object({}) do |(key, regex), hash|
- hash[key] = /\A#{regex}\Z/
+ @requirements_for_missing_keys_check ||= requirements.transform_values do |regex|
+ /\A#{regex}\Z/
end
end
end
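The hunk above is a pure refactor: Hash#transform_values (Ruby 2.4+) maps each value while keeping the keys, so the accumulator threaded through each_with_object is no longer needed. A small sketch with a hypothetical requirements hash:

    requirements = { id: /\d+/, format: /json|html/ }  # illustrative only

    # Old form: thread a hash through the block and fill it key by key
    requirements.each_with_object({}) { |(key, regex), hash| hash[key] = /\A#{regex}\Z/ }

    # New form: same result, keys untouched, only values transformed
    requirements.transform_values { |regex| /\A#{regex}\Z/ }
    # => { id: /\A(?-mix:\d+)\Z/, format: /\A(?-mix:json|html)\Z/ }
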
diff --git a/config/initializers/actionpack_generate_old_csrf_token.rb b/config/initializers/actionpack_generate_old_csrf_token.rb
deleted file mode 100644
index 6367a1d4d59..00000000000
--- a/config/initializers/actionpack_generate_old_csrf_token.rb
+++ /dev/null
@@ -1,33 +0,0 @@
-# frozen_string_literal: true
-
-module Gitlab
- module RequestForgeryProtectionPatch
- private
-
- # Patch to generate 6.0.3 tokens so that we do not have CSRF errors while
- # rolling out 6.0.3.1. This enables GitLab to have a mix of 6.0.3 and
- # 6.0.3.1 Rails servers
- #
- # 1. Deploy this patch with :global_csrf_token FF disabled.
- # 2. Once all Rails servers are on 6.0.3.1, enable :global_csrf_token FF.
- # 3. On GitLab 13.2, remove this patch
- def masked_authenticity_token(session, form_options: {})
- action, method = form_options.values_at(:action, :method)
-
- raw_token = if per_form_csrf_tokens && action && method
- action_path = normalize_action_path(action)
- per_form_csrf_token(session, action_path, method)
- else
- if Feature.enabled?(:global_csrf_token)
- global_csrf_token(session)
- else
- real_csrf_token(session)
- end
- end
-
- mask_token(raw_token)
- end
- end
-end
-
-ActionController::Base.include Gitlab::RequestForgeryProtectionPatch
diff --git a/config/initializers/active_record_schema_ignore_tables.rb b/config/initializers/active_record_schema_ignore_tables.rb
index 661135f8ade..8ac565f239e 100644
--- a/config/initializers/active_record_schema_ignore_tables.rb
+++ b/config/initializers/active_record_schema_ignore_tables.rb
@@ -1,2 +1,5 @@
# Ignore table used temporarily in background migration
ActiveRecord::SchemaDumper.ignore_tables = ["untracked_files_for_uploads"]
+
+# Ignore dynamically managed partitions in static application schema
+ActiveRecord::SchemaDumper.ignore_tables += ["#{Gitlab::Database::DYNAMIC_PARTITIONS_SCHEMA}.*"]
diff --git a/config/initializers/config_initializers_active_record_locking.rb b/config/initializers/config_initializers_active_record_locking.rb
deleted file mode 100644
index 9f9908283c6..00000000000
--- a/config/initializers/config_initializers_active_record_locking.rb
+++ /dev/null
@@ -1,46 +0,0 @@
-# frozen_string_literal: true
-
-# ensure ActiveRecord's version has been required already
-require 'active_record/locking/optimistic'
-
-# rubocop:disable Lint/RescueException
-module ActiveRecord
- module Locking
- module Optimistic
- private
-
- def _update_row(attribute_names, attempted_action = "update")
- return super unless locking_enabled?
-
- begin
- locking_column = self.class.locking_column
- previous_lock_value = read_attribute_before_type_cast(locking_column)
- attribute_names << locking_column
-
- self[locking_column] += 1
-
- # Patched because when `lock_version` is read as `0`, it may actually be `NULL` in the DB.
- possible_previous_lock_value = previous_lock_value.to_i == 0 ? [nil, 0] : previous_lock_value
-
- affected_rows = self.class.unscoped.where(
- locking_column => possible_previous_lock_value,
- self.class.primary_key => id_in_database
- ).update_all(
- attributes_with_values(attribute_names)
- )
-
- if affected_rows != 1
- raise ActiveRecord::StaleObjectError.new(self, attempted_action)
- end
-
- affected_rows
-
- # If something went wrong, revert the locking_column value.
- rescue Exception
- self[locking_column] = previous_lock_value.to_i
- raise
- end
- end
- end
- end
-end
diff --git a/config/initializers/doorkeeper_openid_connect.rb b/config/initializers/doorkeeper_openid_connect.rb
index fd5a62c39c6..3523776c4f7 100644
--- a/config/initializers/doorkeeper_openid_connect.rb
+++ b/config/initializers/doorkeeper_openid_connect.rb
@@ -37,10 +37,10 @@ Doorkeeper::OpenidConnect.configure do
# public email address (if present)
# This allows existing solutions built for GitLab's old behavior to keep
# working without modification.
- o.claim(:email) do |user, scopes|
+ o.claim(:email, response: [:id_token, :user_info]) do |user, scopes|
scopes.exists?(:email) ? user.email : user.public_email
end
- o.claim(:email_verified) do |user, scopes|
+ o.claim(:email_verified, response: [:id_token, :user_info]) do |user, scopes|
if scopes.exists?(:email)
user.primary_email_verified?
elsif user.public_email?
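With the added response: [:id_token, :user_info] option, doorkeeper-openid_connect returns these claims both inside the ID token and from the /oauth/userinfo endpoint, rather than only from the latter. A hypothetical decoded ID token payload after this change (all values illustrative):

    {
      "iss"            => "https://gitlab.example.com",
      "sub"            => "42",
      "aud"            => "your-oauth-application-id",
      "email"          => "user@example.com",  # now embedded in the ID token itself
      "email_verified" => true                 # previously only via /oauth/userinfo
    }
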
diff --git a/config/initializers/flipper.rb b/config/initializers/flipper.rb
deleted file mode 100644
index 80cab7273e5..00000000000
--- a/config/initializers/flipper.rb
+++ /dev/null
@@ -1 +0,0 @@
-Feature.register_feature_groups
diff --git a/config/initializers/grape_patch.rb b/config/initializers/grape_patch.rb
new file mode 100644
index 00000000000..a9ac0840541
--- /dev/null
+++ b/config/initializers/grape_patch.rb
@@ -0,0 +1,31 @@
+# frozen_string_literal: true
+# Monkey patch for Grape v1.4.0: https://github.com/ruby-grape/grape/pull/2088
+
+require 'grape'
+
+# rubocop:disable Gitlab/ModuleWithInstanceVariables
+module Grape
+ module DSL
+ module InsideRoute
+ def stream(value = nil)
+ return if value.nil? && @stream.nil?
+
+ header 'Content-Length', nil
+ header 'Transfer-Encoding', nil
+ header 'Cache-Control', 'no-cache' # Skips ETag generation (reading the response up front)
+
+ if value.is_a?(String)
+ file_body = Grape::ServeStream::FileBody.new(value)
+ @stream = Grape::ServeStream::StreamResponse.new(file_body)
+ elsif value.respond_to?(:each)
+ @stream = Grape::ServeStream::StreamResponse.new(value)
+ elsif !value.is_a?(NilClass)
+ raise ArgumentError, 'Stream object must respond to :each.'
+ else
+ @stream
+ end
+ end
+ end
+ end
+end
+# rubocop:enable Gitlab/ModuleWithInstanceVariables
diff --git a/config/initializers/lograge.rb b/config/initializers/lograge.rb
index 01353ad4ec1..42c97e4aebd 100644
--- a/config/initializers/lograge.rb
+++ b/config/initializers/lograge.rb
@@ -15,6 +15,7 @@ unless Gitlab::Runtime.sidekiq?
data[:db_duration_s] = Gitlab::Utils.ms_to_round_sec(data.delete(:db)) if data[:db]
data[:view_duration_s] = Gitlab::Utils.ms_to_round_sec(data.delete(:view)) if data[:view]
data[:duration_s] = Gitlab::Utils.ms_to_round_sec(data.delete(:duration)) if data[:duration]
+ data.merge!(::Gitlab::Metrics::Subscribers::ActiveRecord.db_counter_payload)
data
end
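The db_counter_payload merge enriches each structured request log entry with ActiveRecord query counters. A hypothetical resulting production_json.log entry (field names assumed from the subscriber's purpose; values made up):

    {
      "method"          => "GET",
      "path"            => "/api/v4/projects",
      "status"          => 200,
      "duration_s"      => 0.12,
      "db_duration_s"   => 0.03,
      "db_count"        => 7,  # total SQL queries issued by the request
      "db_write_count"  => 0,  # INSERT/UPDATE/DELETE statements
      "db_cached_count" => 2   # queries served from the query cache
    }
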
diff --git a/config/initializers/multi_json.rb b/config/initializers/multi_json.rb
new file mode 100644
index 00000000000..93a81d8320d
--- /dev/null
+++ b/config/initializers/multi_json.rb
@@ -0,0 +1,5 @@
+# frozen_string_literal: true
+
+# Explicitly set the JSON adapter used by MultiJson
+# Currently we want this to default to the existing json gem
+MultiJson.use(:json_gem)
diff --git a/config/initializers/oj.rb b/config/initializers/oj.rb
new file mode 100644
index 00000000000..3fa26259fc6
--- /dev/null
+++ b/config/initializers/oj.rb
@@ -0,0 +1,4 @@
+# frozen_string_literal: true
+
+# Ensure Oj runs in json-gem compatibility mode by default
+Oj.default_options = { mode: :rails }
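Setting mode: :rails process-wide makes every Oj.dump / Oj.load call follow Rails' JSON conventions, so callers see the same output when Oj is swapped in underneath existing code. A quick sketch:

    require 'oj'
    Oj.default_options = { mode: :rails }

    Oj.dump({ 'a' => 1, 'b' => [1, 2] })  # => "{\"a\":1,\"b\":[1,2]}"
    Oj.load('{"a":1}')                    # => {"a"=>1}
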
diff --git a/config/initializers/postgres_partitioning.rb b/config/initializers/postgres_partitioning.rb
new file mode 100644
index 00000000000..6c8a72d9bd5
--- /dev/null
+++ b/config/initializers/postgres_partitioning.rb
@@ -0,0 +1,10 @@
+# frozen_string_literal: true
+
+# Make sure we have loaded partitioned models here
+# (even with eager loading disabled).
+
+begin
+ Gitlab::Database::Partitioning::PartitionCreator.new.create_partitions
+rescue ActiveRecord::ActiveRecordError, PG::Error
+ # ignore - happens when Rake tasks yet have to create a database, e.g. for testing
+end
diff --git a/config/initializers/rack_attack.rb b/config/initializers/rack_attack.rb
index 51b49bec864..b0778633199 100644
--- a/config/initializers/rack_attack.rb
+++ b/config/initializers/rack_attack.rb
@@ -68,6 +68,15 @@ class Rack::Attack
end
end
+ # Product analytics feature is in experimental stage.
+ # At this point we want to limit amount of events registered
+ # per application (aid stands for application id).
+ throttle('throttle_product_analytics_collector', limit: 100, period: 60) do |req|
+ if req.product_analytics_collector_request?
+ req.params['aid']
+ end
+ end
+
throttle('throttle_authenticated_web', Gitlab::Throttle.authenticated_web_options) do |req|
if req.web_request? &&
Gitlab::Throttle.settings.throttle_authenticated_web_enabled
@@ -128,6 +137,10 @@ class Rack::Attack
path =~ %r{^/-/(health|liveness|readiness)}
end
+ def product_analytics_collector_request?
+ path.start_with?('/-/collector/i')
+ end
+
def should_be_skipped?
api_internal_request? || health_check_request?
end
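Rack::Attack keeps a separate counter per distinct value returned by the throttle block, so the new rule works out to roughly 100 collector events per 60-second window per application id ('aid'). Expressed as a request-spec style sketch (helper methods, paths, and ids are illustrative):

    # 100 events in a minute for one application id are accepted...
    100.times { get '/-/collector/i', params: { aid: 'app-1' } }  # => 200
    # ...the 101st within the same window is throttled:
    get '/-/collector/i', params: { aid: 'app-1' }                # => 429 Too Many Requests
    # A different application id has its own, independent counter:
    get '/-/collector/i', params: { aid: 'app-2' }                # => 200
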
diff --git a/config/initializers/rack_timeout.rb b/config/initializers/rack_timeout.rb
index 5d5a5fcf980..e217398ee7d 100644
--- a/config/initializers/rack_timeout.rb
+++ b/config/initializers/rack_timeout.rb
@@ -10,8 +10,6 @@
# logged and we should fix the potential timeout issue in the code itself.
if Gitlab::Runtime.puma? && !Rails.env.test?
- require 'rack/timeout/base'
-
Rack::Timeout::Logger.level = Logger::ERROR
Gitlab::Application.configure do |config|
diff --git a/config/initializers/stackprof.rb b/config/initializers/stackprof.rb
new file mode 100644
index 00000000000..5497ff9a459
--- /dev/null
+++ b/config/initializers/stackprof.rb
@@ -0,0 +1,101 @@
+# frozen_string_literal: true
+
+# trigger stackprof by sending a SIGUSR2 signal
+#
+# default settings:
+# * collect raw samples
+# * sample at 100hz (every 10k microseconds)
+# * timeout profile after 30 seconds
+# * write to $TMPDIR/stackprof.$PID.$RAND.profile
+
+if Gitlab::Utils.to_boolean(ENV['STACKPROF_ENABLED'].to_s)
+ Gitlab::Cluster::LifecycleEvents.on_worker_start do
+ require 'stackprof'
+ require 'tmpdir'
+
+ Gitlab::AppJsonLogger.info "stackprof: listening on SIGUSR2 signal"
+
+ # create a pipe in order to propagate signal out of the signal handler
+ # see also: https://cr.yp.to/docs/selfpipe.html
+ read, write = IO.pipe
+
+ # create a separate thread that polls for signals on the pipe.
+ #
+ # this way we do not execute in signal handler context, which
+ # lifts restrictions and also serializes the calls in a thread-safe
+ # manner.
+ #
+ # it's very similar to a goroutine and channel design.
+ #
+ # another nice benefit of this method is that we can timeout the
+ # IO.select call, allowing the profile to automatically stop after
+ # a given interval (by default 30 seconds), avoiding unbounded memory
+ # growth from a profile that was started and never stopped.
+ t = Thread.new do
+ timeout_s = ENV['STACKPROF_TIMEOUT_S']&.to_i || 30
+ current_timeout_s = nil
+ loop do
+ got_value = IO.select([read], nil, nil, current_timeout_s)
+ read.getbyte if got_value
+
+ if StackProf.running?
+ stackprof_file_prefix = ENV['STACKPROF_FILE_PREFIX'] || Dir.tmpdir
+ stackprof_out_file = "#{stackprof_file_prefix}/stackprof.#{Process.pid}.#{SecureRandom.hex(6)}.profile"
+
+ Gitlab::AppJsonLogger.info(
+ event: "stackprof",
+ message: "stopping profile",
+ output_filename: stackprof_out_file,
+ pid: Process.pid,
+ timeout_s: timeout_s,
+ timed_out: got_value.nil?
+ )
+
+ StackProf.stop
+ StackProf.results(stackprof_out_file)
+ current_timeout_s = nil
+ else
+ Gitlab::AppJsonLogger.info(
+ event: "stackprof",
+ message: "starting profile",
+ pid: Process.pid
+ )
+
+ StackProf.start(
+ mode: :cpu,
+ raw: Gitlab::Utils.to_boolean(ENV['STACKPROF_RAW'] || 'true'),
+ interval: ENV['STACKPROF_INTERVAL_US']&.to_i || 10_000
+ )
+ current_timeout_s = timeout_s
+ end
+ end
+ end
+ t.abort_on_exception = true
+
+ # in the case of puma, this will override the existing SIGUSR2 signal handler
+ # that can be used to trigger a restart.
+ #
+ # puma cluster has two types of restarts:
+ # * SIGUSR1: phased restart
+ # * SIGUSR2: restart
+ #
+ # phased restart is not supported in our configuration, because we use
+ # preload_app. this means we will always perform a normal restart.
+ # additionally, phased restart is not supported when sending a SIGUSR2
+ # directly to a puma worker (as opposed to the master process).
+ #
+ # the result is that the behaviour of SIGUSR1 and SIGUSR2 is identical in
+ # our configuration, and we can always use a SIGUSR1 to perform a restart.
+ #
+ # thus, it is acceptable for us to re-appropriate the SIGUSR2 signal, and
+ # override the puma behaviour.
+ #
+ # see also:
+ # * https://github.com/puma/puma/blob/master/docs/signals.md#puma-signals
+ # * https://github.com/phusion/unicorn/blob/master/SIGNALS
+ # * https://github.com/mperham/sidekiq/wiki/Signals
+ Signal.trap('SIGUSR2') do
+ write.write('.')
+ end
+ end
+end
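
Once enabled, a profile is started and stopped by sending SIGUSR2 to a worker (for example Process.kill('USR2', worker_pid) from a console), or it stops on its own after the 30-second timeout. The dump can then be inspected with the stackprof gem's reporting API; the path and pid below are illustrative, the real filename is whatever the log line reported as output_filename:

    require 'stackprof'

    data = Marshal.load(File.binread('/tmp/stackprof.12345.abcdef.profile'))
    StackProf::Report.new(data).print_text  # flat, per-method sample counts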