gitlab.com/gitlab-org/gitlab-foss.git
Diffstat (limited to 'config/initializers')
-rw-r--r--  config/initializers/1_settings.rb                              |  11
-rw-r--r--  config/initializers/7_prometheus_metrics.rb                    |   1
-rw-r--r--  config/initializers/active_record_schema_ignore_tables.rb      |   3
-rw-r--r--  config/initializers/active_record_schema_versions.rb           |   6
-rw-r--r--  config/initializers/carrierwave_patch.rb                       |  53
-rw-r--r--  config/initializers/database_config.rb                         |  55
-rw-r--r--  config/initializers/direct_upload_support.rb                   |  20
-rw-r--r--  config/initializers/elastic_client_setup.rb                    |   1
-rw-r--r--  config/initializers/lograge.rb                                 |   2
-rw-r--r--  config/initializers/peek.rb                                    |   6
-rw-r--r--  config/initializers/postgres_partitioning.rb                   |   4
-rw-r--r--  config/initializers/rails_host_authorization.rb                |   2
-rw-r--r--  config/initializers/sidekiq_cluster.rb                         |   8
-rw-r--r--  config/initializers/stackprof.rb                               | 184
-rw-r--r--  config/initializers/time_zone.rb                               |   7
-rw-r--r--  config/initializers/validate_puma.rb                           |   2
-rw-r--r--  config/initializers/zz_metrics.rb                              |   3
17 files changed, 244 insertions, 124 deletions
diff --git a/config/initializers/1_settings.rb b/config/initializers/1_settings.rb
index b7432c4cbe6..628d9c65ce0 100644
--- a/config/initializers/1_settings.rb
+++ b/config/initializers/1_settings.rb
@@ -83,6 +83,7 @@ Settings.omniauth['external_providers'] = [] if Settings.omniauth['external_prov
Settings.omniauth['block_auto_created_users'] = true if Settings.omniauth['block_auto_created_users'].nil?
Settings.omniauth['auto_link_ldap_user'] = false if Settings.omniauth['auto_link_ldap_user'].nil?
Settings.omniauth['auto_link_saml_user'] = false if Settings.omniauth['auto_link_saml_user'].nil?
+Settings.omniauth['auto_link_user'] = false if Settings.omniauth['auto_link_user'].nil?
Settings.omniauth['sync_profile_from_provider'] = false if Settings.omniauth['sync_profile_from_provider'].nil?
Settings.omniauth['sync_profile_attributes'] = ['email'] if Settings.omniauth['sync_profile_attributes'].nil?
@@ -283,6 +284,7 @@ Settings.sentry['clientside_dsn'] ||= nil
# Pages
#
Settings['pages'] ||= Settingslogic.new({})
+Settings['pages'] = ::Gitlab::Pages::Settings.new(Settings.pages) # For path access detection https://gitlab.com/gitlab-org/gitlab/-/issues/230702
Settings.pages['enabled'] = false if Settings.pages['enabled'].nil?
Settings.pages['access_control'] = false if Settings.pages['access_control'].nil?
Settings.pages['path'] = Settings.absolute(Settings.pages['path'] || File.join(Settings.shared['path'], "pages"))
@@ -422,6 +424,9 @@ Settings.cron_jobs['admin_email_worker']['job_class'] = 'AdminEmailWorker'
Settings.cron_jobs['personal_access_tokens_expiring_worker'] ||= Settingslogic.new({})
Settings.cron_jobs['personal_access_tokens_expiring_worker']['cron'] ||= '0 1 * * *'
Settings.cron_jobs['personal_access_tokens_expiring_worker']['job_class'] = 'PersonalAccessTokens::ExpiringWorker'
+Settings.cron_jobs['personal_access_tokens_expired_notification_worker'] ||= Settingslogic.new({})
+Settings.cron_jobs['personal_access_tokens_expired_notification_worker']['cron'] ||= '0 2 * * *'
+Settings.cron_jobs['personal_access_tokens_expired_notification_worker']['job_class'] = 'PersonalAccessTokens::ExpiredNotificationWorker'
Settings.cron_jobs['repository_archive_cache_worker'] ||= Settingslogic.new({})
Settings.cron_jobs['repository_archive_cache_worker']['cron'] ||= '0 * * * *'
Settings.cron_jobs['repository_archive_cache_worker']['job_class'] = 'RepositoryArchiveCacheWorker'
@@ -571,6 +576,9 @@ Gitlab.ee do
Settings.cron_jobs['elastic_cluster_reindexing_cron_worker'] ||= Settingslogic.new({})
Settings.cron_jobs['elastic_cluster_reindexing_cron_worker']['cron'] ||= '*/10 * * * *'
Settings.cron_jobs['elastic_cluster_reindexing_cron_worker']['job_class'] ||= 'ElasticClusterReindexingCronWorker'
+ Settings.cron_jobs['elastic_remove_expired_namespace_subscriptions_from_index_cron_worker'] ||= Settingslogic.new({})
+ Settings.cron_jobs['elastic_remove_expired_namespace_subscriptions_from_index_cron_worker']['cron'] ||= '10 3 * * *'
+ Settings.cron_jobs['elastic_remove_expired_namespace_subscriptions_from_index_cron_worker']['job_class'] ||= 'ElasticRemoveExpiredNamespaceSubscriptionsFromIndexCronWorker'
Settings.cron_jobs['sync_seat_link_worker'] ||= Settingslogic.new({})
Settings.cron_jobs['sync_seat_link_worker']['cron'] ||= "#{rand(60)} 0 * * *"
Settings.cron_jobs['sync_seat_link_worker']['job_class'] = 'SyncSeatLinkWorker'
@@ -589,6 +597,9 @@ Gitlab.ee do
Settings.cron_jobs['vulnerability_statistics_schedule_worker'] ||= Settingslogic.new({})
Settings.cron_jobs['vulnerability_statistics_schedule_worker']['cron'] ||= '15 1 * * *'
Settings.cron_jobs['vulnerability_statistics_schedule_worker']['job_class'] = 'Vulnerabilities::Statistics::ScheduleWorker'
+ Settings.cron_jobs['vulnerability_historical_statistics_deletion_worker'] ||= Settingslogic.new({})
+ Settings.cron_jobs['vulnerability_historical_statistics_deletion_worker']['cron'] ||= '15 3 * * *'
+ Settings.cron_jobs['vulnerability_historical_statistics_deletion_worker']['job_class'] = 'Vulnerabilities::HistoricalStatistics::DeletionWorker'
end
#
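
Every hunk above follows the same defaulting idiom: each cron job gets a Settingslogic stanza, a `cron` schedule that only applies when gitlab.yml did not set one, and a `job_class`; the omniauth flags likewise fall back to a safe default only when unset. A minimal sketch of that idiom (the `register_cron_job` helper is hypothetical, not part of GitLab):

# Hypothetical helper showing the pattern used above: ||= fills in defaults
# without overriding values already provided in gitlab.yml.
def register_cron_job(settings, name, cron:, job_class:)
  settings.cron_jobs[name] ||= Settingslogic.new({})
  settings.cron_jobs[name]['cron'] ||= cron
  settings.cron_jobs[name]['job_class'] = job_class
end

register_cron_job(Settings, 'personal_access_tokens_expired_notification_worker',
                  cron: '0 2 * * *',
                  job_class: 'PersonalAccessTokens::ExpiredNotificationWorker')
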
diff --git a/config/initializers/7_prometheus_metrics.rb b/config/initializers/7_prometheus_metrics.rb
index bb89850892e..cec1a213ed2 100644
--- a/config/initializers/7_prometheus_metrics.rb
+++ b/config/initializers/7_prometheus_metrics.rb
@@ -44,6 +44,7 @@ if !Rails.env.test? && Gitlab::Metrics.prometheus_metrics_enabled?
Gitlab::Metrics::Samplers::RubySampler.initialize_instance.start
Gitlab::Metrics::Samplers::DatabaseSampler.initialize_instance.start
+ Gitlab::Metrics::Samplers::ThreadsSampler.initialize_instance.start
if Gitlab.ee? && Gitlab::Runtime.sidekiq?
Gitlab::Metrics::Samplers::GlobalSearchSampler.instance.start
diff --git a/config/initializers/active_record_schema_ignore_tables.rb b/config/initializers/active_record_schema_ignore_tables.rb
index 8ac565f239e..0a840bbf1d8 100644
--- a/config/initializers/active_record_schema_ignore_tables.rb
+++ b/config/initializers/active_record_schema_ignore_tables.rb
@@ -1,5 +1,2 @@
-# Ignore table used temporarily in background migration
-ActiveRecord::SchemaDumper.ignore_tables = ["untracked_files_for_uploads"]
-
# Ignore dynamically managed partitions in static application schema
ActiveRecord::SchemaDumper.ignore_tables += ["#{Gitlab::Database::DYNAMIC_PARTITIONS_SCHEMA}.*"]
diff --git a/config/initializers/active_record_schema_versions.rb b/config/initializers/active_record_schema_versions.rb
index a7c342e8053..68be3f126a0 100644
--- a/config/initializers/active_record_schema_versions.rb
+++ b/config/initializers/active_record_schema_versions.rb
@@ -1,5 +1,7 @@
# frozen_string_literal: true
-# Patch to use COPY in db/structure.sql when populating schema_migrations table
+# Patch to write version information as empty files under the db/schema_migrations directory
# This is intended to reduce potential for merge conflicts in db/structure.sql
-ActiveRecord::ConnectionAdapters::PostgreSQLAdapter.prepend(Gitlab::Database::PostgresqlAdapter::SchemaVersionsCopyMixin)
+ActiveRecord::ConnectionAdapters::PostgreSQLAdapter.prepend(Gitlab::Database::PostgresqlAdapter::DumpSchemaVersionsMixin)
+# Patch to load version information from empty files under the db/schema_migrations directory
+ActiveRecord::Tasks::PostgreSQLDatabaseTasks.prepend(Gitlab::Database::PostgresqlDatabaseTasks::LoadSchemaVersionsMixin)
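
The swapped-in mixins change how schema versions are persisted: instead of emitting COPY statements into db/structure.sql, each applied migration version becomes an empty marker file under db/schema_migrations/. A rough sketch of the dump side under that assumption (names and structure are illustrative, not the actual mixin code):

require 'fileutils'

# Illustrative only: write one empty file per applied migration version so
# merges touch separate files instead of one shared structure.sql section.
module DumpSchemaVersionsSketch
  def dump_schema_versions(connection, dir = 'db/schema_migrations')
    FileUtils.mkdir_p(dir)
    connection.select_values('SELECT version FROM schema_migrations').each do |version|
      FileUtils.touch(File.join(dir, version))
    end
  end
end
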
diff --git a/config/initializers/carrierwave_patch.rb b/config/initializers/carrierwave_patch.rb
new file mode 100644
index 00000000000..53fba307926
--- /dev/null
+++ b/config/initializers/carrierwave_patch.rb
@@ -0,0 +1,53 @@
+# frozen_string_literal: true
+
+require "carrierwave/storage/fog"
+
+# This pulls in https://github.com/carrierwaveuploader/carrierwave/pull/2504 to support
+# sending AWS S3 encryption headers when copying objects.
+#
+# This patch also incorporates
+# https://github.com/carrierwaveuploader/carrierwave/pull/2375 to
+# provide Azure support. This is already in CarrierWave v2.1.x, but
+# upgrading this gem is a significant task:
+# https://gitlab.com/gitlab-org/gitlab/-/issues/216067
+module CarrierWave
+ module Storage
+ class Fog < Abstract
+ class File
+ def copy_to(new_path)
+ connection.copy_object(@uploader.fog_directory, file.key, @uploader.fog_directory, new_path, copy_to_options)
+ CarrierWave::Storage::Fog::File.new(@uploader, @base, new_path)
+ end
+
+ def copy_to_options
+ acl_header.merge(@uploader.fog_attributes)
+ end
+
+ def authenticated_url(options = {})
+ if %w[AWS Google Rackspace OpenStack AzureRM].include?(@uploader.fog_credentials[:provider])
+ # avoid a get by using local references
+ local_directory = connection.directories.new(key: @uploader.fog_directory)
+ local_file = local_directory.files.new(key: path)
+ expire_at = ::Fog::Time.now + @uploader.fog_authenticated_url_expiration
+ case @uploader.fog_credentials[:provider]
+ when 'AWS', 'Google'
+ # Older versions of fog-google do not support options as a parameter
+ if url_options_supported?(local_file)
+ local_file.url(expire_at, options)
+ else
+ warn "Options hash not supported in #{local_file.class}. You may need to upgrade your Fog provider."
+ local_file.url(expire_at)
+ end
+ when 'Rackspace'
+ connection.get_object_https_url(@uploader.fog_directory, path, expire_at, options)
+ when 'OpenStack'
+ connection.get_object_https_url(@uploader.fog_directory, path, expire_at)
+ else
+ local_file.url(expire_at)
+ end
+ end
+ end
+ end
+ end
+ end
+end
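
The practical effect of copy_to_options is that uploader-level fog_attributes (for example S3 server-side-encryption headers) are now sent on copy as well as on initial upload. A hedged configuration sketch; the header value is illustrative, not taken from this diff:

# With the patch above, these attributes are also passed to Fog's copy_object,
# so copied objects keep the same encryption settings as uploaded ones.
CarrierWave.configure do |config|
  config.fog_attributes = {
    'x-amz-server-side-encryption' => 'aws:kms' # assumption: SSE-KMS is desired
  }
end
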
diff --git a/config/initializers/database_config.rb b/config/initializers/database_config.rb
index ce732677c74..cccd4335a7d 100644
--- a/config/initializers/database_config.rb
+++ b/config/initializers/database_config.rb
@@ -20,31 +20,34 @@ Gitlab.ee do
end
end
-# When running on multi-threaded runtimes like Puma or Sidekiq,
-# set the number of threads per process as the minimum DB connection pool size.
-# This is to avoid connectivity issues as was documented here:
-# https://github.com/rails/rails/pull/23057
-if Gitlab::Runtime.multi_threaded?
- max_threads = Gitlab::Runtime.max_threads
- db_config = Gitlab::Database.config ||
- Rails.application.config.database_configuration[Rails.env]
- previous_db_pool_size = db_config['pool']
-
- db_config['pool'] = [db_config['pool'].to_i, max_threads].max + ENV["DB_POOL_HEADROOM"].to_i
-
- ActiveRecord::Base.establish_connection(db_config)
-
- current_db_pool_size = ActiveRecord::Base.connection.pool.size
-
- log_pool_size('DB', previous_db_pool_size, current_db_pool_size)
-
- Gitlab.ee do
- if Gitlab::Runtime.sidekiq? && Gitlab::Geo.geo_database_configured?
- previous_geo_db_pool_size = Rails.configuration.geo_database['pool']
- Rails.configuration.geo_database['pool'] = max_threads
- Geo::TrackingBase.establish_connection(Rails.configuration.geo_database)
- current_geo_db_pool_size = Geo::TrackingBase.connection_pool.size
- log_pool_size('Geo DB', previous_geo_db_pool_size, current_geo_db_pool_size)
- end
+# Because of the way Ruby on Rails manages database connections, it is
+# important that we have at least as many connections as we have
+# threads. While there is a 'pool' setting in database.yml, it is not
+# very practical because you need to maintain it in tandem with the
+# number of application threads. Because of this we override the number
+# of allowed connections in the database connection pool based on the
+# configured number of application threads.
+#
+# Gitlab::Runtime.max_threads is the number of "user facing" application
+# threads the process has been configured with. We also have auxiliary
+# threads that use database connections. Because it is not practical to
+# keep an accurate count of the number of auxiliary threads as the
+# application evolves over time, we just add a fixed headroom to the
+# number of user-facing threads. It is OK if this number is too large
+# because connections are instantiated lazily.
+
+headroom = (ENV["DB_POOL_HEADROOM"].presence || 10).to_i
+calculated_pool_size = Gitlab::Runtime.max_threads + headroom
+
+db_config = Gitlab::Database.config ||
+ Rails.application.config.database_configuration[Rails.env]
+
+db_config['pool'] = calculated_pool_size
+ActiveRecord::Base.establish_connection(db_config)
+
+Gitlab.ee do
+ if Gitlab::Runtime.sidekiq? && Gitlab::Geo.geo_database_configured?
+ Rails.configuration.geo_database['pool'] = calculated_pool_size
+ Geo::TrackingBase.establish_connection(Rails.configuration.geo_database)
end
end
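
In concrete terms, the pool size is now derived from the thread count instead of the 'pool' key in database.yml. A worked example, assuming a Puma worker configured with 4 threads and DB_POOL_HEADROOM unset:

Gitlab::Runtime.max_threads                    # => 4 (assumed for this example)
(ENV['DB_POOL_HEADROOM'].presence || 10).to_i  # => 10 (the default headroom)
# db_config['pool'] is therefore set to 4 + 10 = 14
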
diff --git a/config/initializers/direct_upload_support.rb b/config/initializers/direct_upload_support.rb
index 0fc6e82207e..94e90727f0c 100644
--- a/config/initializers/direct_upload_support.rb
+++ b/config/initializers/direct_upload_support.rb
@@ -1,5 +1,5 @@
class DirectUploadsValidator
- SUPPORTED_DIRECT_UPLOAD_PROVIDERS = %w(Google AWS).freeze
+ SUPPORTED_DIRECT_UPLOAD_PROVIDERS = %w(Google AWS AzureRM).freeze
ValidationError = Class.new(StandardError)
@@ -13,22 +13,32 @@ class DirectUploadsValidator
raise ValidationError, "No provider configured for '#{uploader_type}'. #{supported_provider_text}" if provider.blank?
- return if SUPPORTED_DIRECT_UPLOAD_PROVIDERS.include?(provider)
+ return if provider_loaded?(provider)
raise ValidationError, "Object storage provider '#{provider}' is not supported " \
"when 'direct_upload' is used for '#{uploader_type}'. #{supported_provider_text}"
end
+ private
+
+ def provider_loaded?(provider)
+ return false unless SUPPORTED_DIRECT_UPLOAD_PROVIDERS.include?(provider)
+
+ require 'fog/azurerm' if provider == 'AzureRM'
+
+ true
+ end
+
def supported_provider_text
- "Only #{SUPPORTED_DIRECT_UPLOAD_PROVIDERS.join(', ')} are supported."
+ "Only #{SUPPORTED_DIRECT_UPLOAD_PROVIDERS.to_sentence} are supported."
end
end
DirectUploadsValidator.new.tap do |validator|
CONFIGS = {
artifacts: Gitlab.config.artifacts,
- uploads: Gitlab.config.uploads,
- lfs: Gitlab.config.lfs
+ lfs: Gitlab.config.lfs,
+ uploads: Gitlab.config.uploads
}.freeze
CONFIGS.each do |uploader_type, uploader|
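
Two small details in this hunk: fog/azurerm is only required once an AzureRM provider is actually configured, and the error message now uses ActiveSupport's to_sentence rather than join, which reads better as the provider list grows:

%w(Google AWS AzureRM).join(', ')  # => "Google, AWS, AzureRM"
%w(Google AWS AzureRM).to_sentence # => "Google, AWS, and AzureRM"
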
diff --git a/config/initializers/elastic_client_setup.rb b/config/initializers/elastic_client_setup.rb
index 21745bd81d8..5b8d81265ad 100644
--- a/config/initializers/elastic_client_setup.rb
+++ b/config/initializers/elastic_client_setup.rb
@@ -13,6 +13,7 @@ Gitlab.ee do
Elasticsearch::Model::Adapter::Multiple::Records.prepend GemExtensions::Elasticsearch::Model::Adapter::Multiple::Records
Elasticsearch::Model::Indexing::InstanceMethods.prepend GemExtensions::Elasticsearch::Model::Indexing::InstanceMethods
Elasticsearch::Model::Adapter::ActiveRecord::Importing.prepend GemExtensions::Elasticsearch::Model::Adapter::ActiveRecord::Importing
+ Elasticsearch::Model::Adapter::ActiveRecord::Records.prepend GemExtensions::Elasticsearch::Model::Adapter::ActiveRecord::Records
Elasticsearch::Model::Client::InstanceMethods.prepend GemExtensions::Elasticsearch::Model::Client
Elasticsearch::Model::Client::ClassMethods.prepend GemExtensions::Elasticsearch::Model::Client
Elasticsearch::Model::ClassMethods.prepend GemExtensions::Elasticsearch::Model::Client
diff --git a/config/initializers/lograge.rb b/config/initializers/lograge.rb
index 42c97e4aebd..e3601a9538e 100644
--- a/config/initializers/lograge.rb
+++ b/config/initializers/lograge.rb
@@ -5,7 +5,7 @@ unless Gitlab::Runtime.sidekiq?
Rails.application.configure do
config.lograge.enabled = true
# Store the lograge JSON files in a separate file
- config.lograge.keep_original_rails_log = true
+ config.lograge.keep_original_rails_log = Gitlab::Utils.to_boolean(ENV.fetch('UNSTRUCTURED_RAILS_LOG', 'true'))
# Don't use the Logstash formatter since this requires logstash-event, an
# unmaintained gem that monkey patches `Time`
config.lograge.formatter = Lograge::Formatters::Json.new
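
keep_original_rails_log is now driven by an environment variable, so operators can drop the unstructured production.log while keeping the JSON log. The toggle, shown with Gitlab::Utils.to_boolean as used in the diff:

Gitlab::Utils.to_boolean(ENV.fetch('UNSTRUCTURED_RAILS_LOG', 'true')) # => true when unset
Gitlab::Utils.to_boolean('false')                                     # => false, disables the plain log
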
diff --git a/config/initializers/peek.rb b/config/initializers/peek.rb
index deac938c80b..fa74d8620f4 100644
--- a/config/initializers/peek.rb
+++ b/config/initializers/peek.rb
@@ -14,3 +14,9 @@ Peek.into Peek::Views::Rugged
Peek.into Peek::Views::BulletDetailed if defined?(Bullet)
Peek.into Peek::Views::Tracing if Labkit::Tracing.tracing_url_enabled?
+
+ActiveSupport::Notifications.subscribe('endpoint_run.grape') do |_name, _start, _finish, _id, payload|
+ if request_id = payload[:env]['action_dispatch.request_id']
+ Peek.adapter.save(request_id)
+ end
+end
diff --git a/config/initializers/postgres_partitioning.rb b/config/initializers/postgres_partitioning.rb
index 6c8a72d9bd5..b3f12c2ceb1 100644
--- a/config/initializers/postgres_partitioning.rb
+++ b/config/initializers/postgres_partitioning.rb
@@ -3,8 +3,10 @@
# Make sure we have loaded partitioned models here
# (even with eager loading disabled).
+Gitlab::Database::Partitioning::PartitionCreator.register(AuditEventPartitioned)
+
begin
- Gitlab::Database::Partitioning::PartitionCreator.new.create_partitions
+ Gitlab::Database::Partitioning::PartitionCreator.new.create_partitions unless ENV['DISABLE_POSTGRES_PARTITION_CREATION_ON_STARTUP']
rescue ActiveRecord::ActiveRecordError, PG::Error
# ignore - happens when Rake tasks have yet to create a database, e.g. for testing
end
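
Note that the new guard is a plain presence check on the environment variable, not a boolean parse, so any non-empty value skips partition creation at startup:

# Even the string "false" is truthy here, so `unless` skips create_partitions.
ENV['DISABLE_POSTGRES_PARTITION_CREATION_ON_STARTUP'] = 'false'
!!ENV['DISABLE_POSTGRES_PARTITION_CREATION_ON_STARTUP'] # => true
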
diff --git a/config/initializers/rails_host_authorization.rb b/config/initializers/rails_host_authorization.rb
index 6cca39ea95b..7d719dd519f 100644
--- a/config/initializers/rails_host_authorization.rb
+++ b/config/initializers/rails_host_authorization.rb
@@ -3,7 +3,7 @@
# This file requires config/initializers/1_settings.rb
if Rails.env.development?
- Rails.application.config.hosts += [Gitlab.config.gitlab.host, 'unix']
+ Rails.application.config.hosts += [Gitlab.config.gitlab.host, 'unix', 'host.docker.internal']
if ENV['RAILS_HOSTS']
additional_hosts = ENV['RAILS_HOSTS'].split(',').select(&:presence)
diff --git a/config/initializers/sidekiq_cluster.rb b/config/initializers/sidekiq_cluster.rb
index 4ff8dd9b936..2f9c1de47eb 100644
--- a/config/initializers/sidekiq_cluster.rb
+++ b/config/initializers/sidekiq_cluster.rb
@@ -14,10 +14,10 @@ if ENV['ENABLE_SIDEKIQ_CLUSTER']
if Process.ppid != parent
Process.kill(:TERM, Process.pid)
- # Wait for just a few extra seconds for a final attempt to
- # gracefully terminate. Considering the parent (cluster) process
- # have changed (SIGKILL'd), it shouldn't take long to shutdown.
- sleep(5)
+ # Allow sidekiq to cleanly terminate and push any running jobs back
+ # into the queue. We use the configured timeout and add a small
+ # grace period
+ sleep(Sidekiq.options[:timeout] + 5)
# Signaling the Sidekiq Pgroup as KILL is not forwarded to
# a possible child process. In Sidekiq Cluster, all child Sidekiq
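
The sleep now scales with Sidekiq's own shutdown timeout instead of a fixed 5 seconds, giving running jobs time to be pushed back onto the queue. With Sidekiq's stock 25-second timeout (the actual value depends on the deployment's Sidekiq options):

Sidekiq.options[:timeout] + 5 # => 25 + 5 = 30 seconds of grace
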
diff --git a/config/initializers/stackprof.rb b/config/initializers/stackprof.rb
index 5497ff9a459..797efdb9bbd 100644
--- a/config/initializers/stackprof.rb
+++ b/config/initializers/stackprof.rb
@@ -8,94 +8,122 @@
# * timeout profile after 30 seconds
# * write to $TMPDIR/stackprof.$PID.$RAND.profile
-if Gitlab::Utils.to_boolean(ENV['STACKPROF_ENABLED'].to_s)
- Gitlab::Cluster::LifecycleEvents.on_worker_start do
- require 'stackprof'
- require 'tmpdir'
+module Gitlab
+ class StackProf
+ # this is a workaround for sidekiq, which defines its own SIGUSR2 handler.
+ # by deferring to the sidekiq startup event, we get to set up our own
+ # handler late enough.
+ # see also: https://github.com/mperham/sidekiq/pull/4653
+ def self.install
+ require 'stackprof'
+ require 'tmpdir'
+
+ if Gitlab::Runtime.sidekiq?
+ Sidekiq.configure_server do |config|
+ config.on :startup do
+ on_worker_start
+ end
+ end
+ else
+ Gitlab::Cluster::LifecycleEvents.on_worker_start do
+ on_worker_start
+ end
+ end
+ end
- Gitlab::AppJsonLogger.info "stackprof: listening on SIGUSR2 signal"
+ def self.on_worker_start
+ Gitlab::AppJsonLogger.info(
+ event: "stackprof",
+ message: "listening on SIGUSR2 signal",
+ pid: Process.pid
+ )
- # create a pipe in order to propagate signal out of the signal handler
- # see also: https://cr.yp.to/docs/selfpipe.html
- read, write = IO.pipe
+ # create a pipe in order to propagate signal out of the signal handler
+ # see also: https://cr.yp.to/docs/selfpipe.html
+ read, write = IO.pipe
- # create a separate thread that polls for signals on the pipe.
- #
- # this way we do not execute in signal handler context, which
- # lifts restrictions and also serializes the calls in a thread-safe
- # manner.
- #
- # it's very similar to a goroutine and channel design.
- #
- # another nice benefit of this method is that we can timeout the
- # IO.select call, allowing the profile to automatically stop after
- # a given interval (by default 30 seconds), avoiding unbounded memory
- # growth from a profile that was started and never stopped.
- t = Thread.new do
- timeout_s = ENV['STACKPROF_TIMEOUT_S']&.to_i || 30
- current_timeout_s = nil
- loop do
- got_value = IO.select([read], nil, nil, current_timeout_s)
- read.getbyte if got_value
+ # create a separate thread that polls for signals on the pipe.
+ #
+ # this way we do not execute in signal handler context, which
+ # lifts restrictions and also serializes the calls in a thread-safe
+ # manner.
+ #
+ # it's very similar to a goroutine and channel design.
+ #
+ # another nice benefit of this method is that we can timeout the
+ # IO.select call, allowing the profile to automatically stop after
+ # a given interval (by default 30 seconds), avoiding unbounded memory
+ # growth from a profile that was started and never stopped.
+ t = Thread.new do
+ timeout_s = ENV['STACKPROF_TIMEOUT_S']&.to_i || 30
+ current_timeout_s = nil
+ loop do
+ got_value = IO.select([read], nil, nil, current_timeout_s)
+ read.getbyte if got_value
- if StackProf.running?
- stackprof_file_prefix = ENV['STACKPROF_FILE_PREFIX'] || Dir.tmpdir
- stackprof_out_file = "#{stackprof_file_prefix}/stackprof.#{Process.pid}.#{SecureRandom.hex(6)}.profile"
+ if ::StackProf.running?
+ stackprof_file_prefix = ENV['STACKPROF_FILE_PREFIX'] || Dir.tmpdir
+ stackprof_out_file = "#{stackprof_file_prefix}/stackprof.#{Process.pid}.#{SecureRandom.hex(6)}.profile"
- Gitlab::AppJsonLogger.info(
- event: "stackprof",
- message: "stopping profile",
- output_filename: stackprof_out_file,
- pid: Process.pid,
- timeout_s: timeout_s,
- timed_out: got_value.nil?
- )
+ Gitlab::AppJsonLogger.info(
+ event: "stackprof",
+ message: "stopping profile",
+ output_filename: stackprof_out_file,
+ pid: Process.pid,
+ timeout_s: timeout_s,
+ timed_out: got_value.nil?
+ )
- StackProf.stop
- StackProf.results(stackprof_out_file)
- current_timeout_s = nil
- else
- Gitlab::AppJsonLogger.info(
- event: "stackprof",
- message: "starting profile",
- pid: Process.pid
- )
+ ::StackProf.stop
+ ::StackProf.results(stackprof_out_file)
+ current_timeout_s = nil
+ else
+ Gitlab::AppJsonLogger.info(
+ event: "stackprof",
+ message: "starting profile",
+ pid: Process.pid
+ )
- StackProf.start(
- mode: :cpu,
- raw: Gitlab::Utils.to_boolean(ENV['STACKPROF_RAW'] || 'true'),
- interval: ENV['STACKPROF_INTERVAL_US']&.to_i || 10_000
- )
- current_timeout_s = timeout_s
+ ::StackProf.start(
+ mode: :cpu,
+ raw: Gitlab::Utils.to_boolean(ENV['STACKPROF_RAW'] || 'true'),
+ interval: ENV['STACKPROF_INTERVAL_US']&.to_i || 10_000
+ )
+ current_timeout_s = timeout_s
+ end
end
end
- end
- t.abort_on_exception = true
+ t.abort_on_exception = true
- # in the case of puma, this will override the existing SIGUSR2 signal handler
- # that can be used to trigger a restart.
- #
- # puma cluster has two types of restarts:
- # * SIGUSR1: phased restart
- # * SIGUSR2: restart
- #
- # phased restart is not supported in our configuration, because we use
- # preload_app. this means we will always perform a normal restart.
- # additionally, phased restart is not supported when sending a SIGUSR2
- # directly to a puma worker (as opposed to the master process).
- #
- # the result is that the behaviour of SIGUSR1 and SIGUSR2 is identical in
- # our configuration, and we can always use a SIGUSR1 to perform a restart.
- #
- # thus, it is acceptable for us to re-appropriate the SIGUSR2 signal, and
- # override the puma behaviour.
- #
- # see also:
- # * https://github.com/puma/puma/blob/master/docs/signals.md#puma-signals
- # * https://github.com/phusion/unicorn/blob/master/SIGNALS
- # * https://github.com/mperham/sidekiq/wiki/Signals
- Signal.trap('SIGUSR2') do
- write.write('.')
+ # in the case of puma, this will override the existing SIGUSR2 signal handler
+ # that can be used to trigger a restart.
+ #
+ # puma cluster has two types of restarts:
+ # * SIGUSR1: phased restart
+ # * SIGUSR2: restart
+ #
+ # phased restart is not supported in our configuration, because we use
+ # preload_app. this means we will always perform a normal restart.
+ # additionally, phased restart is not supported when sending a SIGUSR2
+ # directly to a puma worker (as opposed to the master process).
+ #
+ # the result is that the behaviour of SIGUSR1 and SIGUSR2 is identical in
+ # our configuration, and we can always use a SIGUSR1 to perform a restart.
+ #
+ # thus, it is acceptable for us to re-appropriate the SIGUSR2 signal, and
+ # override the puma behaviour.
+ #
+ # see also:
+ # * https://github.com/puma/puma/blob/master/docs/signals.md#puma-signals
+ # * https://github.com/phusion/unicorn/blob/master/SIGNALS
+ # * https://github.com/mperham/sidekiq/wiki/Signals
+ Signal.trap('SIGUSR2') do
+ write.write('.')
+ end
end
end
end
+
+if Gitlab::Utils.to_boolean(ENV['STACKPROF_ENABLED'].to_s)
+ Gitlab::StackProf.install
+end
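
Usage is unchanged: set STACKPROF_ENABLED=true, then send SIGUSR2 to a worker once to start a profile and again to stop it (or wait for the timeout). A minimal, standalone sketch of the self-pipe pattern the initializer relies on, independent of GitLab's classes:

# Self-pipe sketch (not GitLab code): the trap handler only writes a byte;
# the worker thread does the real work outside signal-handler context.
read_io, write_io = IO.pipe

worker = Thread.new do
  loop do
    ready = IO.select([read_io], nil, nil, 30) # 30s timeout, like STACKPROF_TIMEOUT_S
    read_io.getbyte if ready
    puts(ready ? 'signal received' : 'timed out')
  end
end
worker.abort_on_exception = true

Signal.trap('SIGUSR2') { write_io.write('.') }
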
diff --git a/config/initializers/time_zone.rb b/config/initializers/time_zone.rb
index ee246e67d66..bca7411ad63 100644
--- a/config/initializers/time_zone.rb
+++ b/config/initializers/time_zone.rb
@@ -1 +1,8 @@
Time.zone = Gitlab.config.gitlab.time_zone || Time.zone
+# The default is normally set by Rails in the
+# active_support.initialize_time_zone Railtie, but we need to set it
+# here because the config settings aren't available until after that
+# runs. We set the default to ensure multi-threaded servers have the
+# right value.
+Time.zone_default = Time.zone
+Rails.application.config.time_zone = Time.zone
diff --git a/config/initializers/validate_puma.rb b/config/initializers/validate_puma.rb
index 5abcfbfe6be..ac5678c4b5a 100644
--- a/config/initializers/validate_puma.rb
+++ b/config/initializers/validate_puma.rb
@@ -1,5 +1,5 @@
# frozen_string_literal: true
-if Gitlab::Runtime.puma? && ::Puma.cli_config.options[:workers].to_i.zero?
+if Gitlab::Runtime.puma? && ::Puma.cli_config.options[:workers].to_i == 0
raise 'Puma is only supported in Cluster-mode: workers > 0'
end
diff --git a/config/initializers/zz_metrics.rb b/config/initializers/zz_metrics.rb
index 7e675e478cf..8e31e4f9282 100644
--- a/config/initializers/zz_metrics.rb
+++ b/config/initializers/zz_metrics.rb
@@ -147,7 +147,6 @@ if Gitlab::Metrics.enabled? && !Rails.env.test? && !(Rails.env.development? && d
Gitlab::Application.configure do |config|
config.middleware.use(Gitlab::Metrics::RackMiddleware)
config.middleware.use(Gitlab::Middleware::RailsQueueDuration)
- config.middleware.use(Gitlab::Metrics::RedisRackMiddleware)
config.middleware.use(Gitlab::Metrics::ElasticsearchRackMiddleware)
end
@@ -199,7 +198,7 @@ if Gitlab::Metrics.enabled? && !Rails.env.test? && !(Rails.env.development? && d
val = super
if current_transaction = ::Gitlab::Metrics::Transaction.current
- current_transaction.increment(:new_redis_connections, 1)
+ current_transaction.increment(:gitlab_transaction_new_redis_connections_total, 1)
end
val