# frozen_string_literal: true

# Keep separate directories for separate processes so Puma and Sidekiq metric files don't mix
def prometheus_default_multiproc_dir
  return unless Rails.env.development? || Rails.env.test?

  if Gitlab::Runtime.sidekiq?
    Rails.root.join('tmp/prometheus_multiproc_dir/sidekiq')
  elsif Gitlab::Runtime.puma?
    Rails.root.join('tmp/prometheus_multiproc_dir/puma')
  else
    Rails.root.join('tmp/prometheus_multiproc_dir')
  end
end

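# True only in the Puma master process (as reported by Prometheus::PidProvider),
# which is the Puma process responsible for the metrics server.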
def puma_metrics_server_process?
  Prometheus::PidProvider.worker_id == 'puma_master'
end

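# True for a single Sidekiq process running outside of sidekiq-cluster (no SIDEKIQ_WORKER_ID),
# or for the first worker (id 0) in a cluster.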
def sidekiq_metrics_server_process?
  Gitlab::Runtime.sidekiq? && (!ENV['SIDEKIQ_WORKER_ID'] || ENV['SIDEKIQ_WORKER_ID'] == '0')
end

if puma_metrics_server_process? || sidekiq_metrics_server_process?
  # The following is necessary to ensure stale Prometheus metrics don't accumulate over time.
  # It needs to run this early so that metrics files created later are not deleted: once the
  # app is hit in `warmup`, the first metrics and their corresponding files already exist,
  # for example those created in `lib/gitlab/metrics/requests_rack_middleware.rb`.
  Prometheus::CleanupMultiprocDirService.new.execute

  ::Prometheus::Client.reinitialize_on_pid_change(force: true)
end

::Prometheus::Client.configure do |config|
  config.logger = Gitlab::AppLogger

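  # Initial size of each memory-mapped metric file (4 KiB).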
  config.initial_mmap_file_size = 4 * 1024

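  # Directory for the per-process metric files; can be overridden via the
  # `prometheus_multiproc_dir` environment variable, otherwise it falls back to the
  # per-runtime tmp directories above (development and test only).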
  config.multiprocess_files_dir = ENV['prometheus_multiproc_dir'] || prometheus_default_multiproc_dir

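  # Label per-process metric files with a stable worker id (e.g. `puma_master`)
  # rather than the raw PID.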
  config.pid_provider = ::Prometheus::PidProvider.method(:worker_id)
end

Gitlab::Application.configure do |config|
  # Slot 0 should be Sentry so it can catch errors raised in this middleware
  config.middleware.insert_after(Labkit::Middleware::Rack, Gitlab::Metrics::RequestsRackMiddleware)
end

if sidekiq_metrics_server_process?
  # The single worker outside of a sidekiq-cluster, or the first worker (sidekiq_0)
  # in a cluster of processes, is responsible for serving health checks.
  #
  # Do not clean the metrics directory here - the supervisor script should
  # have already taken care of that.
  Sidekiq.configure_server do |config|
    config.on(:startup) do
      # In https://gitlab.com/gitlab-org/gitlab/-/issues/345804 we are looking to serve
      # health checks only from a worker process; for backwards compatibility we still go
      # through the metrics exporter server, but we start configuring it with the new
      # settings keys.
      exporter_settings = Settings.monitoring.sidekiq_health_checks
      Gitlab::Metrics::Exporter::SidekiqExporter.instance(exporter_settings).start
    end
  end
end

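# When Prometheus metrics are enabled (and not in test), the master process samples Puma
# internals and records the running GitLab version and revision in the `deployments` gauge.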
if !Rails.env.test? && Gitlab::Metrics.prometheus_metrics_enabled?
  Gitlab::Cluster::LifecycleEvents.on_master_start do
    if Gitlab::Runtime.puma?
      Gitlab::Metrics::Samplers::PumaSampler.instance.start
    end

    Gitlab::Metrics.gauge(:deployments, 'GitLab Version', {}, :max).set({ version: Gitlab::VERSION, revision: Gitlab.revision }, 1)

    Gitlab::Ci::Parsers.instrument!
  rescue IOError => e
    Gitlab::ErrorTracking.track_exception(e)
    Gitlab::Metrics.error_detected!
  end

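  # Every worker process runs its own samplers for Ruby VM, database, and thread metrics.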
  Gitlab::Cluster::LifecycleEvents.on_worker_start do
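    # Re-initialize the client's metric storage if the PID changed after fork, so each
    # worker writes to its own metric files.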
    defined?(::Prometheus::Client.reinitialize_on_pid_change) && ::Prometheus::Client.reinitialize_on_pid_change
    logger = Gitlab::AppLogger
    Gitlab::Metrics::Samplers::RubySampler.initialize_instance(logger: logger).start
    Gitlab::Metrics::Samplers::DatabaseSampler.initialize_instance(logger: logger).start
    Gitlab::Metrics::Samplers::ThreadsSampler.initialize_instance(logger: logger).start

    if Gitlab::Runtime.web_server?
      Gitlab::Metrics::Samplers::ActionCableSampler.instance(logger: logger).start
    end

    if Gitlab.ee? && Gitlab::Runtime.sidekiq?
      Gitlab::Metrics::Samplers::GlobalSearchSampler.instance(logger: logger).start
    end

    Gitlab::Ci::Parsers.instrument!
  rescue IOError => e
    Gitlab::ErrorTracking.track_exception(e)
    Gitlab::Metrics.error_detected!
  end
end

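# Web servers run a standalone metrics exporter on the master process; the hooks below make
# sure it is stopped before restarts and shutdowns and is not left running in forked workers.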
if Gitlab::Runtime.web_server?
  Gitlab::Cluster::LifecycleEvents.on_master_start do
    Gitlab::Metrics::Exporter::WebExporter.instance.start
  end

  Gitlab::Cluster::LifecycleEvents.on_before_graceful_shutdown do
    # Ensure the exporter is stopped before the server re-execs or shuts down.
    Gitlab::Metrics::Exporter::WebExporter.instance.stop
  end

  Gitlab::Cluster::LifecycleEvents.on_before_master_restart do
    # Ensure the exporter is stopped before the server re-execs.
    #
    # We do this again to be extra safe, but it should not be needed.
    Gitlab::Metrics::Exporter::WebExporter.instance.stop
  end

  Gitlab::Cluster::LifecycleEvents.on_worker_start do
    # `#close_on_exec=` takes effect only on `execve`, which does not happen for a Ruby fork.
    #
    # This does stop the server, as it is running on the master.
    Gitlab::Metrics::Exporter::WebExporter.instance.stop
  end
end