gitlab.com/gitlab-org/gitaly.git
author    Steve Azzopardi <sazzopardi@gitlab.com>    2023-05-08 16:25:22 +0300
committer Steve Azzopardi <sazzopardi@gitlab.com>    2023-05-08 17:21:09 +0300
commit    3afe69c89f430661c60e510feb0d50b852f1b26a (patch)
tree      a55f6928a84e0f9e2b893e3c9ac4b1f6f86b24df
parent    bf616bd24e576aa6792f1a9173ca68cc32d42319 (diff)
limithandler: change `type` label
What
---
Change the `type` label to `limiting_key` for the following metrics:

- `gitaly_pack_objects_acquiring_seconds`
- `gitaly_pack_objects_in_progress`
- `gitaly_pack_objects_queued`
- `gitaly_pack_objects_dropped_total`

Why
---
For GitLab.com the `type` label is a [reserved label](https://gitlab.com/gitlab-com/runbooks/-/blob/d8a93d3ceba75e26c34c6e524a75d5fcc150e7bf/libsonnet/label-taxonomy/README.md#L18) used to specify the name of the service. Reusing the same label causes a collision, and some metrics don't render properly as a result. The new name also matches the label we already use for [logs](https://gitlab.com/gitlab-org/gitaly/-/blob/bf616bd24e576aa6792f1a9173ca68cc32d42319/internal/middleware/limithandler/stats.go#L74), which brings consistency.

I'm not sure whether this counts as a breaking change, since we are changing the label of metrics that are hidden behind the `pack_objects_limit_*` feature flags.

Reference: https://gitlab.com/gitlab-org/gitaly/-/issues/5099

Signed-off-by: Steve Azzopardi <sazzopardi@gitlab.com>
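Not part of the commit itself, but as a minimal sketch of the pattern the diff applies: each Prometheus vector is declared with a `limiting_key` label instead of `type`, and the dropped counter has that label curried up front so call sites only supply `reason`. Metric names and label values here are placeholders, not the real Gitaly metrics:

```go
package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/testutil"
)

func main() {
	// Gauge keyed by the limiting key (e.g. "user", "repo", "remote_ip")
	// rather than the reserved "type" label.
	queued := prometheus.NewGaugeVec(
		prometheus.GaugeOpts{
			Name: "example_pack_objects_queued",
			Help: "Gauge of number of queued calls",
		},
		[]string{"limiting_key"},
	)

	// Counter declared with both labels; currying "limiting_key" means
	// callers only pass "reason" when incrementing.
	dropped := prometheus.NewCounterVec(
		prometheus.CounterOpts{
			Name: "example_pack_objects_dropped_total",
			Help: "Number of requests dropped from the queue",
		},
		[]string{"limiting_key", "reason"},
	).MustCurryWith(prometheus.Labels{"limiting_key": "user"})

	queued.WithLabelValues("user").Inc()
	dropped.WithLabelValues("max_time").Inc()

	// Prints 1: the queued gauge for limiting_key="user".
	fmt.Println(testutil.ToFloat64(queued.WithLabelValues("user")))
}
```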
-rw-r--r--  internal/gitaly/service/hook/pack_objects_test.go  20
-rw-r--r--  internal/middleware/limithandler/monitor.go         22
-rw-r--r--  internal/middleware/limithandler/monitor_test.go    32
3 files changed, 37 insertions(+), 37 deletions(-)
diff --git a/internal/gitaly/service/hook/pack_objects_test.go b/internal/gitaly/service/hook/pack_objects_test.go
index 4c0a5342d..4bc570569 100644
--- a/internal/gitaly/service/hook/pack_objects_test.go
+++ b/internal/gitaly/service/hook/pack_objects_test.go
@@ -792,14 +792,14 @@ func testPackObjectsConcurrency(t *testing.T, ctx context.Context) {
cfg := cfgWithCache(t, 0)
- var keyType string
+ var limitingKey string
if featureflag.PackObjectsLimitingRepo.IsEnabled(ctx) {
- keyType = "repo"
+ limitingKey = "repo"
} else if featureflag.PackObjectsLimitingUser.IsEnabled(ctx) {
- keyType = "user"
+ limitingKey = "user"
} else if featureflag.PackObjectsLimitingRemoteIP.IsEnabled(ctx) {
- keyType = "remote_ip"
+ limitingKey = "remote_ip"
}
args := []string{"pack-objects", "--revs", "--thin", "--stdout", "--progress", "--delta-base-offset"}
@@ -989,7 +989,7 @@ func testPackObjectsConcurrency(t *testing.T, ctx context.Context) {
t.Run(tc.desc, func(t *testing.T) {
ticker := helper.NewManualTicker()
monitor := limithandler.NewPackObjectsConcurrencyMonitor(
- keyType,
+ limitingKey,
cfg.Prometheus.GRPCLatencyBuckets,
)
limiter := limithandler.NewConcurrencyLimiter(
@@ -1069,8 +1069,8 @@ func testPackObjectsConcurrency(t *testing.T, ctx context.Context) {
testutil.GatherAndCompare(registry,
bytes.NewBufferString(fmt.Sprintf(`# HELP gitaly_pack_objects_in_progress Gauge of number of concurrent in-progress calls
# TYPE gitaly_pack_objects_in_progress gauge
-gitaly_pack_objects_in_progress{type=%q} 1
-`, keyType)), "gitaly_pack_objects_in_progress"))
+gitaly_pack_objects_in_progress{limiting_key=%q} 1
+`, limitingKey)), "gitaly_pack_objects_in_progress"))
ticker.Tick()
@@ -1085,11 +1085,11 @@ gitaly_pack_objects_in_progress{type=%q} 1
expectedMetrics := bytes.NewBufferString(fmt.Sprintf(`# HELP gitaly_pack_objects_dropped_total Number of requests dropped from the queue
# TYPE gitaly_pack_objects_dropped_total counter
-gitaly_pack_objects_dropped_total{reason="max_time", type=%q} 1
+gitaly_pack_objects_dropped_total{limiting_key=%q,reason="max_time"} 1
# HELP gitaly_pack_objects_queued Gauge of number of queued calls
# TYPE gitaly_pack_objects_queued gauge
-gitaly_pack_objects_queued{type=%q} 0
-`, keyType, keyType))
+gitaly_pack_objects_queued{limiting_key=%q} 0
+`, limitingKey, limitingKey))
require.NoError(t,
testutil.GatherAndCompare(registry, expectedMetrics,
diff --git a/internal/middleware/limithandler/monitor.go b/internal/middleware/limithandler/monitor.go
index aa44d3e20..740e85275 100644
--- a/internal/middleware/limithandler/monitor.go
+++ b/internal/middleware/limithandler/monitor.go
@@ -110,16 +110,16 @@ func (p *PromMonitor) Dropped(ctx context.Context, key string, length int, reaso
func newPromMonitor(
limitingType string,
- keyType string,
+ limitingKey string,
queuedVec, inProgressVec *prometheus.GaugeVec,
acquiringSecondsVec *prometheus.HistogramVec,
requestsDroppedVec *prometheus.CounterVec,
) *PromMonitor {
return &PromMonitor{
limitingType: limitingType,
- queuedMetric: queuedVec.WithLabelValues(keyType),
- inProgressMetric: inProgressVec.WithLabelValues(keyType),
- acquiringSecondsMetric: acquiringSecondsVec.WithLabelValues(keyType),
+ queuedMetric: queuedVec.WithLabelValues(limitingKey),
+ inProgressMetric: inProgressVec.WithLabelValues(limitingKey),
+ acquiringSecondsMetric: acquiringSecondsVec.WithLabelValues(limitingKey),
requestsDroppedMetric: requestsDroppedVec,
acquiringSecondsHistogramVec: acquiringSecondsVec,
}
@@ -149,14 +149,14 @@ func splitMethodName(fullMethodName string) (string, string) {
// NewPackObjectsConcurrencyMonitor returns a concurrency monitor for use
// with limiting pack objects processes.
-func NewPackObjectsConcurrencyMonitor(keyType string, latencyBuckets []float64) *PromMonitor {
+func NewPackObjectsConcurrencyMonitor(limitingKey string, latencyBuckets []float64) *PromMonitor {
acquiringSecondsVec := prometheus.NewHistogramVec(
prometheus.HistogramOpts{
Name: "gitaly_pack_objects_acquiring_seconds",
Help: "Histogram of time calls are rate limited (in seconds)",
Buckets: latencyBuckets,
},
- []string{"type"},
+ []string{"limiting_key"},
)
inProgressVec := prometheus.NewGaugeVec(
@@ -164,7 +164,7 @@ func NewPackObjectsConcurrencyMonitor(keyType string, latencyBuckets []float64)
Name: "gitaly_pack_objects_in_progress",
Help: "Gauge of number of concurrent in-progress calls",
},
- []string{"type"},
+ []string{"limiting_key"},
)
queuedVec := prometheus.NewGaugeVec(
@@ -172,7 +172,7 @@ func NewPackObjectsConcurrencyMonitor(keyType string, latencyBuckets []float64)
Name: "gitaly_pack_objects_queued",
Help: "Gauge of number of queued calls",
},
- []string{"type"},
+ []string{"limiting_key"},
)
requestsDroppedVec := prometheus.NewCounterVec(
@@ -180,12 +180,12 @@ func NewPackObjectsConcurrencyMonitor(keyType string, latencyBuckets []float64)
Name: "gitaly_pack_objects_dropped_total",
Help: "Number of requests dropped from the queue",
},
- []string{"type", "reason"},
- ).MustCurryWith(prometheus.Labels{"type": keyType})
+ []string{"limiting_key", "reason"},
+ ).MustCurryWith(prometheus.Labels{"limiting_key": limitingKey})
return newPromMonitor(
TypePackObjects,
- keyType,
+ limitingKey,
queuedVec,
inProgressVec,
acquiringSecondsVec,
diff --git a/internal/middleware/limithandler/monitor_test.go b/internal/middleware/limithandler/monitor_test.go
index 751332683..20fd2db0d 100644
--- a/internal/middleware/limithandler/monitor_test.go
+++ b/internal/middleware/limithandler/monitor_test.go
@@ -127,26 +127,26 @@ func TestNewPackObjectsConcurrencyMonitor(t *testing.T) {
expectedMetrics := `# HELP gitaly_pack_objects_acquiring_seconds Histogram of time calls are rate limited (in seconds)
# TYPE gitaly_pack_objects_acquiring_seconds histogram
-gitaly_pack_objects_acquiring_seconds_bucket{type="user",le="0.001"} 0
-gitaly_pack_objects_acquiring_seconds_bucket{type="user",le="0.005"} 0
-gitaly_pack_objects_acquiring_seconds_bucket{type="user",le="0.025"} 0
-gitaly_pack_objects_acquiring_seconds_bucket{type="user",le="0.1"} 0
-gitaly_pack_objects_acquiring_seconds_bucket{type="user",le="0.5"} 0
-gitaly_pack_objects_acquiring_seconds_bucket{type="user",le="1"} 1
-gitaly_pack_objects_acquiring_seconds_bucket{type="user",le="10"} 1
-gitaly_pack_objects_acquiring_seconds_bucket{type="user",le="30"} 1
-gitaly_pack_objects_acquiring_seconds_bucket{type="user",le="60"} 1
-gitaly_pack_objects_acquiring_seconds_bucket{type="user",le="300"} 1
-gitaly_pack_objects_acquiring_seconds_bucket{type="user",le="1500"} 1
-gitaly_pack_objects_acquiring_seconds_bucket{type="user",le="+Inf"} 1
-gitaly_pack_objects_acquiring_seconds_sum{type="user"} 1
-gitaly_pack_objects_acquiring_seconds_count{type="user"} 1
+gitaly_pack_objects_acquiring_seconds_bucket{limiting_key="user",le="0.001"} 0
+gitaly_pack_objects_acquiring_seconds_bucket{limiting_key="user",le="0.005"} 0
+gitaly_pack_objects_acquiring_seconds_bucket{limiting_key="user",le="0.025"} 0
+gitaly_pack_objects_acquiring_seconds_bucket{limiting_key="user",le="0.1"} 0
+gitaly_pack_objects_acquiring_seconds_bucket{limiting_key="user",le="0.5"} 0
+gitaly_pack_objects_acquiring_seconds_bucket{limiting_key="user",le="1"} 1
+gitaly_pack_objects_acquiring_seconds_bucket{limiting_key="user",le="10"} 1
+gitaly_pack_objects_acquiring_seconds_bucket{limiting_key="user",le="30"} 1
+gitaly_pack_objects_acquiring_seconds_bucket{limiting_key="user",le="60"} 1
+gitaly_pack_objects_acquiring_seconds_bucket{limiting_key="user",le="300"} 1
+gitaly_pack_objects_acquiring_seconds_bucket{limiting_key="user",le="1500"} 1
+gitaly_pack_objects_acquiring_seconds_bucket{limiting_key="user",le="+Inf"} 1
+gitaly_pack_objects_acquiring_seconds_sum{limiting_key="user"} 1
+gitaly_pack_objects_acquiring_seconds_count{limiting_key="user"} 1
# HELP gitaly_pack_objects_dropped_total Number of requests dropped from the queue
# TYPE gitaly_pack_objects_dropped_total counter
-gitaly_pack_objects_dropped_total{reason="load",type="user"} 1
+gitaly_pack_objects_dropped_total{limiting_key="user",reason="load"} 1
# HELP gitaly_pack_objects_queued Gauge of number of queued calls
# TYPE gitaly_pack_objects_queued gauge
-gitaly_pack_objects_queued{type="user"} 1
+gitaly_pack_objects_queued{limiting_key="user"} 1
`
require.NoError(t, testutil.CollectAndCompare(