gitlab.com/gitlab-org/gitlab-pages.git
author    Jaime Martinez <jmartinez@gitlab.com>  2021-12-01 03:34:58 +0300
committer Jaime Martinez <jmartinez@gitlab.com>  2021-12-01 03:34:58 +0300
commit    d228c13c76d11091f8514e40940dc7df1b633e5a (patch)
tree      866eb85df7764f775494d9f8a4db6792e022eed0
parent    e3c2565e7086a38f02dd5175e51dde187ce5457f (diff)
parent    17c333c2a8de78c08dd070431652cc844fcc7b57 (diff)
Merge branch 'refactor/limiter-new' into 'master'
refactor: make internal/lru and internal/ratelimiter metrics optional

Closes #647

See merge request gitlab-org/gitlab-pages!606
-rw-r--r--  app.go                                    | 12
-rw-r--r--  internal/lru/lru.go                       | 79
-rw-r--r--  internal/ratelimiter/middleware.go        |  8
-rw-r--r--  internal/ratelimiter/middleware_test.go   | 12
-rw-r--r--  internal/ratelimiter/ratelimiter.go       | 22
-rw-r--r--  internal/ratelimiter/ratelimiter_test.go  | 11
-rw-r--r--  internal/vfs/zip/vfs.go                   | 16
7 files changed, 110 insertions(+), 50 deletions(-)
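Before the per-file diff, a minimal sketch of the call pattern this merge enables, assuming the internal/lru and internal/ratelimiter APIs exactly as changed below. Because metrics are now plain functional options, a caller (a test, for instance) can build both types without wiring any Prometheus collectors.

package main

import (
	"fmt"
	"net/http"

	"gitlab.com/gitlab-org/gitlab-pages/internal/lru"
	"gitlab.com/gitlab-org/gitlab-pages/internal/ratelimiter"
)

func main() {
	// No metric options at all: both the cache and the rate limiter fall
	// back to their defaults and simply skip instrumentation.
	cache := lru.New("source_ip", lru.WithMaxSize(5000))

	rl := ratelimiter.New(
		cache,
		ratelimiter.WithSourceIPLimitPerSecond(20),
		ratelimiter.WithSourceIPBurstSize(100),
	)

	// The middleware is used exactly as before.
	var handler http.Handler = rl.SourceIPLimiter(http.NotFoundHandler())
	fmt.Println(handler != nil)
}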
diff --git a/app.go b/app.go
index 3f286033..18513f0d 100644
--- a/app.go
+++ b/app.go
@@ -31,6 +31,7 @@ import (
"gitlab.com/gitlab-org/gitlab-pages/internal/handlers"
"gitlab.com/gitlab-org/gitlab-pages/internal/httperrors"
"gitlab.com/gitlab-org/gitlab-pages/internal/logging"
+ "gitlab.com/gitlab-org/gitlab-pages/internal/lru"
"gitlab.com/gitlab-org/gitlab-pages/internal/netutil"
"gitlab.com/gitlab-org/gitlab-pages/internal/ratelimiter"
"gitlab.com/gitlab-org/gitlab-pages/internal/rejectmethods"
@@ -266,9 +267,14 @@ func (a *theApp) buildHandlerPipeline() (http.Handler, error) {
if a.config.RateLimit.SourceIPLimitPerSecond > 0 {
rl := ratelimiter.New(
- metrics.RateLimitSourceIPBlockedCount,
- metrics.RateLimitSourceIPCachedEntries,
- metrics.RateLimitSourceIPCacheRequests,
+ lru.New("source_ip",
+ // based on an avg ~4,000 unique IPs per minute
+ // https://log.gprd.gitlab.net/app/lens#/edit/f7110d00-2013-11ec-8c8e-ed83b5469915?_g=h@e78830b
+ lru.WithMaxSize(5000),
+ lru.WithCachedEntriesMetric(metrics.RateLimitSourceIPCachedEntries),
+ lru.WithCachedRequestsMetric(metrics.RateLimitSourceIPCacheRequests),
+ ),
+ ratelimiter.WithBlockedCountMetric(metrics.RateLimitSourceIPBlockedCount),
ratelimiter.WithSourceIPLimitPerSecond(a.config.RateLimit.SourceIPLimitPerSecond),
ratelimiter.WithSourceIPBurstSize(a.config.RateLimit.SourceIPBurst),
)
diff --git a/internal/lru/lru.go b/internal/lru/lru.go
index f36d2a10..dafb5347 100644
--- a/internal/lru/lru.go
+++ b/internal/lru/lru.go
@@ -17,32 +17,49 @@ const getsPerPromote = 64
// needs to be pruned on OOM, this prunes 1/16 of items
const itemsToPruneDiv = 16
+const defaultCacheMaxSize = 1000
+const defaultCacheExpirationInterval = time.Minute
+
+// Option function to configure a Cache
+type Option func(*Cache)
+
// Cache wraps a ccache and allows setting custom metrics for hits/misses.
+// duration and maxSize are initialized to their default values but should
+// be configured using WithExpirationInterval and WithMaxSize options.
type Cache struct {
op string
duration time.Duration
+ maxSize int64
cache *ccache.Cache
metricCachedEntries *prometheus.GaugeVec
metricCacheRequests *prometheus.CounterVec
}
// New creates an LRU cache
-func New(op string, maxEntries int64, duration time.Duration, cachedEntriesMetric *prometheus.GaugeVec, cacheRequestsMetric *prometheus.CounterVec) *Cache {
+func New(op string, opts ...Option) *Cache {
+ c := &Cache{
+ op: op,
+ duration: defaultCacheExpirationInterval,
+ maxSize: defaultCacheMaxSize,
+ }
+
+ for _, opt := range opts {
+ opt(c)
+ }
+
configuration := ccache.Configure()
- configuration.MaxSize(maxEntries)
- configuration.ItemsToPrune(uint32(maxEntries) / itemsToPruneDiv)
+ configuration.MaxSize(c.maxSize)
+ configuration.ItemsToPrune(uint32(c.maxSize) / itemsToPruneDiv)
configuration.GetsPerPromote(getsPerPromote) // if item gets requested frequently promote it
configuration.OnDelete(func(*ccache.Item) {
- cachedEntriesMetric.WithLabelValues(op).Dec()
+ if c.metricCachedEntries != nil {
+ c.metricCachedEntries.WithLabelValues(op).Dec()
+ }
})
- return &Cache{
- op: op,
- cache: ccache.New(configuration),
- duration: duration,
- metricCachedEntries: cachedEntriesMetric,
- metricCacheRequests: cacheRequestsMetric,
- }
+ c.cache = ccache.New(configuration)
+
+ return c
}
// FindOrFetch will try to get the item from the cache if exists and is not expired.
@@ -51,20 +68,52 @@ func (c *Cache) FindOrFetch(cacheNamespace, key string, fetchFn func() (interfac
item := c.cache.Get(cacheNamespace + key)
if item != nil && !item.Expired() {
- c.metricCacheRequests.WithLabelValues(c.op, "hit").Inc()
+ if c.metricCacheRequests != nil {
+ c.metricCacheRequests.WithLabelValues(c.op, "hit").Inc()
+ }
return item.Value(), nil
}
value, err := fetchFn()
if err != nil {
- c.metricCacheRequests.WithLabelValues(c.op, "error").Inc()
+ if c.metricCacheRequests != nil {
+ c.metricCacheRequests.WithLabelValues(c.op, "error").Inc()
+ }
return nil, err
}
- c.metricCacheRequests.WithLabelValues(c.op, "miss").Inc()
- c.metricCachedEntries.WithLabelValues(c.op).Inc()
+ if c.metricCacheRequests != nil {
+ c.metricCacheRequests.WithLabelValues(c.op, "miss").Inc()
+ }
+ if c.metricCachedEntries != nil {
+ c.metricCachedEntries.WithLabelValues(c.op).Inc()
+ }
c.cache.Set(cacheNamespace+key, value, c.duration)
return value, nil
}
+
+func WithCachedEntriesMetric(m *prometheus.GaugeVec) Option {
+ return func(c *Cache) {
+ c.metricCachedEntries = m
+ }
+}
+
+func WithCachedRequestsMetric(m *prometheus.CounterVec) Option {
+ return func(c *Cache) {
+ c.metricCacheRequests = m
+ }
+}
+
+func WithExpirationInterval(t time.Duration) Option {
+ return func(c *Cache) {
+ c.duration = t
+ }
+}
+
+func WithMaxSize(i int64) Option {
+ return func(c *Cache) {
+ c.maxSize = i
+ }
+}
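With the lru changes above, a cache can be built and exercised without any metric collectors; the nil checks added in this hunk simply skip instrumentation. A short usage sketch (package path and signatures as in the diff; the namespace, key and values are illustrative only):

package main

import (
	"fmt"
	"time"

	"gitlab.com/gitlab-org/gitlab-pages/internal/lru"
)

func main() {
	// No WithCachedEntriesMetric/WithCachedRequestsMetric options: every
	// metric update in FindOrFetch is skipped.
	cache := lru.New("example",
		lru.WithMaxSize(100),
		lru.WithExpirationInterval(30*time.Second),
	)

	fetches := 0
	for i := 0; i < 3; i++ {
		v, err := cache.FindOrFetch("ns", "key", func() (interface{}, error) {
			fetches++
			return "expensive result", nil
		})
		fmt.Println(v, err)
	}
	// fetchFn should only have run once; later calls hit the cache until
	// the 30s expiration interval elapses.
	fmt.Println("fetches:", fetches)
}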
diff --git a/internal/ratelimiter/middleware.go b/internal/ratelimiter/middleware.go
index 0cd5b81e..f26cb2e0 100644
--- a/internal/ratelimiter/middleware.go
+++ b/internal/ratelimiter/middleware.go
@@ -28,12 +28,16 @@ func (rl *RateLimiter) SourceIPLimiter(handler http.Handler) http.Handler {
// Only drop requests once FF_ENABLE_RATE_LIMITER is enabled
// https://gitlab.com/gitlab-org/gitlab-pages/-/issues/629
if rateLimiterEnabled() {
- rl.sourceIPBlockedCount.WithLabelValues("true").Inc()
+ if rl.sourceIPBlockedCount != nil {
+ rl.sourceIPBlockedCount.WithLabelValues("true").Inc()
+ }
httperrors.Serve429(w)
return
}
- rl.sourceIPBlockedCount.WithLabelValues("false").Inc()
+ if rl.sourceIPBlockedCount != nil {
+ rl.sourceIPBlockedCount.WithLabelValues("false").Inc()
+ }
}
handler.ServeHTTP(w, r)
diff --git a/internal/ratelimiter/middleware_test.go b/internal/ratelimiter/middleware_test.go
index 2e51fcad..6560956c 100644
--- a/internal/ratelimiter/middleware_test.go
+++ b/internal/ratelimiter/middleware_test.go
@@ -10,6 +10,7 @@ import (
testlog "github.com/sirupsen/logrus/hooks/test"
"github.com/stretchr/testify/require"
+ "gitlab.com/gitlab-org/gitlab-pages/internal/lru"
"gitlab.com/gitlab-org/gitlab-pages/internal/testhelpers"
)
@@ -24,11 +25,11 @@ var next = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
func TestSourceIPLimiterWithDifferentLimits(t *testing.T) {
hook := testlog.NewGlobal()
testhelpers.SetEnvironmentVariable(t, testhelpers.FFEnableRateLimiter, "true")
- blocked, cachedEntries, cacheReqs := newTestMetrics(t)
for tn, tc := range sharedTestCases {
t.Run(tn, func(t *testing.T) {
- rl := New(blocked, cachedEntries, cacheReqs,
+ rl := New(
+ lru.New("source_ip"),
WithNow(mockNow),
WithSourceIPLimitPerSecond(tc.sourceIPLimit),
WithSourceIPBurstSize(tc.sourceIPBurstSize),
@@ -83,7 +84,12 @@ func TestSourceIPLimiterDenyRequestsAfterBurst(t *testing.T) {
for tn, tc := range tcs {
t.Run(tn, func(t *testing.T) {
- rl := New(blocked, cachedEntries, cacheReqs,
+ rl := New(
+ lru.New("source_ip",
+ lru.WithCachedEntriesMetric(cachedEntries),
+ lru.WithCachedRequestsMetric(cacheReqs),
+ ),
+ WithBlockedCountMetric(blocked),
WithNow(mockNow),
WithSourceIPLimitPerSecond(1),
WithSourceIPBurstSize(1),
diff --git a/internal/ratelimiter/ratelimiter.go b/internal/ratelimiter/ratelimiter.go
index 1359b19c..2e1304e2 100644
--- a/internal/ratelimiter/ratelimiter.go
+++ b/internal/ratelimiter/ratelimiter.go
@@ -17,11 +17,6 @@ const (
// DefaultSourceIPBurstSize is the maximum burst allowed per rate limiter.
// E.g. The first 100 requests within 1s will succeed, but the 101st will fail.
DefaultSourceIPBurstSize = 100
-
- // based on an avg ~4,000 unique IPs per minute
- // https://log.gprd.gitlab.net/app/lens#/edit/f7110d00-2013-11ec-8c8e-ed83b5469915?_g=h@e78830b
- defaultSourceIPItems = 5000
- defaultSourceIPExpirationInterval = time.Minute
)
// Option function to configure a RateLimiter
@@ -42,19 +37,12 @@ type RateLimiter struct {
}
// New creates a new RateLimiter with default values that can be configured via Option functions
-func New(blockCountMetric, cachedEntriesMetric *prometheus.GaugeVec, cacheRequestsMetric *prometheus.CounterVec, opts ...Option) *RateLimiter {
+func New(c *lru.Cache, opts ...Option) *RateLimiter {
rl := &RateLimiter{
now: time.Now,
sourceIPLimitPerSecond: DefaultSourceIPLimitPerSecond,
sourceIPBurstSize: DefaultSourceIPBurstSize,
- sourceIPBlockedCount: blockCountMetric,
- sourceIPCache: lru.New(
- "source_ip",
- defaultSourceIPItems,
- defaultSourceIPExpirationInterval,
- cachedEntriesMetric,
- cacheRequestsMetric,
- ),
+ sourceIPCache: c,
}
for _, opt := range opts {
@@ -85,6 +73,12 @@ func WithSourceIPBurstSize(burst int) Option {
}
}
+func WithBlockedCountMetric(m *prometheus.GaugeVec) Option {
+ return func(rl *RateLimiter) {
+ rl.sourceIPBlockedCount = m
+ }
+}
+
func (rl *RateLimiter) getSourceIPLimiter(sourceIP string) *rate.Limiter {
limiterI, _ := rl.sourceIPCache.FindOrFetch(sourceIP, sourceIP, func() (interface{}, error) {
return rate.NewLimiter(rate.Limit(rl.sourceIPLimitPerSecond), rl.sourceIPBurstSize), nil
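The constructor change above turns the LRU cache into an injected dependency: ratelimiter.New no longer builds its own cache, so each caller owns the cache's size, expiry and instrumentation, and the blocked-count metric becomes an option as well. A sketch of a fully instrumented construction, using placeholder Prometheus collectors in place of the real metrics package:

package main

import (
	"github.com/prometheus/client_golang/prometheus"

	"gitlab.com/gitlab-org/gitlab-pages/internal/lru"
	"gitlab.com/gitlab-org/gitlab-pages/internal/ratelimiter"
)

func main() {
	// Placeholder collectors; names and labels here are illustrative, not
	// the ones defined in the metrics package.
	blocked := prometheus.NewGaugeVec(
		prometheus.GaugeOpts{Name: "example_blocked"}, []string{"enforced"})
	entries := prometheus.NewGaugeVec(
		prometheus.GaugeOpts{Name: "example_cached_entries"}, []string{"op"})
	requests := prometheus.NewCounterVec(
		prometheus.CounterOpts{Name: "example_cache_requests"}, []string{"op", "result"})

	// The caller builds and tunes the cache, then hands it to the limiter.
	cache := lru.New("source_ip",
		lru.WithMaxSize(5000),
		lru.WithCachedEntriesMetric(entries),
		lru.WithCachedRequestsMetric(requests),
	)

	_ = ratelimiter.New(
		cache,
		ratelimiter.WithBlockedCountMetric(blocked),
		ratelimiter.WithSourceIPLimitPerSecond(20),
		ratelimiter.WithSourceIPBurstSize(100),
	)
}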
diff --git a/internal/ratelimiter/ratelimiter_test.go b/internal/ratelimiter/ratelimiter_test.go
index 03393fb0..a85663af 100644
--- a/internal/ratelimiter/ratelimiter_test.go
+++ b/internal/ratelimiter/ratelimiter_test.go
@@ -7,6 +7,8 @@ import (
"github.com/prometheus/client_golang/prometheus"
"github.com/stretchr/testify/require"
+
+ "gitlab.com/gitlab-org/gitlab-pages/internal/lru"
)
var (
@@ -48,11 +50,10 @@ var sharedTestCases = map[string]struct {
func TestSourceIPAllowed(t *testing.T) {
t.Parallel()
- blocked, cachedEntries, cacheReqs := newTestMetrics(t)
-
for tn, tc := range sharedTestCases {
t.Run(tn, func(t *testing.T) {
- rl := New(blocked, cachedEntries, cacheReqs,
+ rl := New(
+ lru.New("source_ip"),
WithNow(mockNow),
WithSourceIPLimitPerSecond(tc.sourceIPLimit),
WithSourceIPBurstSize(tc.sourceIPBurstSize),
@@ -74,9 +75,9 @@ func TestSourceIPAllowed(t *testing.T) {
func TestSingleRateLimiterWithMultipleSourceIPs(t *testing.T) {
rate := 10 * time.Millisecond
- blocked, cachedEntries, cacheReqs := newTestMetrics(t)
- rl := New(blocked, cachedEntries, cacheReqs,
+ rl := New(
+ lru.New("source_ip"),
WithSourceIPLimitPerSecond(float64(1/rate)),
WithSourceIPBurstSize(1),
)
diff --git a/internal/vfs/zip/vfs.go b/internal/vfs/zip/vfs.go
index 07afa691..a9825190 100644
--- a/internal/vfs/zip/vfs.go
+++ b/internal/vfs/zip/vfs.go
@@ -87,17 +87,17 @@ func New(cfg *config.ZipServing) vfs.VFS {
// TODO: To be removed with https://gitlab.com/gitlab-org/gitlab-pages/-/issues/480
zipVFS.dataOffsetCache = lru.New(
"data-offset",
- defaultDataOffsetItems,
- defaultDataOffsetExpirationInterval,
- metrics.ZipCachedEntries,
- metrics.ZipCacheRequests,
+ lru.WithMaxSize(defaultDataOffsetItems),
+ lru.WithExpirationInterval(defaultDataOffsetExpirationInterval),
+ lru.WithCachedEntriesMetric(metrics.ZipCachedEntries),
+ lru.WithCachedRequestsMetric(metrics.ZipCacheRequests),
)
zipVFS.readlinkCache = lru.New(
"readlink",
- defaultReadlinkItems,
- defaultReadlinkExpirationInterval,
- metrics.ZipCachedEntries,
- metrics.ZipCacheRequests,
+ lru.WithMaxSize(defaultReadlinkItems),
+ lru.WithExpirationInterval(defaultReadlinkExpirationInterval),
+ lru.WithCachedEntriesMetric(metrics.ZipCachedEntries),
+ lru.WithCachedRequestsMetric(metrics.ZipCacheRequests),
)
return zipVFS