Welcome to mirror list, hosted at ThFree Co, Russian Federation.

gitlab.com/gitlab-org/gitaly.git - Unnamed repository; edit this file 'description' to name the repository.
summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorPatrick Steinhardt <psteinhardt@gitlab.com>2021-09-23 16:32:17 +0300
committerPatrick Steinhardt <psteinhardt@gitlab.com>2021-10-06 13:39:32 +0300
commitc98512a8889b454b08529e6f00cfa7a852a18017 (patch)
tree0f896abaa5650bbfa8a5a1125adb2b1d3c7792e1
parentb2734a6e09037f68d4619b62a8a0b578c8b6707a (diff)
catfile: Disentangle metrics from managing the cache
We will split up the cache into two caches, one for object readers and one for object info readers. For this, we will want to reuse the "stack" that keeps track of cached processes. But due to it currently being entangled with Prometheus metrics, this refactoring is harder to do than one would want it to be. Pull up metrics into the high-level functions such that we can easily split out the low-level stack at a later point. This also has the benefit that we're not handling metrics in the critical section anymore.
-rw-r--r--internal/git/catfile/cache.go17
1 file changed, 11 insertions, 6 deletions
diff --git a/internal/git/catfile/cache.go b/internal/git/catfile/cache.go
index c8ecb71bc..55ca5b706 100644
--- a/internal/git/catfile/cache.go
+++ b/internal/git/catfile/cache.go
@@ -197,9 +197,12 @@ func (c *ProcessCache) BatchProcess(ctx context.Context, repo git.RepositoryExec
if entry, ok := c.checkout(cacheKey); ok {
go c.returnWhenDone(requestDone, cacheKey, entry.value, entry.cancel)
+ c.catfileCacheCounter.WithLabelValues("hit").Inc()
return entry.value, nil
}
+ c.catfileCacheCounter.WithLabelValues("miss").Inc()
+
// We have not found any cached process, so we need to create a new one. In this
// case, we need to detach the process from the current context such that it does
// not get killed when the current context is done. Note that while we explicitly
@@ -267,18 +270,21 @@ func (c *ProcessCache) returnWhenDone(done <-chan struct{}, cacheKey key, batch
return
}
- c.add(cacheKey, batch, cancel)
+ if replaced := c.add(cacheKey, batch, cancel); replaced {
+ c.catfileCacheCounter.WithLabelValues("duplicate").Inc()
+ }
}
// add adds a key, value pair to c. If there are too many keys in c
// already add will evict old keys until the length is OK again.
-func (c *ProcessCache) add(k key, b *batch, cancel func()) {
+func (c *ProcessCache) add(k key, b *batch, cancel func()) bool {
c.entriesMutex.Lock()
defer c.entriesMutex.Unlock()
+ replacedExisting := false
if i, ok := c.lookup(k); ok {
- c.catfileCacheCounter.WithLabelValues("duplicate").Inc()
c.delete(i, true)
+ replacedExisting = true
}
ent := &entry{
@@ -294,6 +300,8 @@ func (c *ProcessCache) add(k key, b *batch, cancel func()) {
}
c.catfileCacheMembers.Set(float64(c.len()))
+
+ return replacedExisting
}
func (c *ProcessCache) head() *entry { return c.entries[0] }
@@ -307,12 +315,9 @@ func (c *ProcessCache) checkout(k key) (*entry, bool) {
i, ok := c.lookup(k)
if !ok {
- c.catfileCacheCounter.WithLabelValues("miss").Inc()
return nil, false
}
- c.catfileCacheCounter.WithLabelValues("hit").Inc()
-
entry := c.entries[i]
c.delete(i, false)
return entry, true