Welcome to mirror list, hosted at ThFree Co, Russian Federation.

gitlab.com/gitlab-org/gitlab-pages.git - Unnamed repository; edit this file 'description' to name the repository.
summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--acceptance_test.go5
-rw-r--r--go.mod1
-rw-r--r--go.sum6
-rw-r--r--internal/vfs/zip/archive.go65
-rw-r--r--internal/vfs/zip/archive_test.go92
-rw-r--r--internal/vfs/zip/lru_cache.go61
-rw-r--r--internal/vfs/zip/vfs.go30
-rw-r--r--internal/vfs/zip/vfs_test.go4
-rw-r--r--metrics/metrics.go19
9 files changed, 222 insertions, 61 deletions
diff --git a/acceptance_test.go b/acceptance_test.go
index 0b1442f0..69ec8742 100644
--- a/acceptance_test.go
+++ b/acceptance_test.go
@@ -459,7 +459,8 @@ func TestPrometheusMetricsCanBeScraped(t *testing.T) {
defer teardown()
// need to call an actual resource to populate certain metrics e.g. gitlab_pages_domains_source_api_requests_total
- res, err := GetPageFromListener(t, httpListener, "zip.gitlab.io", "/index.html/")
+ res, err := GetPageFromListener(t, httpListener, "zip.gitlab.io",
+ "/symlink.html")
require.NoError(t, err)
require.Equal(t, http.StatusOK, res.StatusCode)
@@ -496,7 +497,7 @@ func TestPrometheusMetricsCanBeScraped(t *testing.T) {
// zip archives
require.Contains(t, string(body), "gitlab_pages_zip_opened")
require.Contains(t, string(body), "gitlab_pages_zip_cache_requests")
- require.Contains(t, string(body), "gitlab_pages_zip_cached_archives")
+ require.Contains(t, string(body), "gitlab_pages_zip_cached_entries")
require.Contains(t, string(body), "gitlab_pages_zip_archive_entries_cached")
require.Contains(t, string(body), "gitlab_pages_zip_opened_entries_count")
}
diff --git a/go.mod b/go.mod
index d9035bee..74582a65 100644
--- a/go.mod
+++ b/go.mod
@@ -13,6 +13,7 @@ require (
github.com/gorilla/securecookie v1.1.1
github.com/gorilla/sessions v1.2.0
github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0
+ github.com/karlseguin/ccache/v2 v2.0.6
github.com/karrick/godirwalk v1.10.12
github.com/kr/text v0.2.0 // indirect
github.com/namsral/flag v1.7.4-pre
diff --git a/go.sum b/go.sum
index 8e2a2a3c..9e9ef22e 100644
--- a/go.sum
+++ b/go.sum
@@ -176,6 +176,10 @@ github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7V
github.com/k0kubun/colorstring v0.0.0-20150214042306-9440f1994b88/go.mod h1:3w7q1U84EfirKl04SVQ/s7nPm1ZPhiXd34z40TNz36k=
github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0 h1:iQTw/8FWTuc7uiaSepXwyf3o52HaUYcV+Tu66S3F5GA=
github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0/go.mod h1:1NbS8ALrpOvjt0rHPNLyCIeMtbizbir8U//inJ+zuB8=
+github.com/karlseguin/ccache/v2 v2.0.6 h1:jFCLz4bF4EPfuCcvESAgYNClkEb31LV3WzyOwLlFz7w=
+github.com/karlseguin/ccache/v2 v2.0.6/go.mod h1:2BDThcfQMf/c0jnZowt16eW405XIqZPavt+HoYEtcxQ=
+github.com/karlseguin/expect v1.0.2-0.20190806010014-778a5f0c6003 h1:vJ0Snvo+SLMY72r5J4sEfkuE7AFbixEP2qRbEcum/wA=
+github.com/karlseguin/expect v1.0.2-0.20190806010014-778a5f0c6003/go.mod h1:zNBxMY8P21owkeogJELCLeHIt+voOSduHYTFUbwRAV8=
github.com/karrick/godirwalk v1.10.12 h1:BqUm+LuJcXjGv1d2mj3gBiQyrQ57a0rYoAmhvJQ7RDU=
github.com/karrick/godirwalk v1.10.12/go.mod h1:RoGL9dQei4vP9ilrpETWE8CLOZ1kiN0LhBygSwrAsHA=
github.com/kataras/golog v0.0.9/go.mod h1:12HJgwBIZFNGL0EJnMRhmvGA0PQGx8VFwrZtM4CqbAk=
@@ -327,6 +331,8 @@ github.com/valyala/fasttemplate v1.0.1/go.mod h1:UQGH1tvbgY+Nz5t2n7tXsz52dQxojPU
github.com/valyala/tcplisten v0.0.0-20161114210144-ceec8f93295a/go.mod h1:v3UYOV9WzVtRmSR+PDvWpU/qWl4Wa5LApYYX4ZtKbio=
github.com/wadey/gocovmerge v0.0.0-20160331181800-b5bfa59ec0ad h1:W0LEBv82YCGEtcmPA3uNZBI33/qF//HAAs3MawDjRa0=
github.com/wadey/gocovmerge v0.0.0-20160331181800-b5bfa59ec0ad/go.mod h1:Hy8o65+MXnS6EwGElrSRjUzQDLXreJlzYLlWiHtt8hM=
+github.com/wsxiaoys/terminal v0.0.0-20160513160801-0940f3fc43a0 h1:3UeQBvD0TFrlVjOeLOBz+CPAI8dnbqNSVwUwRrkp7vQ=
+github.com/wsxiaoys/terminal v0.0.0-20160513160801-0940f3fc43a0/go.mod h1:IXCdmsXIht47RaVFLEdVnh1t+pgYtTAhQGj73kz+2DM=
github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ=
github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y=
diff --git a/internal/vfs/zip/archive.go b/internal/vfs/zip/archive.go
index 5fc55b0d..548ba651 100644
--- a/internal/vfs/zip/archive.go
+++ b/internal/vfs/zip/archive.go
@@ -8,8 +8,10 @@ import (
"io"
"os"
"path/filepath"
+ "strconv"
"strings"
"sync"
+ "sync/atomic"
"time"
log "github.com/sirupsen/logrus"
@@ -30,32 +32,38 @@ const (
var (
errNotSymlink = errors.New("not a symlink")
errSymlinkSize = errors.New("symlink too long")
+ errNotFile = errors.New("not a file")
)
// zipArchive implements the vfs.Root interface.
// It represents a zip archive saving all its files in memory.
// It holds an httprange.Resource that can be read with httprange.RangedReader in chunks.
type zipArchive struct {
+ fs *zipVFS
+
path string
once sync.Once
done chan struct{}
openTimeout time.Duration
+ cacheNamespace string
+
resource *httprange.Resource
reader *httprange.RangedReader
archive *zip.Reader
err error
- // TODO: add metrics https://gitlab.com/gitlab-org/gitlab-pages/-/issues/423
files map[string]*zip.File
}
-func newArchive(path string, openTimeout time.Duration) *zipArchive {
+func newArchive(fs *zipVFS, path string, openTimeout time.Duration) *zipArchive {
return &zipArchive{
- path: path,
- done: make(chan struct{}),
- files: make(map[string]*zip.File),
- openTimeout: openTimeout,
+ fs: fs,
+ path: path,
+ done: make(chan struct{}),
+ files: make(map[string]*zip.File),
+ openTimeout: openTimeout,
+ cacheNamespace: strconv.FormatInt(atomic.AddInt64(&fs.archiveCount, 1), 10) + ":",
}
}
@@ -158,14 +166,19 @@ func (a *zipArchive) Open(ctx context.Context, name string) (vfs.File, error) {
return nil, os.ErrNotExist
}
- // TODO: cache dataOffsets of files https://gitlab.com/gitlab-org/gitlab-pages/-/issues/461
- dataOffset, err := file.DataOffset()
+ if !file.Mode().IsRegular() {
+ return nil, errNotFile
+ }
+
+ dataOffset, err := a.fs.dataOffsetCache.findOrFetch(a.cacheNamespace, name, func() (interface{}, error) {
+ return file.DataOffset()
+ })
if err != nil {
return nil, err
}
// only read from dataOffset up to the size of the compressed file
- reader := a.reader.SectionReader(ctx, dataOffset, int64(file.CompressedSize64))
+ reader := a.reader.SectionReader(ctx, dataOffset.(int64), int64(file.CompressedSize64))
switch file.Method {
case zip.Deflate:
@@ -198,28 +211,36 @@ func (a *zipArchive) Readlink(ctx context.Context, name string) (string, error)
return "", errNotSymlink
}
- rc, err := file.Open()
- if err != nil {
- return "", err
- }
- defer rc.Close()
+ symlinkValue, err := a.fs.readlinkCache.findOrFetch(a.cacheNamespace, name, func() (interface{}, error) {
+ rc, err := file.Open()
+ if err != nil {
+ return nil, err
+ }
+ defer rc.Close()
- symlink := make([]byte, maxSymlinkSize+1)
+ var link [maxSymlinkSize + 1]byte
+
+ // read up to len(symlink) bytes from the link file
+ n, err := io.ReadFull(rc, link[:])
+ if err != nil && err != io.ErrUnexpectedEOF {
+ // if err == io.ErrUnexpectedEOF the link is smaller than len(symlink) so it's OK to not return it
+ return nil, err
+ }
- // read up to len(symlink) bytes from the link file
- n, err := io.ReadFull(rc, symlink)
- if err != nil && err != io.ErrUnexpectedEOF {
- // if err == io.ErrUnexpectedEOF the link is smaller than len(symlink) so it's OK to not return it
+ return string(link[:n]), nil
+ })
+ if err != nil {
return "", err
}
+ symlink := symlinkValue.(string)
+
// return errSymlinkSize if the number of bytes read from the link is too big
- if n > maxSymlinkSize {
+ if len(symlink) > maxSymlinkSize {
return "", errSymlinkSize
}
- // only return the n bytes read from the link
- return string(symlink[:n]), nil
+ return symlink, nil
}
// onEvicted called by the zipVFS.cache when an archive is removed from the cache
diff --git a/internal/vfs/zip/archive_test.go b/internal/vfs/zip/archive_test.go
index bb094038..bd7627b1 100644
--- a/internal/vfs/zip/archive_test.go
+++ b/internal/vfs/zip/archive_test.go
@@ -6,6 +6,7 @@ import (
"net/http"
"net/http/httptest"
"os"
+ "sync/atomic"
"testing"
"time"
@@ -17,7 +18,7 @@ import (
var chdirSet = false
func TestOpen(t *testing.T) {
- zip, cleanup := openZipArchive(t)
+ zip, cleanup := openZipArchive(t, nil)
defer cleanup()
tests := map[string]struct {
@@ -38,11 +39,11 @@ func TestOpen(t *testing.T) {
"file_exists_symlink": {
file: "symlink.html",
expectedContent: "subdir/linked.html",
- expectedErr: nil,
+ expectedErr: errNotFile,
},
"is_dir": {
file: "subdir",
- expectedErr: nil,
+ expectedErr: errNotFile,
},
"file_does_not_exist": {
file: "unknown.html",
@@ -59,12 +60,6 @@ func TestOpen(t *testing.T) {
}
require.NoError(t, err)
-
- if tt.expectedContent == "" {
- // cannot ioutil.ReadAll dirs but zip.Open should not fail
- return
- }
-
data, err := ioutil.ReadAll(f)
require.NoError(t, err)
@@ -74,8 +69,36 @@ func TestOpen(t *testing.T) {
}
}
+func TestOpenCached(t *testing.T) {
+	var requests int64
+	zip, cleanup := openZipArchive(t, &requests)
+	defer cleanup()
+
+	t.Run("open file first time", func(t *testing.T) {
+		requestsStart := atomic.LoadInt64(&requests)
+		f, err := zip.Open(context.Background(), "index.html")
+		require.NoError(t, err)
+		defer f.Close()
+
+		_, err = ioutil.ReadAll(f)
+		require.NoError(t, err)
+		require.Equal(t, int64(2), atomic.LoadInt64(&requests)-requestsStart, "we expect two requests to read file: data offset and content")
+	})
+
+	t.Run("open file second time", func(t *testing.T) {
+		requestsStart := atomic.LoadInt64(&requests)
+		f, err := zip.Open(context.Background(), "index.html")
+		require.NoError(t, err)
+		defer f.Close()
+
+		_, err = ioutil.ReadAll(f)
+		require.NoError(t, err)
+		require.Equal(t, int64(1), atomic.LoadInt64(&requests)-requestsStart, "we expect one request to read file with cached data offset")
+	})
+}
+
func TestLstat(t *testing.T) {
- zip, cleanup := openZipArchive(t)
+ zip, cleanup := openZipArchive(t, nil)
defer cleanup()
tests := map[string]struct {
@@ -135,7 +158,7 @@ func TestLstat(t *testing.T) {
}
func TestReadLink(t *testing.T) {
- zip, cleanup := openZipArchive(t)
+ zip, cleanup := openZipArchive(t, nil)
defer cleanup()
tests := map[string]struct {
@@ -177,11 +200,32 @@ func TestReadLink(t *testing.T) {
}
}
+func TestReadlinkCached(t *testing.T) {
+ var requests int64
+ zip, cleanup := openZipArchive(t, &requests)
+ defer cleanup()
+
+ t.Run("readlink first time", func(t *testing.T) {
+ requestsStart := atomic.LoadInt64(&requests)
+ _, err := zip.Readlink(context.Background(), "symlink.html")
+ require.NoError(t, err)
+ require.Equal(t, int64(2), atomic.LoadInt64(&requests)-requestsStart, "we expect two requests to read symlink: data offset and link")
+ })
+
+ t.Run("readlink second time", func(t *testing.T) {
+ requestsStart := atomic.LoadInt64(&requests)
+ _, err := zip.Readlink(context.Background(), "symlink.html")
+ require.NoError(t, err)
+ require.Equal(t, int64(0), atomic.LoadInt64(&requests)-requestsStart, "we expect no additional requests to read cached symlink")
+ })
+}
+
func TestArchiveCanBeReadAfterOpenCtxCanceled(t *testing.T) {
- testServerURL, cleanup := newZipFileServerURL(t, "group/zip.gitlab.io/public.zip")
+ testServerURL, cleanup := newZipFileServerURL(t, "group/zip.gitlab.io/public.zip", nil)
defer cleanup()
- zip := newArchive(testServerURL+"/public.zip", time.Second)
+ fs := New().(*zipVFS)
+ zip := newArchive(fs, testServerURL+"/public.zip", time.Second)
ctx, cancel := context.WithCancel(context.Background())
cancel()
@@ -200,10 +244,11 @@ func TestArchiveCanBeReadAfterOpenCtxCanceled(t *testing.T) {
}
func TestReadArchiveFails(t *testing.T) {
- testServerURL, cleanup := newZipFileServerURL(t, "group/zip.gitlab.io/public.zip")
+ testServerURL, cleanup := newZipFileServerURL(t, "group/zip.gitlab.io/public.zip", nil)
defer cleanup()
- zip := newArchive(testServerURL+"/unkown.html", time.Second)
+ fs := New().(*zipVFS)
+ zip := newArchive(fs, testServerURL+"/unkown.html", time.Second)
err := zip.openArchive(context.Background())
require.Error(t, err)
@@ -213,12 +258,17 @@ func TestReadArchiveFails(t *testing.T) {
require.EqualError(t, err, os.ErrNotExist.Error())
}
-func openZipArchive(t *testing.T) (*zipArchive, func()) {
+func openZipArchive(t *testing.T, requests *int64) (*zipArchive, func()) {
t.Helper()
- testServerURL, cleanup := newZipFileServerURL(t, "group/zip.gitlab.io/public.zip")
+ if requests == nil {
+ requests = new(int64)
+ }
+
+ testServerURL, cleanup := newZipFileServerURL(t, "group/zip.gitlab.io/public.zip", requests)
- zip := newArchive(testServerURL+"/public.zip", time.Second)
+ fs := New().(*zipVFS)
+ zip := newArchive(fs, testServerURL+"/public.zip", time.Second)
err := zip.openArchive(context.Background())
require.NoError(t, err)
@@ -227,13 +277,14 @@ func openZipArchive(t *testing.T) (*zipArchive, func()) {
// public/subdir/ public/subdir/hello.html public/subdir/linked.html
// public/bad_symlink.html public/subdir/2bp3Qzs...
require.NotZero(t, zip.files)
+ require.Equal(t, int64(3), atomic.LoadInt64(requests), "we expect three requests to open ZIP archive: size and two to seek central directory")
return zip, func() {
cleanup()
}
}
-func newZipFileServerURL(t *testing.T, zipFilePath string) (string, func()) {
+func newZipFileServerURL(t *testing.T, zipFilePath string, requests *int64) (string, func()) {
t.Helper()
chdir := testhelpers.ChdirInPath(t, "../../../shared/pages", &chdirSet)
@@ -241,6 +292,9 @@ func newZipFileServerURL(t *testing.T, zipFilePath string) (string, func()) {
m := http.NewServeMux()
m.HandleFunc("/public.zip", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
http.ServeFile(w, r, zipFilePath)
+ if requests != nil {
+ atomic.AddInt64(requests, 1)
+ }
}))
testServer := httptest.NewServer(m)
diff --git a/internal/vfs/zip/lru_cache.go b/internal/vfs/zip/lru_cache.go
new file mode 100644
index 00000000..fed5c360
--- /dev/null
+++ b/internal/vfs/zip/lru_cache.go
@@ -0,0 +1,61 @@
+package zip
+
+import (
+ "time"
+
+ "github.com/karlseguin/ccache/v2"
+
+ "gitlab.com/gitlab-org/gitlab-pages/metrics"
+)
+
+// lruCacheGetsPerPromote is the number of Gets after which an item
+// is promoted; it is chosen arbitrarily as a sane value indicating
+// that the item was frequently picked
+// promotion moves the item to the front of the LRU list
+const lruCacheGetsPerPromote = 64
+
+// lruCacheItemsToPruneDiv is a value that indicates how many items
+// need to be pruned when the cache is full; this prunes 1/16 of items
+const lruCacheItemsToPruneDiv = 16
+
+type lruCache struct {
+ op string
+ duration time.Duration
+ cache *ccache.Cache
+}
+
+func newLruCache(op string, maxEntries uint32, duration time.Duration) *lruCache {
+	configuration := ccache.Configure()
+	configuration.MaxSize(int64(maxEntries))
+	configuration.ItemsToPrune(maxEntries / lruCacheItemsToPruneDiv)
+	configuration.GetsPerPromote(lruCacheGetsPerPromote) // if item gets requested frequently promote it
+	configuration.OnDelete(func(*ccache.Item) {
+		metrics.ZipCachedEntries.WithLabelValues(op).Dec()
+	})
+	return &lruCache{
+		op:       op,
+		cache:    ccache.New(configuration),
+		duration: duration,
+	}
+}
+
+func (c *lruCache) findOrFetch(cacheNamespace, key string, fetchFn func() (interface{}, error)) (interface{}, error) {
+ item := c.cache.Get(cacheNamespace + key)
+
+ if item != nil && !item.Expired() {
+ metrics.ZipCacheRequests.WithLabelValues(c.op, "hit").Inc()
+ return item.Value(), nil
+ }
+
+ value, err := fetchFn()
+ if err != nil {
+ metrics.ZipCacheRequests.WithLabelValues(c.op, "error").Inc()
+ return nil, err
+ }
+
+ metrics.ZipCacheRequests.WithLabelValues(c.op, "miss").Inc()
+ metrics.ZipCachedEntries.WithLabelValues(c.op).Inc()
+
+ c.cache.Set(cacheNamespace+key, value, c.duration)
+ return value, nil
+}
diff --git a/internal/vfs/zip/vfs.go b/internal/vfs/zip/vfs.go
index fd0855f7..d6436010 100644
--- a/internal/vfs/zip/vfs.go
+++ b/internal/vfs/zip/vfs.go
@@ -17,6 +17,16 @@ const (
defaultCacheExpirationInterval = time.Minute
defaultCacheCleanupInterval = time.Minute / 2
defaultCacheRefreshInterval = time.Minute / 2
+
+ // we assume that each item costs around 100 bytes
+ // this gives around 5MB of raw memory needed without acceleration structures
+ defaultDataOffsetItems = 50000
+ defaultDataOffsetExpirationInterval = time.Hour
+
+ // we assume that each item costs around 200 bytes
+ // this gives around 2MB of raw memory needed without acceleration structures
+ defaultReadlinkItems = 10000
+ defaultReadlinkExpirationInterval = time.Hour
)
var (
@@ -26,17 +36,23 @@ var (
// zipVFS is a simple cached implementation of the vfs.VFS interface
type zipVFS struct {
cache *cache.Cache
+
+ dataOffsetCache *lruCache
+ readlinkCache *lruCache
+
+ archiveCount int64
}
// New creates a zipVFS instance that can be used by a serving request
func New() vfs.VFS {
zipVFS := &zipVFS{
- // TODO: add cache operation callbacks https://gitlab.com/gitlab-org/gitlab-pages/-/issues/465
- cache: cache.New(defaultCacheExpirationInterval, defaultCacheCleanupInterval),
+ cache: cache.New(defaultCacheExpirationInterval, defaultCacheCleanupInterval),
+ dataOffsetCache: newLruCache("data-offset", defaultDataOffsetItems, defaultDataOffsetExpirationInterval),
+ readlinkCache: newLruCache("readlink", defaultReadlinkItems, defaultReadlinkExpirationInterval),
}
zipVFS.cache.OnEvicted(func(s string, i interface{}) {
- metrics.ZipCachedArchives.Dec()
+ metrics.ZipCachedEntries.WithLabelValues("archive").Dec()
i.(*zipArchive).onEvicted()
})
@@ -78,7 +94,7 @@ func (fs *zipVFS) Name() string {
func (fs *zipVFS) findOrOpenArchive(ctx context.Context, path string) (*zipArchive, error) {
archive, expiry, found := fs.cache.GetWithExpiration(path)
if found {
- metrics.ZipServingArchiveCache.WithLabelValues("hit").Inc()
+ metrics.ZipCacheRequests.WithLabelValues("archive", "hit").Inc()
// TODO: do not refreshed errored archives https://gitlab.com/gitlab-org/gitlab-pages/-/merge_requests/351
if time.Until(expiry) < defaultCacheRefreshInterval {
@@ -86,15 +102,15 @@ func (fs *zipVFS) findOrOpenArchive(ctx context.Context, path string) (*zipArchi
fs.cache.SetDefault(path, archive)
}
} else {
- archive = newArchive(path, DefaultOpenTimeout)
+ archive = newArchive(fs, path, DefaultOpenTimeout)
// if adding the archive to the cache fails it means it's already been added before
// this is done to find concurrent additions.
if fs.cache.Add(path, archive, cache.DefaultExpiration) != nil {
return nil, errAlreadyCached
}
- metrics.ZipServingArchiveCache.WithLabelValues("miss").Inc()
- metrics.ZipCachedArchives.Inc()
+ metrics.ZipCacheRequests.WithLabelValues("archive", "miss").Inc()
+ metrics.ZipCachedEntries.WithLabelValues("archive").Inc()
}
zipArchive := archive.(*zipArchive)
diff --git a/internal/vfs/zip/vfs_test.go b/internal/vfs/zip/vfs_test.go
index 62b5f62c..434fc84c 100644
--- a/internal/vfs/zip/vfs_test.go
+++ b/internal/vfs/zip/vfs_test.go
@@ -10,7 +10,7 @@ import (
)
func TestVFSRoot(t *testing.T) {
- url, cleanup := newZipFileServerURL(t, "group/zip.gitlab.io/public.zip")
+ url, cleanup := newZipFileServerURL(t, "group/zip.gitlab.io/public.zip", nil)
defer cleanup()
tests := map[string]struct {
@@ -63,7 +63,7 @@ func TestVFSRoot(t *testing.T) {
}
func TestVFSFindOrOpenArchiveConcurrentAccess(t *testing.T) {
- testServerURL, cleanup := newZipFileServerURL(t, "group/zip.gitlab.io/public.zip")
+ testServerURL, cleanup := newZipFileServerURL(t, "group/zip.gitlab.io/public.zip", nil)
defer cleanup()
path := testServerURL + "/public.zip"
diff --git a/metrics/metrics.go b/metrics/metrics.go
index d6dc8ce1..db7cae9a 100644
--- a/metrics/metrics.go
+++ b/metrics/metrics.go
@@ -164,21 +164,22 @@ var (
[]string{"state"},
)
- // ZipServingArchiveCache is the number of zip archive cache hits/misses
- ZipServingArchiveCache = prometheus.NewCounterVec(
+ // ZipCacheRequests is the number of cache hits/misses
+ ZipCacheRequests = prometheus.NewCounterVec(
prometheus.CounterOpts{
Name: "gitlab_pages_zip_cache_requests",
Help: "The number of zip archives cache hits/misses",
},
- []string{"cache"},
+ []string{"op", "cache"},
)
- // ZipCachedArchives is the number of zip archives currently in the cache
- ZipCachedArchives = prometheus.NewGauge(
+ // ZipCachedEntries is the number of entries in the cache
+ ZipCachedEntries = prometheus.NewGaugeVec(
prometheus.GaugeOpts{
- Name: "gitlab_pages_zip_cached_archives",
- Help: "The number of zip archives currently in the cache",
+ Name: "gitlab_pages_zip_cached_entries",
+ Help: "The number of entries in the cache",
},
+ []string{"op"},
)
// ZipArchiveEntriesCached is the number of files per zip archive currently
@@ -225,8 +226,8 @@ func MustRegister() {
HTTPRangeOpenRequests,
ZipOpened,
ZipOpenedEntriesCount,
- ZipServingArchiveCache,
+ ZipCacheRequests,
ZipArchiveEntriesCached,
- ZipCachedArchives,
+ ZipCachedEntries,
)
}