Welcome to mirror list, hosted at ThFree Co, Russian Federation.

gitlab.com/gitlab-org/gitlab-pages.git - Unnamed repository; edit this file 'description' to name the repository.
summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorVladimir Shushlin <vshushlin@gitlab.com>2020-10-29 13:15:55 +0300
committerVladimir Shushlin <vshushlin@gitlab.com>2020-10-29 13:15:55 +0300
commit6903cf41216dc3e794fd1c853e2fc82dd7618a92 (patch)
tree1a674ec5cae366bb9a94c07d53062fc5200ce15b
parentc8d3f1e782cc9df229a5af183e3538f69ca55b49 (diff)
parentb44f7c856739ed3155e8667d700d4209a01f5ada (diff)
Merge branch '469-negative-cache' into 'master'
Do not refresh errored archives Closes #469 See merge request gitlab-org/gitlab-pages!384
-rw-r--r--internal/vfs/zip/archive.go17
-rw-r--r--internal/vfs/zip/vfs.go9
-rw-r--r--internal/vfs/zip/vfs_test.go130
3 files changed, 128 insertions, 28 deletions
diff --git a/internal/vfs/zip/archive.go b/internal/vfs/zip/archive.go
index 47295dc0..df175764 100644
--- a/internal/vfs/zip/archive.go
+++ b/internal/vfs/zip/archive.go
@@ -68,11 +68,8 @@ func newArchive(fs *zipVFS, path string, openTimeout time.Duration) *zipArchive
func (a *zipArchive) openArchive(parentCtx context.Context) (err error) {
// return early if openArchive was done already in a concurrent request
- select {
- case <-a.done:
- return a.err
-
- default:
+ if ok, err := a.openStatus(); ok {
+ return err
}
ctx, cancel := context.WithTimeout(parentCtx, a.openTimeout)
@@ -283,3 +280,13 @@ func (a *zipArchive) Readlink(ctx context.Context, name string) (string, error)
func (a *zipArchive) onEvicted() {
metrics.ZipArchiveEntriesCached.Sub(float64(len(a.files)))
}
+
+func (a *zipArchive) openStatus() (bool, error) {
+ select {
+ case <-a.done:
+ return true, a.err
+
+ default:
+ return false, nil
+ }
+}
diff --git a/internal/vfs/zip/vfs.go b/internal/vfs/zip/vfs.go
index f176a1d6..3b69d1e9 100644
--- a/internal/vfs/zip/vfs.go
+++ b/internal/vfs/zip/vfs.go
@@ -158,10 +158,11 @@ func (fs *zipVFS) findOrCreateArchive(ctx context.Context, path string) (*zipArc
if found {
metrics.ZipCacheRequests.WithLabelValues("archive", "hit").Inc()
- // TODO: do not refreshed errored archives https://gitlab.com/gitlab-org/gitlab-pages/-/merge_requests/351
- if time.Until(expiry) < fs.cacheRefreshInterval {
- // refresh item
- fs.cache.SetDefault(path, archive)
+ if opened, err := archive.(*zipArchive).openStatus(); opened && err == nil {
+ if time.Until(expiry) < fs.cacheRefreshInterval {
+ // refresh item that has been opened successfully
+ fs.cache.SetDefault(path, archive)
+ }
}
} else {
archive = newArchive(fs, path, fs.openTimeout)
diff --git a/internal/vfs/zip/vfs_test.go b/internal/vfs/zip/vfs_test.go
index 878e015a..8a5e77a8 100644
--- a/internal/vfs/zip/vfs_test.go
+++ b/internal/vfs/zip/vfs_test.go
@@ -101,33 +101,125 @@ func TestVFSFindOrOpenArchiveConcurrentAccess(t *testing.T) {
}, time.Second, time.Nanosecond)
}
-func TestVFSArchiveCacheEvict(t *testing.T) {
+func TestVFSFindOrOpenArchiveRefresh(t *testing.T) {
testServerURL, cleanup := newZipFileServerURL(t, "group/zip.gitlab.io/public.zip", nil)
defer cleanup()
- path := testServerURL + "/public.zip"
+ // It should be large enough to not have flaky executions
+ const expiryInterval = 10 * time.Millisecond
- vfs := New(
- WithCacheExpirationInterval(time.Nanosecond),
- ).(*zipVFS)
+ tests := map[string]struct {
+ path string
+ expirationInterval time.Duration
+ refreshInterval time.Duration
- archivesMetric := metrics.ZipCachedEntries.WithLabelValues("archive")
- archivesCount := testutil.ToFloat64(archivesMetric)
+ expectNewArchive bool
+ expectOpenError bool
+ expectArchiveRefreshed bool
+ }{
+ "after cache expiry of successful open a new archive is returned": {
+ path: "/public.zip",
+ expirationInterval: expiryInterval,
+ expectNewArchive: true,
+ expectOpenError: false,
+ },
+ "after cache expiry of errored open a new archive is returned": {
+ path: "/unknown.zip",
+ expirationInterval: expiryInterval,
+ expectNewArchive: true,
+ expectOpenError: true,
+ },
+ "subsequent open during refresh interval does refresh archive": {
+ path: "/public.zip",
+ expirationInterval: time.Second,
+ refreshInterval: time.Second, // refresh always
+ expectNewArchive: false,
+ expectOpenError: false,
+ expectArchiveRefreshed: true,
+ },
+ "subsequent open before refresh interval does not refresh archive": {
+ path: "/public.zip",
+ expirationInterval: time.Second,
+ refreshInterval: time.Millisecond, // very short interval should not refresh
+ expectNewArchive: false,
+ expectOpenError: false,
+ expectArchiveRefreshed: false,
+ },
+ "subsequent open of errored archive during refresh interval does not refresh": {
+ path: "/unknown.zip",
+ expirationInterval: time.Second,
+ refreshInterval: time.Second, // refresh always (if no error)
+ expectNewArchive: false,
+ expectOpenError: true,
+ expectArchiveRefreshed: false,
+ },
+ }
- // create a new archive and increase counters
- archive, err := vfs.Root(context.Background(), path)
- require.NoError(t, err)
- require.NotNil(t, archive)
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ withExpectedArchiveCount(t, 1, func(t *testing.T) {
+ vfs := New(
+ WithCacheExpirationInterval(test.expirationInterval),
+ WithCacheRefreshInterval(test.refreshInterval),
+ ).(*zipVFS)
+
+ path := testServerURL + test.path
+
+ // create a new archive and increase counters
+ archive1, err1 := vfs.findOrOpenArchive(context.Background(), path)
+ if test.expectOpenError {
+ require.Error(t, err1)
+ require.Nil(t, archive1)
+ } else {
+ require.NoError(t, err1)
+ }
+
+ item1, exp1, found := vfs.cache.GetWithExpiration(path)
+ require.True(t, found)
+
+ // give some time for timeouts to fire
+ time.Sleep(expiryInterval)
+
+ if test.expectNewArchive {
+ // should return a new archive
+ archive2, err2 := vfs.findOrOpenArchive(context.Background(), path)
+ if test.expectOpenError {
+ require.Error(t, err2)
+ require.Nil(t, archive2)
+ } else {
+ require.NoError(t, err2)
+ require.NotEqual(t, archive1, archive2, "a new archive should be returned")
+ }
+ return
+ }
+
+ // should return exactly the same archive
+ archive2, err2 := vfs.findOrOpenArchive(context.Background(), path)
+ require.Equal(t, archive1, archive2, "same archive is returned")
+ require.Equal(t, err1, err2, "same error for the same archive")
+
+ item2, exp2, found := vfs.cache.GetWithExpiration(path)
+ require.True(t, found)
+ require.Equal(t, item1, item2, "same item is returned")
+
+ if test.expectArchiveRefreshed {
+ require.Greater(t, exp2.UnixNano(), exp1.UnixNano(), "archive should be refreshed")
+ } else {
+ require.Equal(t, exp1.UnixNano(), exp2.UnixNano(), "archive has not been refreshed")
+ }
+ })
+ })
+ }
+}
- // wait for archive to expire
- time.Sleep(time.Nanosecond)
+func withExpectedArchiveCount(t *testing.T, archiveCount int, fn func(t *testing.T)) {
+ t.Helper()
- // a new object is created
- archive2, err := vfs.Root(context.Background(), path)
- require.NoError(t, err)
- require.NotNil(t, archive2)
- require.NotEqual(t, archive, archive2, "a different archive is returned")
+ archivesMetric := metrics.ZipCachedEntries.WithLabelValues("archive")
+ archivesCount := testutil.ToFloat64(archivesMetric)
+
+ fn(t)
archivesCountEnd := testutil.ToFloat64(archivesMetric)
- require.Equal(t, float64(1), archivesCountEnd-archivesCount, "all expired archives are evicted")
+ require.Equal(t, float64(archiveCount), archivesCountEnd-archivesCount, "exact number of archives is cached")
}