gitlab.com/gitlab-org/gitaly.git
author     John Cai <jcai@gitlab.com>    2022-03-16 23:41:08 +0300
committer  John Cai <jcai@gitlab.com>    2022-03-16 23:41:08 +0300
commit     5afe91e6ecfd5a6d6ee3de6cc767e277c1091c6e (patch)
tree       d6528e135415ee0d53ec888a3dbc9d57ea4480a3
parent     8966a5cbdb29f33af712146c47ae1c86978a32a1 (diff)
parent     08d51eee2304870f7dc6716d256e7f3e5d49f81f (diff)

Merge branch 'pks-housekeeping-improved-metrics' into 'master'

housekeeping: Improve visibility into performed maintenance tasks

Closes #4103

See merge request gitlab-org/gitaly!4406
-rw-r--r--  .gitlab-ci.yml                                               4
-rw-r--r--  internal/git/housekeeping/clean_stale_data.go               86
-rw-r--r--  internal/git/housekeeping/clean_stale_data_test.go         246
-rw-r--r--  internal/git/housekeeping/manager.go                        13
-rw-r--r--  internal/git/housekeeping/optimize_repository.go            34
-rw-r--r--  internal/git/housekeeping/optimize_repository_ext_test.go   16
-rw-r--r--  internal/git/housekeeping/optimize_repository_test.go       17

7 files changed, 283 insertions, 133 deletions
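
Before the individual file diffs, it helps to see the metrics change in isolation: the globally registered optimizerepository_empty_dir_removal_total counter is removed, and pruned files are instead counted on a gitaly_housekeeping_pruned_files_total counter vector labelled by filetype and owned by the RepositoryManager. The standalone sketch below shows that labelled-counter pattern with the Prometheus Go client; the metric name and label match the diff, while the program structure, port, and increment sites are purely illustrative.

package main

import (
	"log"
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

// prunedFilesTotal mirrors the counter vector added in manager.go: one time
// series per pruned file type instead of a single unlabelled counter.
var prunedFilesTotal = prometheus.NewCounterVec(
	prometheus.CounterOpts{
		Name: "gitaly_housekeeping_pruned_files_total",
		Help: "Total number of files pruned",
	},
	[]string{"filetype"},
)

func main() {
	prometheus.MustRegister(prunedFilesTotal)

	// Housekeeping code bumps whichever label matches what it cleaned up.
	prunedFilesTotal.WithLabelValues("objects").Add(3)
	prunedFilesTotal.WithLabelValues("refsemptydir").Inc()

	// Illustrative exposition endpoint; the port is arbitrary.
	http.Handle("/metrics", promhttp.Handler())
	log.Fatal(http.ListenAndServe(":9100", nil))
}
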
diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
index a20e24297..84c9940f1 100644
--- a/.gitlab-ci.yml
+++ b/.gitlab-ci.yml
@@ -192,7 +192,9 @@ test:
test:coverage:
<<: *test_definition
script:
- - make cover
+ # We need to explicitly build all prerequisites so that we can run tests unprivileged.
+ - make build prepare-tests $(pwd)/_build/tools/gocover-cobertura
+ - setpriv --reuid=9999 --regid=9999 --clear-groups --no-new-privs env HOME=/dev/null make cover SKIP_RSPEC_BUILD=YesPlease
artifacts:
reports:
cobertura: _build/cover/cobertura.xml
diff --git a/internal/git/housekeeping/clean_stale_data.go b/internal/git/housekeeping/clean_stale_data.go
index c1c2c9797..2cea9164a 100644
--- a/internal/git/housekeeping/clean_stale_data.go
+++ b/internal/git/housekeeping/clean_stale_data.go
@@ -11,7 +11,6 @@ import (
"time"
"github.com/grpc-ecosystem/go-grpc-middleware/logging/logrus/ctxlogrus"
- "github.com/prometheus/client_golang/prometheus"
log "github.com/sirupsen/logrus"
"gitlab.com/gitlab-org/gitaly/v14/internal/git"
"gitlab.com/gitlab-org/gitaly/v14/internal/git/localrepo"
@@ -31,25 +30,10 @@ const (
packedRefsNewGracePeriod = 15 * time.Minute
)
-var (
- lockfiles = []string{
- "config.lock",
- "HEAD.lock",
- "objects/info/commit-graphs/commit-graph-chain.lock",
- }
-
- optimizeEmptyDirRemovalTotals = prometheus.NewCounter(
- prometheus.CounterOpts{
- Namespace: "gitaly",
- Subsystem: "repository",
- Name: "optimizerepository_empty_dir_removal_total",
- Help: "Total number of empty directories removed by OptimizeRepository RPC",
- },
- )
-)
-
-func init() {
- prometheus.MustRegister(optimizeEmptyDirRemovalTotals)
+var lockfiles = []string{
+ "config.lock",
+ "HEAD.lock",
+ "objects/info/commit-graphs/commit-graph-chain.lock",
}
type staleFileFinderFn func(context.Context, string) ([]string, error)
@@ -83,6 +67,8 @@ func (m *RepositoryManager) CleanStaleData(ctx context.Context, repo *localrepo.
filesToPrune = append(filesToPrune, staleFiles...)
logEntry = logEntry.WithField(field, len(staleFiles))
+
+ m.prunedFilesTotal.WithLabelValues(field).Add(float64(len(staleFiles)))
}
unremovableFiles := 0
@@ -98,12 +84,15 @@ func (m *RepositoryManager) CleanStaleData(ctx context.Context, repo *localrepo.
}
}
- if len(filesToPrune) > 0 {
- logEntry.WithField("failures", unremovableFiles).Info("removed files")
+ prunedRefDirs, err := removeRefEmptyDirs(ctx, repo)
+ m.prunedFilesTotal.WithLabelValues("refsemptydir").Add(float64(prunedRefDirs))
+ if err != nil {
+ return fmt.Errorf("housekeeping could not remove empty refs: %w", err)
}
+ logEntry = logEntry.WithField("refsemptydir", prunedRefDirs)
- if err := removeRefEmptyDirs(ctx, repo); err != nil {
- return fmt.Errorf("housekeeping could not remove empty refs: %w", err)
+ if len(filesToPrune) > 0 || prunedRefDirs > 0 {
+ logEntry.WithField("failures", unremovableFiles).Info("removed files")
}
// TODO: https://gitlab.com/gitlab-org/gitaly/-/issues/3987
@@ -411,10 +400,10 @@ func fixDirectoryPermissions(ctx context.Context, path string, retriedPaths map[
})
}
-func removeRefEmptyDirs(ctx context.Context, repository *localrepo.Repo) error {
+func removeRefEmptyDirs(ctx context.Context, repository *localrepo.Repo) (int, error) {
rPath, err := repository.Path()
if err != nil {
- return err
+ return 0, err
}
repoRefsPath := filepath.Join(rPath, "refs")
@@ -422,26 +411,28 @@ func removeRefEmptyDirs(ctx context.Context, repository *localrepo.Repo) error {
// recursive functions for each subdirectory
entries, err := os.ReadDir(repoRefsPath)
if err != nil {
- return err
+ return 0, err
}
+ prunedDirsTotal := 0
for _, e := range entries {
if !e.IsDir() {
continue
}
- ePath := filepath.Join(repoRefsPath, e.Name())
- if err := removeEmptyDirs(ctx, ePath); err != nil {
- return err
+ prunedDirs, err := removeEmptyDirs(ctx, filepath.Join(repoRefsPath, e.Name()))
+ if err != nil {
+ return prunedDirsTotal, err
}
+ prunedDirsTotal += prunedDirs
}
- return nil
+ return prunedDirsTotal, nil
}
-func removeEmptyDirs(ctx context.Context, target string) error {
+func removeEmptyDirs(ctx context.Context, target string) (int, error) {
if err := ctx.Err(); err != nil {
- return err
+ return 0, err
}
// We need to stat the directory early on in order to get its current mtime. If we
@@ -450,54 +441,55 @@ func removeEmptyDirs(ctx context.Context, target string) error {
dirStat, err := os.Stat(target)
if err != nil {
if os.IsNotExist(err) {
- return nil
+ return 0, nil
}
- return err
+ return 0, err
}
entries, err := os.ReadDir(target)
switch {
case os.IsNotExist(err):
- return nil // race condition: someone else deleted it first
+ return 0, nil // race condition: someone else deleted it first
case err != nil:
- return err
+ return 0, err
}
+ prunedDirsTotal := 0
for _, e := range entries {
if !e.IsDir() {
continue
}
- ePath := filepath.Join(target, e.Name())
- if err := removeEmptyDirs(ctx, ePath); err != nil {
- return err
+ prunedDirs, err := removeEmptyDirs(ctx, filepath.Join(target, e.Name()))
+ if err != nil {
+ return prunedDirsTotal, err
}
+ prunedDirsTotal += prunedDirs
}
// If the directory is older than the grace period for empty refs, then we can
// consider it for deletion in case it's empty.
if time.Since(dirStat.ModTime()) < emptyRefsGracePeriod {
- return nil
+ return prunedDirsTotal, nil
}
// recheck entries now that we have potentially removed some dirs
entries, err = os.ReadDir(target)
if err != nil && !os.IsNotExist(err) {
- return err
+ return prunedDirsTotal, err
}
if len(entries) > 0 {
- return nil
+ return prunedDirsTotal, nil
}
switch err := os.Remove(target); {
case os.IsNotExist(err):
- return nil // race condition: someone else deleted it first
+ return prunedDirsTotal, nil // race condition: someone else deleted it first
case err != nil:
- return err
+ return prunedDirsTotal, err
}
- optimizeEmptyDirRemovalTotals.Inc()
- return nil
+ return prunedDirsTotal + 1, nil
}
func myLogger(ctx context.Context) *log.Entry {
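
The signature change above propagates a count of removed directories upwards so that CleanStaleData can feed it into the new refsemptydir label and log field. A simplified, standalone version of that counting recursion is sketched below; as assumptions, it omits the mtime grace period and the context cancellation handling of the real removeEmptyDirs, and the path used in main is hypothetical.

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// removeEmptyDirs recursively deletes empty directories below (and including)
// target and reports how many it removed. Unlike Gitaly's version it applies
// no grace period and takes no context.
func removeEmptyDirs(target string) (int, error) {
	entries, err := os.ReadDir(target)
	switch {
	case os.IsNotExist(err):
		return 0, nil // already gone
	case err != nil:
		return 0, err
	}

	removed := 0
	for _, e := range entries {
		if !e.IsDir() {
			continue
		}
		n, err := removeEmptyDirs(filepath.Join(target, e.Name()))
		if err != nil {
			return removed, err
		}
		removed += n
	}

	// Re-read the directory: pruning children may have left it empty.
	entries, err = os.ReadDir(target)
	switch {
	case os.IsNotExist(err):
		return removed, nil
	case err != nil:
		return removed, err
	case len(entries) > 0:
		return removed, nil
	}

	if err := os.Remove(target); err != nil && !os.IsNotExist(err) {
		return removed, err
	}
	return removed + 1, nil
}

func main() {
	// Hypothetical path; Gitaly walks the subdirectories of "<repo>/refs".
	n, err := removeEmptyDirs("refs")
	fmt.Printf("removed %d directories, err=%v\n", n, err)
}
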
diff --git a/internal/git/housekeeping/clean_stale_data_test.go b/internal/git/housekeeping/clean_stale_data_test.go
index 881b1c374..28e52621c 100644
--- a/internal/git/housekeeping/clean_stale_data_test.go
+++ b/internal/git/housekeeping/clean_stale_data_test.go
@@ -6,9 +6,11 @@ import (
"os"
"path/filepath"
"runtime"
+ "strings"
"testing"
"time"
+ "github.com/prometheus/client_golang/prometheus/testutil"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"gitlab.com/gitlab-org/gitaly/v14/internal/backchannel"
@@ -116,10 +118,47 @@ func d(name string, mode os.FileMode, age time.Duration, finalState entryFinalSt
return &dirEntry{fileEntry{name, mode, age, finalState}, entries}
}
+type cleanStaleDataMetrics struct {
+ objects int
+ locks int
+ refs int
+ reflocks int
+ refsEmptyDir int
+ packedRefsLock int
+ packedRefsNew int
+}
+
+func requireCleanStaleDataMetrics(t *testing.T, m *RepositoryManager, metrics cleanStaleDataMetrics) {
+ t.Helper()
+
+ var builder strings.Builder
+
+ _, err := builder.WriteString("# HELP gitaly_housekeeping_pruned_files_total Total number of files pruned\n")
+ require.NoError(t, err)
+ _, err = builder.WriteString("# TYPE gitaly_housekeeping_pruned_files_total counter\n")
+ require.NoError(t, err)
+
+ for metric, expectedValue := range map[string]int{
+ "objects": metrics.objects,
+ "locks": metrics.locks,
+ "refs": metrics.refs,
+ "reflocks": metrics.reflocks,
+ "packedrefslock": metrics.packedRefsLock,
+ "packedrefsnew": metrics.packedRefsNew,
+ "refsemptydir": metrics.refsEmptyDir,
+ } {
+ _, err := builder.WriteString(fmt.Sprintf("gitaly_housekeeping_pruned_files_total{filetype=%q} %d\n", metric, expectedValue))
+ require.NoError(t, err)
+ }
+
+ require.NoError(t, testutil.CollectAndCompare(m, strings.NewReader(builder.String()), "gitaly_housekeeping_pruned_files_total"))
+}
+
func TestPerform(t *testing.T) {
testcases := []struct {
- name string
- entries []entry
+ name string
+ entries []entry
+ expectedMetrics cleanStaleDataMetrics
}{
{
name: "clean",
@@ -157,6 +196,9 @@ func TestPerform(t *testing.T) {
f("b", 0o700, 24*time.Hour, Keep),
}),
},
+ expectedMetrics: cleanStaleDataMetrics{
+ objects: 1,
+ },
},
{
name: "subdir temp file",
@@ -167,6 +209,9 @@ func TestPerform(t *testing.T) {
}),
}),
},
+ expectedMetrics: cleanStaleDataMetrics{
+ objects: 1,
+ },
},
{
name: "inaccessible tmp directory",
@@ -189,6 +234,9 @@ func TestPerform(t *testing.T) {
}),
}),
},
+ expectedMetrics: cleanStaleDataMetrics{
+ objects: 1,
+ },
},
{
name: "files outside of object database",
@@ -217,11 +265,15 @@ func TestPerform(t *testing.T) {
e.create(t, repoPath)
}
- require.NoError(t, NewManager(nil).CleanStaleData(ctx, repo))
+ mgr := NewManager(nil)
+
+ require.NoError(t, mgr.CleanStaleData(ctx, repo))
for _, e := range tc.entries {
e.validate(t, repoPath)
}
+
+ requireCleanStaleDataMetrics(t, mgr, tc.expectedMetrics)
})
}
}
@@ -234,9 +286,10 @@ func TestPerform_references(t *testing.T) {
}
testcases := []struct {
- desc string
- refs []ref
- expected []string
+ desc string
+ refs []ref
+ expected []string
+ expectedMetrics cleanStaleDataMetrics
}{
{
desc: "normal reference",
@@ -262,6 +315,9 @@ func TestPerform_references(t *testing.T) {
{name: "refs/heads/master", age: 25 * time.Hour, size: 0},
},
expected: nil,
+ expectedMetrics: cleanStaleDataMetrics{
+ refs: 1,
+ },
},
{
desc: "multiple references",
@@ -284,6 +340,9 @@ func TestPerform_references(t *testing.T) {
"refs/heads/kept-because-recent",
"refs/heads/kept-because-nonempty",
},
+ expectedMetrics: cleanStaleDataMetrics{
+ refs: 3,
+ },
},
}
@@ -302,7 +361,9 @@ func TestPerform_references(t *testing.T) {
}
ctx := testhelper.Context(t)
- require.NoError(t, NewManager(nil).CleanStaleData(ctx, repo))
+ mgr := NewManager(nil)
+
+ require.NoError(t, mgr.CleanStaleData(ctx, repo))
var actual []string
require.NoError(t, filepath.Walk(filepath.Join(repoPath, "refs"), func(path string, info os.FileInfo, _ error) error {
@@ -315,14 +376,17 @@ func TestPerform_references(t *testing.T) {
}))
require.ElementsMatch(t, tc.expected, actual)
+
+ requireCleanStaleDataMetrics(t, mgr, tc.expectedMetrics)
})
}
}
func TestPerform_emptyRefDirs(t *testing.T) {
testcases := []struct {
- name string
- entries []entry
+ name string
+ entries []entry
+ expectedMetrics cleanStaleDataMetrics
}{
{
name: "unrelated empty directories",
@@ -353,6 +417,9 @@ func TestPerform_emptyRefDirs(t *testing.T) {
d("nested", 0o700, 240*time.Hour, Delete, []entry{}),
}),
},
+ expectedMetrics: cleanStaleDataMetrics{
+ refsEmptyDir: 1,
+ },
},
{
name: "hierarchy of nested stale ref dirs gets pruned",
@@ -363,6 +430,9 @@ func TestPerform_emptyRefDirs(t *testing.T) {
}),
}),
},
+ expectedMetrics: cleanStaleDataMetrics{
+ refsEmptyDir: 2,
+ },
},
{
name: "hierarchy with intermediate non-stale ref dir gets kept",
@@ -375,6 +445,9 @@ func TestPerform_emptyRefDirs(t *testing.T) {
}),
}),
},
+ expectedMetrics: cleanStaleDataMetrics{
+ refsEmptyDir: 1,
+ },
},
{
name: "stale hierarchy with refs gets partially retained",
@@ -390,6 +463,9 @@ func TestPerform_emptyRefDirs(t *testing.T) {
}),
}),
},
+ expectedMetrics: cleanStaleDataMetrics{
+ refsEmptyDir: 2,
+ },
},
}
@@ -403,85 +479,128 @@ func TestPerform_emptyRefDirs(t *testing.T) {
e.create(t, repoPath)
}
- require.NoError(t, NewManager(nil).CleanStaleData(ctx, repo))
+ mgr := NewManager(nil)
+
+ require.NoError(t, mgr.CleanStaleData(ctx, repo))
for _, e := range tc.entries {
e.validate(t, repoPath)
}
+
+ requireCleanStaleDataMetrics(t, mgr, tc.expectedMetrics)
})
}
}
func TestPerform_withSpecificFile(t *testing.T) {
- for file, finder := range map[string]staleFileFinderFn{
- "HEAD.lock": findStaleLockfiles,
- "config.lock": findStaleLockfiles,
- "packed-refs.lock": findPackedRefsLock,
- "packed-refs.new": findPackedRefsNew,
- } {
- testPerformWithSpecificFile(t, file, finder)
- }
-}
-
-func testPerformWithSpecificFile(t *testing.T, file string, finder staleFileFinderFn) {
- ctx := testhelper.Context(t)
-
- cfg, repoProto, repoPath := testcfg.BuildWithRepo(t)
- repo := localrepo.NewTestRepo(t, cfg, repoProto)
- mgr := NewManager(nil)
+ t.Parallel()
- require.NoError(t, mgr.CleanStaleData(ctx, repo))
for _, tc := range []struct {
- desc string
- entries []entry
- expectedFiles []string
+ desc string
+ file string
+ finder staleFileFinderFn
+ expectedMetrics cleanStaleDataMetrics
}{
{
- desc: fmt.Sprintf("fresh %s is kept", file),
- entries: []entry{
- f(file, 0o700, 10*time.Minute, Keep),
+ desc: "locked HEAD",
+ file: "HEAD.lock",
+ finder: findStaleLockfiles,
+ expectedMetrics: cleanStaleDataMetrics{
+ locks: 1,
},
},
{
- desc: fmt.Sprintf("stale %s in subdir is kept", file),
- entries: []entry{
- d("subdir", 0o700, 240*time.Hour, Keep, []entry{
- f(file, 0o700, 24*time.Hour, Keep),
- }),
+ desc: "locked config",
+ file: "config.lock",
+ finder: findStaleLockfiles,
+ expectedMetrics: cleanStaleDataMetrics{
+ locks: 1,
},
},
{
- desc: fmt.Sprintf("stale %s is deleted", file),
- entries: []entry{
- f(file, 0o700, 61*time.Minute, Delete),
- },
- expectedFiles: []string{
- filepath.Join(repoPath, file),
+ desc: "locked packed-refs",
+ file: "packed-refs.lock",
+ finder: findPackedRefsLock,
+ expectedMetrics: cleanStaleDataMetrics{
+ packedRefsLock: 1,
},
},
{
- desc: fmt.Sprintf("variations of %s are kept", file),
- entries: []entry{
- f(file[:len(file)-1], 0o700, 61*time.Minute, Keep),
- f("~"+file, 0o700, 61*time.Minute, Keep),
- f(file+"~", 0o700, 61*time.Minute, Keep),
+ desc: "temporary packed-refs",
+ file: "packed-refs.new",
+ finder: findPackedRefsNew,
+ expectedMetrics: cleanStaleDataMetrics{
+ packedRefsNew: 1,
},
},
} {
+ tc := tc
+
t.Run(tc.desc, func(t *testing.T) {
- for _, e := range tc.entries {
- e.create(t, repoPath)
- }
+ t.Parallel()
- staleFiles, err := finder(ctx, repoPath)
- require.NoError(t, err)
- require.ElementsMatch(t, tc.expectedFiles, staleFiles)
+ ctx := testhelper.Context(t)
+
+ cfg, repoProto, repoPath := testcfg.BuildWithRepo(t)
+ repo := localrepo.NewTestRepo(t, cfg, repoProto)
+ mgr := NewManager(nil)
require.NoError(t, mgr.CleanStaleData(ctx, repo))
+ for _, subcase := range []struct {
+ desc string
+ entries []entry
+ expectedFiles []string
+ }{
+ {
+ desc: fmt.Sprintf("fresh %s is kept", tc.file),
+ entries: []entry{
+ f(tc.file, 0o700, 10*time.Minute, Keep),
+ },
+ },
+ {
+ desc: fmt.Sprintf("stale %s in subdir is kept", tc.file),
+ entries: []entry{
+ d("subdir", 0o700, 240*time.Hour, Keep, []entry{
+ f(tc.file, 0o700, 24*time.Hour, Keep),
+ }),
+ },
+ },
+ {
+ desc: fmt.Sprintf("stale %s is deleted", tc.file),
+ entries: []entry{
+ f(tc.file, 0o700, 61*time.Minute, Delete),
+ },
+ expectedFiles: []string{
+ filepath.Join(repoPath, tc.file),
+ },
+ },
+ {
+ desc: fmt.Sprintf("variations of %s are kept", tc.file),
+ entries: []entry{
+ f(tc.file[:len(tc.file)-1], 0o700, 61*time.Minute, Keep),
+ f("~"+tc.file, 0o700, 61*time.Minute, Keep),
+ f(tc.file+"~", 0o700, 61*time.Minute, Keep),
+ },
+ },
+ } {
+ t.Run(subcase.desc, func(t *testing.T) {
+ for _, e := range subcase.entries {
+ e.create(t, repoPath)
+ }
+
+ staleFiles, err := tc.finder(ctx, repoPath)
+ require.NoError(t, err)
+ require.ElementsMatch(t, subcase.expectedFiles, staleFiles)
- for _, e := range tc.entries {
- e.validate(t, repoPath)
+ require.NoError(t, mgr.CleanStaleData(ctx, repo))
+
+ for _, e := range subcase.entries {
+ e.validate(t, repoPath)
+ }
+ })
}
+
+ requireCleanStaleDataMetrics(t, mgr, tc.expectedMetrics)
})
}
}
@@ -493,6 +612,7 @@ func TestPerform_referenceLocks(t *testing.T) {
desc string
entries []entry
expectedReferenceLocks []string
+ expectedMetrics cleanStaleDataMetrics
}{
{
desc: "fresh lock is kept",
@@ -514,6 +634,9 @@ func TestPerform_referenceLocks(t *testing.T) {
expectedReferenceLocks: []string{
"refs/main.lock",
},
+ expectedMetrics: cleanStaleDataMetrics{
+ reflocks: 1,
+ },
},
{
desc: "nested reference locks are deleted",
@@ -538,6 +661,9 @@ func TestPerform_referenceLocks(t *testing.T) {
"refs/heads/main.lock",
"refs/foobar/main.lock",
},
+ expectedMetrics: cleanStaleDataMetrics{
+ reflocks: 3,
+ },
},
} {
t.Run(tc.desc, func(t *testing.T) {
@@ -560,11 +686,15 @@ func TestPerform_referenceLocks(t *testing.T) {
require.NoError(t, err)
require.ElementsMatch(t, expectedReferenceLocks, staleLockfiles)
- require.NoError(t, NewManager(nil).CleanStaleData(ctx, repo))
+ mgr := NewManager(nil)
+
+ require.NoError(t, mgr.CleanStaleData(ctx, repo))
for _, e := range tc.entries {
e.validate(t, repoPath)
}
+
+ requireCleanStaleDataMetrics(t, mgr, tc.expectedMetrics)
})
}
}
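
The new requireCleanStaleDataMetrics helper is built on testutil.CollectAndCompare, which gathers metrics from any prometheus.Collector and compares them against expected text-format exposition, restricted to the named metric families. A minimal, self-contained example of that call, independent of Gitaly's test setup (package name and test name are illustrative):

package example

import (
	"strings"
	"testing"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/testutil"
)

func TestPrunedFilesExposition(t *testing.T) {
	prunedFilesTotal := prometheus.NewCounterVec(
		prometheus.CounterOpts{
			Name: "gitaly_housekeeping_pruned_files_total",
			Help: "Total number of files pruned",
		},
		[]string{"filetype"},
	)
	prunedFilesTotal.WithLabelValues("objects").Add(2)

	// Expected exposition in the Prometheus text format, including the
	// HELP and TYPE comments, as in the test helper above.
	expected := "# HELP gitaly_housekeeping_pruned_files_total Total number of files pruned\n" +
		"# TYPE gitaly_housekeeping_pruned_files_total counter\n" +
		"gitaly_housekeeping_pruned_files_total{filetype=\"objects\"} 2\n"

	// Only the named metric family is compared, so unrelated collectors
	// cannot interfere with the assertion.
	if err := testutil.CollectAndCompare(prunedFilesTotal, strings.NewReader(expected), "gitaly_housekeeping_pruned_files_total"); err != nil {
		t.Fatal(err)
	}
}
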
diff --git a/internal/git/housekeeping/manager.go b/internal/git/housekeeping/manager.go
index eb391f8c9..859d7091e 100644
--- a/internal/git/housekeeping/manager.go
+++ b/internal/git/housekeeping/manager.go
@@ -22,8 +22,9 @@ type Manager interface {
type RepositoryManager struct {
txManager transaction.Manager
- tasksTotal *prometheus.CounterVec
- tasksLatency *prometheus.HistogramVec
+ tasksTotal *prometheus.CounterVec
+ tasksLatency *prometheus.HistogramVec
+ prunedFilesTotal *prometheus.CounterVec
}
// NewManager creates a new RepositoryManager.
@@ -45,6 +46,13 @@ func NewManager(txManager transaction.Manager) *RepositoryManager {
},
[]string{"housekeeping_task"},
),
+ prunedFilesTotal: prometheus.NewCounterVec(
+ prometheus.CounterOpts{
+ Name: "gitaly_housekeeping_pruned_files_total",
+ Help: "Total number of files pruned",
+ },
+ []string{"filetype"},
+ ),
}
}
@@ -57,4 +65,5 @@ func (m *RepositoryManager) Describe(descs chan<- *prometheus.Desc) {
func (m *RepositoryManager) Collect(metrics chan<- prometheus.Metric) {
m.tasksTotal.Collect(metrics)
m.tasksLatency.Collect(metrics)
+ m.prunedFilesTotal.Collect(metrics)
}
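
RepositoryManager exposes its metrics by implementing prometheus.Collector itself and forwarding Collect to each vector, which is why adding prunedFilesTotal only requires the new struct field, constructor entry, and Collect line above. A reduced sketch of that pattern follows; the tasksTotal metric name is invented for illustration (the diff does not show it), only gitaly_housekeeping_pruned_files_total matches the diff, and Describe uses DescribeByCollect for brevity rather than mirroring Gitaly's actual method body.

package housekeepingsketch

import "github.com/prometheus/client_golang/prometheus"

// manager owns its metric vectors; registering the manager once exposes all
// of them.
type manager struct {
	tasksTotal       *prometheus.CounterVec
	prunedFilesTotal *prometheus.CounterVec
}

func newManager() *manager {
	return &manager{
		// Name is illustrative only; the real name is not part of this diff.
		tasksTotal: prometheus.NewCounterVec(
			prometheus.CounterOpts{Name: "example_housekeeping_tasks_total", Help: "Housekeeping tasks performed"},
			[]string{"housekeeping_task"},
		),
		prunedFilesTotal: prometheus.NewCounterVec(
			prometheus.CounterOpts{Name: "gitaly_housekeeping_pruned_files_total", Help: "Total number of files pruned"},
			[]string{"filetype"},
		),
	}
}

// Describe implements prometheus.Collector.
func (m *manager) Describe(descs chan<- *prometheus.Desc) {
	prometheus.DescribeByCollect(m, descs)
}

// Collect implements prometheus.Collector.
func (m *manager) Collect(metrics chan<- prometheus.Metric) {
	m.tasksTotal.Collect(metrics)
	m.prunedFilesTotal.Collect(metrics)
}

A caller would then register the whole manager in one call, for example prometheus.MustRegister(newManager()); where Gitaly performs that registration is outside this diff.
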
diff --git a/internal/git/housekeeping/optimize_repository.go b/internal/git/housekeeping/optimize_repository.go
index 9fb60031d..20c0032eb 100644
--- a/internal/git/housekeeping/optimize_repository.go
+++ b/internal/git/housekeeping/optimize_repository.go
@@ -25,18 +25,22 @@ func (m *RepositoryManager) OptimizeRepository(ctx context.Context, repo *localr
totalTimer := prometheus.NewTimer(m.tasksLatency.WithLabelValues("total"))
optimizations := struct {
- PackedObjects bool `json:"packed_objects"`
- PrunedObjects bool `json:"pruned_objects"`
- PackedRefs bool `json:"packed_refs"`
+ PackedObjectsIncremental bool `json:"packed_objects_incremental"`
+ PackedObjectsFull bool `json:"packed_objects_full"`
+ PrunedObjects bool `json:"pruned_objects"`
+ PackedRefs bool `json:"packed_refs"`
+ WrittenBitmap bool `json:"written_bitmap"`
}{}
defer func() {
totalTimer.ObserveDuration()
ctxlogrus.Extract(ctx).WithField("optimizations", optimizations).Info("optimized repository")
for task, executed := range map[string]bool{
- "packed_objects": optimizations.PackedObjects,
- "pruned_objects": optimizations.PrunedObjects,
- "packed_refs": optimizations.PackedRefs,
+ "packed_objects_incremental": optimizations.PackedObjectsIncremental,
+ "packed_objects_full": optimizations.PackedObjectsFull,
+ "pruned_objects": optimizations.PrunedObjects,
+ "packed_refs": optimizations.PackedRefs,
+ "written_bitmap": optimizations.WrittenBitmap,
} {
if executed {
m.tasksTotal.WithLabelValues(task).Add(1)
@@ -57,11 +61,15 @@ func (m *RepositoryManager) OptimizeRepository(ctx context.Context, repo *localr
timer.ObserveDuration()
timer = prometheus.NewTimer(m.tasksLatency.WithLabelValues("repack"))
- didRepack, err := repackIfNeeded(ctx, repo)
+ didRepack, repackCfg, err := repackIfNeeded(ctx, repo)
if err != nil {
return fmt.Errorf("could not repack: %w", err)
}
- optimizations.PackedObjects = didRepack
+ if didRepack {
+ optimizations.PackedObjectsIncremental = !repackCfg.FullRepack
+ optimizations.PackedObjectsFull = repackCfg.FullRepack
+ optimizations.WrittenBitmap = repackCfg.WriteBitmap
+ }
timer.ObserveDuration()
timer = prometheus.NewTimer(m.tasksLatency.WithLabelValues("prune"))
@@ -85,21 +93,21 @@ func (m *RepositoryManager) OptimizeRepository(ctx context.Context, repo *localr
// repackIfNeeded uses a set of heuristics to determine whether the repository needs a
// full repack and, if so, repacks it.
-func repackIfNeeded(ctx context.Context, repo *localrepo.Repo) (bool, error) {
+func repackIfNeeded(ctx context.Context, repo *localrepo.Repo) (bool, RepackObjectsConfig, error) {
repackNeeded, cfg, err := needsRepacking(repo)
if err != nil {
- return false, fmt.Errorf("determining whether repo needs repack: %w", err)
+ return false, RepackObjectsConfig{}, fmt.Errorf("determining whether repo needs repack: %w", err)
}
if !repackNeeded {
- return false, nil
+ return false, RepackObjectsConfig{}, nil
}
if err := RepackObjects(ctx, repo, cfg); err != nil {
- return false, err
+ return false, RepackObjectsConfig{}, err
}
- return true, nil
+ return true, cfg, nil
}
func needsRepacking(repo *localrepo.Repo) (bool, RepackObjectsConfig, error) {
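
With repackIfNeeded now returning the RepackObjectsConfig it used, OptimizeRepository can distinguish incremental from full repacks and record whether a bitmap was written, which also changes the shape of the "optimizations" log field. A quick, hedged way to see the resulting JSON keys (the boolean values below are made up):

package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	// Same anonymous struct as in OptimizeRepository after this change.
	optimizations := struct {
		PackedObjectsIncremental bool `json:"packed_objects_incremental"`
		PackedObjectsFull        bool `json:"packed_objects_full"`
		PrunedObjects            bool `json:"pruned_objects"`
		PackedRefs               bool `json:"packed_refs"`
		WrittenBitmap            bool `json:"written_bitmap"`
	}{
		PackedObjectsFull: true,
		WrittenBitmap:     true,
	}

	out, _ := json.Marshal(optimizations)
	fmt.Println(string(out))
	// {"packed_objects_incremental":false,"packed_objects_full":true,"pruned_objects":false,"packed_refs":false,"written_bitmap":true}
}
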
diff --git a/internal/git/housekeeping/optimize_repository_ext_test.go b/internal/git/housekeeping/optimize_repository_ext_test.go
index 2f0a5ed10..e12a172d7 100644
--- a/internal/git/housekeeping/optimize_repository_ext_test.go
+++ b/internal/git/housekeeping/optimize_repository_ext_test.go
@@ -121,13 +121,17 @@ func TestPruneIfNeeded(t *testing.T) {
require.NoError(t, housekeeping.NewManager(nil).OptimizeRepository(ctx, repo))
require.Equal(t,
struct {
- PackedObjects bool `json:"packed_objects"`
- PrunedObjects bool `json:"pruned_objects"`
- PackedRefs bool `json:"packed_refs"`
+ PackedObjectsIncremental bool `json:"packed_objects_incremental"`
+ PackedObjectsFull bool `json:"packed_objects_full"`
+ PrunedObjects bool `json:"pruned_objects"`
+ PackedRefs bool `json:"packed_refs"`
+ WrittenBitmap bool `json:"written_bitmap"`
}{
- PackedObjects: true,
- PrunedObjects: tc.expectedPrune,
- PackedRefs: false,
+ PackedObjectsIncremental: false,
+ PackedObjectsFull: true,
+ PrunedObjects: tc.expectedPrune,
+ PackedRefs: false,
+ WrittenBitmap: true,
},
hook.Entries[len(hook.Entries)-1].Data["optimizations"],
)
diff --git a/internal/git/housekeeping/optimize_repository_test.go b/internal/git/housekeeping/optimize_repository_test.go
index 3bad8ede3..2533b7b99 100644
--- a/internal/git/housekeeping/optimize_repository_test.go
+++ b/internal/git/housekeeping/optimize_repository_test.go
@@ -479,7 +479,8 @@ func TestOptimizeRepository(t *testing.T) {
return repo
},
expectedOptimizations: map[string]float64{
- "packed_objects": 1,
+ "packed_objects_full": 1,
+ "written_bitmap": 1,
},
},
{
@@ -489,7 +490,8 @@ func TestOptimizeRepository(t *testing.T) {
return repo
},
expectedOptimizations: map[string]float64{
- "packed_objects": 1,
+ "packed_objects_full": 1,
+ "written_bitmap": 1,
},
},
{
@@ -500,7 +502,8 @@ func TestOptimizeRepository(t *testing.T) {
return repo
},
expectedOptimizations: map[string]float64{
- "packed_objects": 1,
+ "packed_objects_full": 1,
+ "written_bitmap": 1,
},
},
{
@@ -530,8 +533,8 @@ func TestOptimizeRepository(t *testing.T) {
return repo
},
expectedOptimizations: map[string]float64{
- "packed_objects": 1,
- "pruned_objects": 1,
+ "packed_objects_incremental": 1,
+ "pruned_objects": 1,
},
},
{
@@ -565,9 +568,11 @@ func TestOptimizeRepository(t *testing.T) {
require.Equal(t, tc.expectedErr, err)
for _, metric := range []string{
- "packed_objects",
+ "packed_objects_incremental",
+ "packed_objects_full",
"pruned_objects",
"packed_refs",
+ "written_bitmap",
} {
value := testutil.ToFloat64(manager.tasksTotal.WithLabelValues(metric))
require.Equal(t, tc.expectedOptimizations[metric], value, metric)