Welcome to mirror list, hosted at ThFree Co, Russian Federation.

gitlab.com/gitlab-org/gitaly.git - Unnamed repository; edit this file 'description' to name the repository.
summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorToon Claes <toon@gitlab.com>2023-07-07 16:52:04 +0300
committerToon Claes <toon@gitlab.com>2023-07-07 16:52:04 +0300
commitaeae6e3ccd967477b4ee9a55e2c1b22840645f50 (patch)
tree0889d260a1ed29540ef093c65b1f5fa5099c34a5
parentc74ec4b1cfe6888d774dba9c897d372e955808c9 (diff)
parent80467fac7efc11fe9f8d5712141d6a4c9af2e812 (diff)
Merge branch 'pks-houskeeping-enable-geometric-repacking' into 'master'
housekeeping: Always enable geometric repacking strategy Closes #5031 See merge request https://gitlab.com/gitlab-org/gitaly/-/merge_requests/6031 Merged-by: Toon Claes <toon@gitlab.com> Approved-by: Toon Claes <toon@gitlab.com> Approved-by: Justin Tobler <jtobler@gitlab.com> Co-authored-by: Patrick Steinhardt <psteinhardt@gitlab.com>
-rw-r--r--internal/featureflag/ff_geometric_repacking.go10
-rw-r--r--internal/git/housekeeping/optimization_strategy.go371
-rw-r--r--internal/git/housekeeping/optimization_strategy_test.go276
-rw-r--r--internal/git/housekeeping/optimize_repository_ext_test.go53
-rw-r--r--internal/git/housekeeping/optimize_repository_test.go83
-rw-r--r--internal/git/housekeeping/testhelper_test.go9
-rw-r--r--internal/git/objectpool/fetch_test.go49
-rw-r--r--internal/gitaly/service/objectpool/fetch_into_object_pool_test.go24
-rw-r--r--internal/gitaly/service/repository/optimize_test.go7
9 files changed, 235 insertions, 647 deletions
diff --git a/internal/featureflag/ff_geometric_repacking.go b/internal/featureflag/ff_geometric_repacking.go
deleted file mode 100644
index 8665b9d21..000000000
--- a/internal/featureflag/ff_geometric_repacking.go
+++ /dev/null
@@ -1,10 +0,0 @@
-package featureflag
-
-// GeometricRepacking will start to use geometric repacking of objects in the context of repository
-// maintenance.
-var GeometricRepacking = NewFeatureFlag(
- "geometric_repacking",
- "v15.11.0",
- "https://gitlab.com/gitlab-org/gitaly/-/issues/5031",
- true,
-)
diff --git a/internal/git/housekeeping/optimization_strategy.go b/internal/git/housekeeping/optimization_strategy.go
index 7ac3563f7..b48c20563 100644
--- a/internal/git/housekeeping/optimization_strategy.go
+++ b/internal/git/housekeeping/optimization_strategy.go
@@ -5,7 +5,6 @@ import (
"math"
"time"
- "gitlab.com/gitlab-org/gitaly/v16/internal/featureflag"
"gitlab.com/gitlab-org/gitaly/v16/internal/git"
"gitlab.com/gitlab-org/gitaly/v16/internal/git/stats"
)
@@ -62,189 +61,20 @@ func (s HeuristicalOptimizationStrategy) ShouldRepackObjects(ctx context.Context
return false, RepackObjectsConfig{}
}
- if featureflag.GeometricRepacking.IsEnabled(ctx) {
- nonCruftPackfilesCount := s.info.Packfiles.Count - s.info.Packfiles.CruftCount
- timeSinceLastFullRepack := time.Since(s.info.Packfiles.LastFullRepack)
-
- fullRepackCfg := RepackObjectsConfig{
- // We use the full-with-unreachable strategy to also pack all unreachable
- // objects into the packfile. This only happens for object pools though,
- // as they should never delete objects.
- Strategy: RepackObjectsStrategyFullWithUnreachable,
- // We cannot write bitmaps when there are alternates as we don't have full
- // closure of all objects in the packfile.
- WriteBitmap: len(s.info.Alternates.ObjectDirectories) == 0,
- // We rewrite all packfiles into a single one and thus change the layout
- // that was indexed by the multi-pack-index. We thus need to update it, as
- // well.
- WriteMultiPackIndex: true,
- }
- if !s.info.IsObjectPool {
- // When we don't have an object pool at hand we want to be able to expire
- // unreachable objects. We thus use cruft packs with an expiry date.
- fullRepackCfg.Strategy = RepackObjectsStrategyFullWithCruft
- fullRepackCfg.CruftExpireBefore = s.expireBefore
- }
-
- geometricRepackCfg := RepackObjectsConfig{
- Strategy: RepackObjectsStrategyGeometric,
- // We cannot write bitmaps when there are alternates as we don't have full
- // closure of all objects in the packfile.
- WriteBitmap: len(s.info.Alternates.ObjectDirectories) == 0,
- // We're rewriting packfiles that may be part of the multi-pack-index, so we
- // do want to update it to reflect the new layout.
- WriteMultiPackIndex: true,
- }
-
- // Incremental repacks only pack unreachable objects into a new pack. As we only
- // perform this kind of repack in the case where the overall repository structure
- // looks good to us we try to use the least amount of resources to update them.
- // We thus neither update the multi-pack-index nor do we update bitmaps.
- incrementalRepackCfg := RepackObjectsConfig{
- Strategy: RepackObjectsStrategyIncrementalWithUnreachable,
- WriteBitmap: false,
- WriteMultiPackIndex: false,
- }
-
- // When alternative object directories have been modified since our last full repack
- // then we have likely joined an object pool since then. This means that we'll want
- // to perform a full repack in order to deduplicate objects that are part of the
- // object pool.
- if s.info.Alternates.LastModified.After(s.info.Packfiles.LastFullRepack) {
- return true, fullRepackCfg
- }
-
- // It is mandatory for us that we perform regular full repacks in repositories so
- // that we can evict objects which are unreachable into a separate cruft pack. So in
- // the case where we have more than one non-cruft packfile and the time since our
- // last full repack is longer than the grace period we'll perform a full repack.
- //
- // This heuristic is simple on purpose: customers care about when objects will be
- // declared as unreachable and when the pruning grace period starts as it impacts
- // usage quotas. So with this simple policy we can tell customers that we evict and
- // expire unreachable objects on a regular schedule.
- //
- // On the other hand, for object pools, we also need to perform regular full
- // repacks. The reason is different though, as we don't ever delete objects from
- // pool repositories anyway.
- //
- // Geometric repacking does not take delta islands into account as it does not
- // perform a graph walk. We need proper delta islands though so that packfiles can
- // be efficiently served across forks of a repository.
- //
- // Once a full repack has been performed, the deltas will be carried forward even
- // across geometric repacks. That being said, the quality of our delta islands will
- // regress over time as new objects are pulled into the pool repository.
- //
- // So we perform regular full repacks in the repository to ensure that the delta
- // islands will be "freshened" again. If geometric repacks ever learn to take delta
- // islands into account we can get rid of this condition and only do geometric
- // repacks.
- if nonCruftPackfilesCount > 1 && timeSinceLastFullRepack > FullRepackCooldownPeriod {
- return true, fullRepackCfg
- }
-
- // In case both packfiles and loose objects are in a good state, but we don't yet
- // have a multi-pack-index we perform an incremental repack to generate one. We need
- // to have multi-pack-indices for the next heuristic, so it's bad if it was missing.
- if !s.info.Packfiles.MultiPackIndex.Exists {
- return true, geometricRepackCfg
- }
-
- // Last but not least, we also need to take into account whether new packfiles have
- // been written into the repository since our last geometric repack. This is
- // necessary so that we can enforce the geometric sequence of packfiles and to make
- // sure that the multi-pack-index tracks those new packfiles.
- //
- // To calculate this we use the number of packfiles tracked by the multi-pack index:
- // the difference between the total number of packfiles and the number of packfiles
- // tracked by the index is the amount of packfiles written since the last geometric
- // repack. As we only update the MIDX during housekeeping this metric should in
- // theory be accurate.
- //
- // Theoretically, we could perform a geometric repack whenever there is at least one
- // untracked packfile as git-repack(1) would exit early in case it finds that the
- // geometric sequence is kept. But there are multiple reasons why we want to avoid
- // this:
- //
- // - We would end up spawning git-repack(1) on almost every single repository
- // optimization, but ideally we want to be lazy and do only as much work as is
- // really required.
- //
- // - While we wouldn't need to repack objects in case the geometric sequence is kept
- // anyway, we'd still need to update the multi-pack-index. This action scales with
- // the number of overall objects in the repository.
- //
- // Instead, we use a strategy that heuristically determines whether the repository
- // has too many untracked packfiles and scale the number with the combined size of
- // all packfiles. The intent is to perform geometric repacks less often the larger
- // the repository, also because larger repositories tend to be more active, too.
- //
- // The formula we use is:
- //
- // log(total_packfile_size) / log(1.8)
- //
- // Which gives us the following allowed number of untracked packfiles:
- //
- // -----------------------------------------------------
- // | total packfile size | allowed untracked packfiles |
- // -----------------------------------------------------
- // | none or <10MB | 2 |
- // | 10MB | 3 |
- // | 100MB | 7 |
- // | 500MB | 10 |
- // | 1GB | 11 |
- // | 5GB | 14 |
- // | 10GB | 15 |
- // | 100GB | 19 |
- // -----------------------------------------------------
- allowedLowerLimit := 2.0
- allowedUpperLimit := math.Log(float64(s.info.Packfiles.Size/1024/1024)) / math.Log(1.8)
- actualLimit := math.Max(allowedLowerLimit, allowedUpperLimit)
-
- untrackedPackfiles := s.info.Packfiles.Count - s.info.Packfiles.MultiPackIndex.PackfileCount
-
- if untrackedPackfiles > uint64(actualLimit) {
- return true, geometricRepackCfg
- }
-
- // If there are loose objects then we want to roll them up into a new packfile.
- // Loose objects naturally accumulate during day-to-day operations, e.g. when
- // executing RPCs part of the OperationsService which write objects into the repo
- // directly.
- //
- // As we have already verified that the packfile structure looks okay-ish to us, we
- // don't need to perform a geometric repack here as that could be expensive: we
- // might end up soaking up packfiles because the geometric sequence is not intact,
- // but more importantly we would end up writing the multi-pack-index and potentially
- // a bitmap. Writing these data structures introduces overhead that scales with the
- // number of objects in the repository.
- //
- // So instead, we only do an incremental repack of all loose objects, regardless of
- // their reachability. This is the cheapest we can do: we don't need to compute
- // whether objects are reachable and we don't need to update any data structures
- // that scale with the repository size.
- if s.info.LooseObjects.Count > looseObjectLimit {
- return true, incrementalRepackCfg
- }
-
- return false, RepackObjectsConfig{}
- }
+ nonCruftPackfilesCount := s.info.Packfiles.Count - s.info.Packfiles.CruftCount
+ timeSinceLastFullRepack := time.Since(s.info.Packfiles.LastFullRepack)
fullRepackCfg := RepackObjectsConfig{
- // We use the full-with-unreachable strategy to also pack all unreachable objects
- // into the packfile. This only happens for object pools though, as they should
- // never delete objects.
- //
- // Note that this is quite inefficient, as all unreachable objects will be exploded
- // into loose objects now. This is fixed in our geometric repacking strategy, where
- // we append unreachable objects to the new pack.
- Strategy: RepackObjectsStrategyFullWithLooseUnreachable,
- // We cannot write bitmaps when there are alternates as we don't have full closure
- // of all objects in the packfile.
+ // We use the full-with-unreachable strategy to also pack all unreachable
+ // objects into the packfile. This only happens for object pools though,
+ // as they should never delete objects.
+ Strategy: RepackObjectsStrategyFullWithUnreachable,
+ // We cannot write bitmaps when there are alternates as we don't have full
+ // closure of all objects in the packfile.
WriteBitmap: len(s.info.Alternates.ObjectDirectories) == 0,
- // We want to always update the multi-pack-index while we're already at it repacking
- // some of the objects.
+ // We rewrite all packfiles into a single one and thus change the layout
+ // that was indexed by the multi-pack-index. We thus need to update it, as
+ // well.
WriteMultiPackIndex: true,
}
if !s.info.IsObjectPool {
@@ -254,86 +84,145 @@ func (s HeuristicalOptimizationStrategy) ShouldRepackObjects(ctx context.Context
fullRepackCfg.CruftExpireBefore = s.expireBefore
}
- incrementalRepackCfg := RepackObjectsConfig{
- Strategy: RepackObjectsStrategyIncremental,
- // We cannot write bitmaps when there are alternates as we don't have full closure
- // of all objects in the packfile.
+ geometricRepackCfg := RepackObjectsConfig{
+ Strategy: RepackObjectsStrategyGeometric,
+ // We cannot write bitmaps when there are alternates as we don't have full
+ // closure of all objects in the packfile.
WriteBitmap: len(s.info.Alternates.ObjectDirectories) == 0,
- // We want to always update the multi-pack-index while we're already at it repacking
- // some of the objects.
+ // We're rewriting packfiles that may be part of the multi-pack-index, so we
+ // do want to update it to reflect the new layout.
WriteMultiPackIndex: true,
}
- // When alternative object directories have been modified since our last full repack then we
- // have likely joined an object pool since then. This means that we'll want to perform a
- // full repack in order to deduplicate objects that are part of the object pool.
+ // Incremental repacks only pack unreachable objects into a new pack. As we only
+ // perform this kind of repack in the case where the overall repository structure
+ // looks good to us we try to use the least amount of resources to update them.
+ // We thus neither update the multi-pack-index nor do we update bitmaps.
+ incrementalRepackCfg := RepackObjectsConfig{
+ Strategy: RepackObjectsStrategyIncrementalWithUnreachable,
+ WriteBitmap: false,
+ WriteMultiPackIndex: false,
+ }
+
+ // When alternative object directories have been modified since our last full repack
+ // then we have likely joined an object pool since then. This means that we'll want
+ // to perform a full repack in order to deduplicate objects that are part of the
+ // object pool.
if s.info.Alternates.LastModified.After(s.info.Packfiles.LastFullRepack) {
return true, fullRepackCfg
}
- // Whenever we do an incremental repack we create a new packfile, and as a result Git may
- // have to look into every one of the packfiles to find objects. This is less efficient the
- // more packfiles we have, but we cannot repack the whole repository every time either given
- // that this may take a lot of time.
+ // It is mandatory for us that we perform regular full repacks in repositories so
+ // that we can evict objects which are unreachable into a separate cruft pack. So in
+ // the case where we have more than one non-cruft packfile and the time since our
+ // last full repack is longer than the grace period we'll perform a full repack.
//
- // Instead, we determine whether the repository has "too many" packfiles. "Too many" is
- // relative though: for small repositories it's fine to do full repacks regularly, but for
- // large repositories we need to be more careful. We thus use a heuristic of "repository
- // largeness": we take the total size of all packfiles, and then the maximum allowed number
- // of packfiles is `log(total_packfile_size) / log(1.3)` for normal repositories and
- // `log(total_packfile_size) / log(10.0)` for pools. This gives the following allowed number
- // of packfiles:
+ // This heuristic is simple on purpose: customers care about when objects will be
+ // declared as unreachable and when the pruning grace period starts as it impacts
+ // usage quotas. So with this simple policy we can tell customers that we evict and
+ // expire unreachable objects on a regular schedule.
//
- // -----------------------------------------------------------------------------------
- // | total packfile size | allowed packfiles for repos | allowed packfiles for pools |
- // -----------------------------------------------------------------------------------
- // | none or <10MB | 5 | 2 |
- // | 10MB | 8 | 2 |
- // | 100MB | 17 | 2 |
- // | 500MB | 23 | 2 |
- // | 1GB | 26 | 3 |
- // | 5GB | 32 | 3 |
- // | 10GB | 35 | 4 |
- // | 100GB | 43 | 5 |
- // -----------------------------------------------------------------------------------
+ // On the other hand, for object pools, we also need to perform regular full
+ // repacks. The reason is different though, as we don't ever delete objects from
+ // pool repositories anyway.
//
- // The goal is to have a comparatively quick ramp-up of allowed packfiles as the repository
- // size grows, but then slow down such that we're effectively capped and don't end up with
- // an excessive amount of packfiles. On the other hand, pool repositories are potentially
- // reused as basis for many forks and should thus be packed much more aggressively.
+ // Geometric repacking does not take delta islands into account as it does not
+ // perform a graph walk. We need proper delta islands though so that packfiles can
+ // be efficiently served across forks of a repository.
//
- // This is a heuristic and thus imperfect by necessity. We may tune it as we gain experience
- // with the way it behaves.
- lowerLimit, log := 5.0, 1.3
- if s.info.IsObjectPool {
- lowerLimit, log = 2.0, 10.0
+ // Once a full repack has been performed, the deltas will be carried forward even
+ // across geometric repacks. That being said, the quality of our delta islands will
+ // regress over time as new objects are pulled into the pool repository.
+ //
+ // So we perform regular full repacks in the repository to ensure that the delta
+ // islands will be "freshened" again. If geometric repacks ever learn to take delta
+ // islands into account we can get rid of this condition and only do geometric
+ // repacks.
+ if nonCruftPackfilesCount > 1 && timeSinceLastFullRepack > FullRepackCooldownPeriod {
+ return true, fullRepackCfg
}
- if uint64(math.Max(lowerLimit,
- math.Log(float64(s.info.Packfiles.Size/1024/1024))/math.Log(log))) <= s.info.Packfiles.Count {
- return true, fullRepackCfg
+ // In case both packfiles and loose objects are in a good state, but we don't yet
+ // have a multi-pack-index we perform an incremental repack to generate one. We need
+ // to have multi-pack-indices for the next heuristic, so it's bad if it was missing.
+ if !s.info.Packfiles.MultiPackIndex.Exists {
+ return true, geometricRepackCfg
}
- // Most Git commands do not write packfiles directly, but instead write loose objects into
- // the object database. So while we now know that there ain't too many packfiles, we still
- // need to check whether we have too many objects.
+ // Last but not least, we also need to take into account whether new packfiles have
+ // been written into the repository since our last geometric repack. This is
+ // necessary so that we can enforce the geometric sequence of packfiles and to make
+ // sure that the multi-pack-index tracks those new packfiles.
//
- // In this case it doesn't make a lot of sense to scale incremental repacks with the repo's
- // size: we only pack loose objects, so the time to pack them doesn't scale with repository
- // size but with the number of loose objects we have. git-gc(1) uses a threshold of 6700
- // loose objects to start an incremental repack, but one needs to keep in mind that Git
- // typically has defaults which are better suited for the client-side instead of the
- // server-side in most commands.
+ // To calculate this we use the number of packfiles tracked by the multi-pack index:
+ // the difference between the total number of packfiles and the number of packfiles
+ // tracked by the index is the amount of packfiles written since the last geometric
+ // repack. As we only update the MIDX during housekeeping this metric should in
+ // theory be accurate.
//
- // In our case we typically want to ensure that our repositories are much better packed than
- // it is necessary on the client side. We thus take a much stricter limit of 1024 objects.
- if s.info.LooseObjects.Count > looseObjectLimit {
- return true, incrementalRepackCfg
+ // Theoretically, we could perform a geometric repack whenever there is at least one
+ // untracked packfile as git-repack(1) would exit early in case it finds that the
+ // geometric sequence is kept. But there are multiple reasons why we want to avoid
+ // this:
+ //
+ // - We would end up spawning git-repack(1) on almost every single repository
+ // optimization, but ideally we want to be lazy and do only as much work as is
+ // really required.
+ //
+ // - While we wouldn't need to repack objects in case the geometric sequence is kept
+ // anyway, we'd still need to update the multi-pack-index. This action scales with
+ // the number of overall objects in the repository.
+ //
+ // Instead, we use a strategy that heuristically determines whether the repository
+ // has too many untracked packfiles and scale the number with the combined size of
+ // all packfiles. The intent is to perform geometric repacks less often the larger
+ // the repository, also because larger repositories tend to be more active, too.
+ //
+ // The formula we use is:
+ //
+ // log(total_packfile_size) / log(1.8)
+ //
+ // Which gives us the following allowed number of untracked packfiles:
+ //
+ // -----------------------------------------------------
+ // | total packfile size | allowed untracked packfiles |
+ // -----------------------------------------------------
+ // | none or <10MB | 2 |
+ // | 10MB | 3 |
+ // | 100MB | 7 |
+ // | 500MB | 10 |
+ // | 1GB | 11 |
+ // | 5GB | 14 |
+ // | 10GB | 15 |
+ // | 100GB | 19 |
+ // -----------------------------------------------------
+ allowedLowerLimit := 2.0
+ allowedUpperLimit := math.Log(float64(s.info.Packfiles.Size/1024/1024)) / math.Log(1.8)
+ actualLimit := math.Max(allowedLowerLimit, allowedUpperLimit)
+
+ untrackedPackfiles := s.info.Packfiles.Count - s.info.Packfiles.MultiPackIndex.PackfileCount
+
+ if untrackedPackfiles > uint64(actualLimit) {
+ return true, geometricRepackCfg
}
- // In case both packfiles and loose objects are in a good state, but we don't yet have a
- // multi-pack-index we perform an incremental repack to generate one.
- if !s.info.Packfiles.MultiPackIndex.Exists {
+ // If there are loose objects then we want to roll them up into a new packfile.
+ // Loose objects naturally accumulate during day-to-day operations, e.g. when
+ // executing RPCs part of the OperationsService which write objects into the repo
+ // directly.
+ //
+ // As we have already verified that the packfile structure looks okay-ish to us, we
+ // don't need to perform a geometric repack here as that could be expensive: we
+ // might end up soaking up packfiles because the geometric sequence is not intact,
+ // but more importantly we would end up writing the multi-pack-index and potentially
+ // a bitmap. Writing these data structures introduces overhead that scales with the
+ // number of objects in the repository.
+ //
+ // So instead, we only do an incremental repack of all loose objects, regardless of
+ // their reachability. This is the cheapest we can do: we don't need to compute
+ // whether objects are reachable and we don't need to update any data structures
+ // that scale with the repository size.
+ if s.info.LooseObjects.Count > looseObjectLimit {
return true, incrementalRepackCfg
}
diff --git a/internal/git/housekeeping/optimization_strategy_test.go b/internal/git/housekeeping/optimization_strategy_test.go
index 7075300f7..f31f02e25 100644
--- a/internal/git/housekeeping/optimization_strategy_test.go
+++ b/internal/git/housekeeping/optimization_strategy_test.go
@@ -2,12 +2,10 @@ package housekeeping
import (
"context"
- "fmt"
"testing"
"time"
"github.com/stretchr/testify/require"
- "gitlab.com/gitlab-org/gitaly/v16/internal/featureflag"
"gitlab.com/gitlab-org/gitaly/v16/internal/git"
"gitlab.com/gitlab-org/gitaly/v16/internal/git/stats"
"gitlab.com/gitlab-org/gitaly/v16/internal/testhelper"
@@ -15,11 +13,8 @@ import (
func TestHeuristicalOptimizationStrategy_ShouldRepackObjects(t *testing.T) {
t.Parallel()
- testhelper.NewFeatureSets(featureflag.GeometricRepacking).Run(t, testHeuristicalOptimizationStrategyShouldRepackObjects)
-}
-func testHeuristicalOptimizationStrategyShouldRepackObjects(t *testing.T, ctx context.Context) {
- t.Parallel()
+ ctx := testhelper.Context(t)
for _, tc := range []struct {
desc string
@@ -45,10 +40,7 @@ func testHeuristicalOptimizationStrategyShouldRepackObjects(t *testing.T, ctx co
},
expectedNeeded: true,
expectedConfig: RepackObjectsConfig{
- Strategy: geometricOrIncremental(ctx,
- RepackObjectsStrategyGeometric,
- RepackObjectsStrategyIncremental,
- ),
+ Strategy: RepackObjectsStrategyGeometric,
WriteBitmap: true,
WriteMultiPackIndex: true,
},
@@ -76,10 +68,7 @@ func testHeuristicalOptimizationStrategyShouldRepackObjects(t *testing.T, ctx co
// exist in pooled repositories.
expectedNeeded: true,
expectedConfig: RepackObjectsConfig{
- Strategy: geometricOrIncremental(ctx,
- RepackObjectsStrategyGeometric,
- RepackObjectsStrategyIncremental,
- ),
+ Strategy: RepackObjectsStrategyGeometric,
WriteMultiPackIndex: true,
},
},
@@ -97,10 +86,7 @@ func testHeuristicalOptimizationStrategyShouldRepackObjects(t *testing.T, ctx co
},
expectedNeeded: true,
expectedConfig: RepackObjectsConfig{
- Strategy: geometricOrIncremental(ctx,
- RepackObjectsStrategyGeometric,
- RepackObjectsStrategyIncremental,
- ),
+ Strategy: RepackObjectsStrategyGeometric,
WriteBitmap: true,
WriteMultiPackIndex: true,
},
@@ -157,15 +143,12 @@ func testHeuristicalOptimizationStrategyShouldRepackObjects(t *testing.T, ctx co
},
},
},
- expectedNeeded: geometricOrIncremental(ctx, true, false),
- expectedConfig: geometricOrIncremental(ctx,
- RepackObjectsConfig{
- Strategy: RepackObjectsStrategyFullWithCruft,
- WriteBitmap: true,
- WriteMultiPackIndex: true,
- },
- RepackObjectsConfig{},
- ),
+ expectedNeeded: true,
+ expectedConfig: RepackObjectsConfig{
+ Strategy: RepackObjectsStrategyFullWithCruft,
+ WriteBitmap: true,
+ WriteMultiPackIndex: true,
+ },
},
{
desc: "old tracked packfiles with cruft pack will not be repacked",
@@ -206,15 +189,8 @@ func testHeuristicalOptimizationStrategyShouldRepackObjects(t *testing.T, ctx co
},
},
},
- expectedNeeded: geometricOrIncremental(ctx, false, true),
- expectedConfig: geometricOrIncremental(ctx,
- RepackObjectsConfig{},
- RepackObjectsConfig{
- Strategy: RepackObjectsStrategyFullWithLooseUnreachable,
- WriteBitmap: true,
- WriteMultiPackIndex: true,
- },
- ),
+ expectedNeeded: false,
+ expectedConfig: RepackObjectsConfig{},
},
{
desc: "old tracked packfiles in pool repository will be repacked",
@@ -235,18 +211,11 @@ func testHeuristicalOptimizationStrategyShouldRepackObjects(t *testing.T, ctx co
},
},
expectedNeeded: true,
- expectedConfig: geometricOrIncremental(ctx,
- RepackObjectsConfig{
- Strategy: RepackObjectsStrategyFullWithUnreachable,
- WriteBitmap: true,
- WriteMultiPackIndex: true,
- },
- RepackObjectsConfig{
- Strategy: RepackObjectsStrategyFullWithLooseUnreachable,
- WriteBitmap: true,
- WriteMultiPackIndex: true,
- },
- ),
+ expectedConfig: RepackObjectsConfig{
+ Strategy: RepackObjectsStrategyFullWithUnreachable,
+ WriteBitmap: true,
+ WriteMultiPackIndex: true,
+ },
},
{
desc: "few untracked packfiles will not get repacked",
@@ -266,15 +235,8 @@ func testHeuristicalOptimizationStrategyShouldRepackObjects(t *testing.T, ctx co
},
},
},
- expectedNeeded: geometricOrIncremental(ctx, false, true),
- expectedConfig: geometricOrIncremental(ctx,
- RepackObjectsConfig{},
- RepackObjectsConfig{
- Strategy: RepackObjectsStrategyFullWithCruft,
- WriteBitmap: true,
- WriteMultiPackIndex: true,
- },
- ),
+ expectedNeeded: false,
+ expectedConfig: RepackObjectsConfig{},
},
{
desc: "many untracked packfiles will get repacked",
@@ -295,18 +257,11 @@ func testHeuristicalOptimizationStrategyShouldRepackObjects(t *testing.T, ctx co
},
},
expectedNeeded: true,
- expectedConfig: geometricOrIncremental(ctx,
- RepackObjectsConfig{
- Strategy: RepackObjectsStrategyGeometric,
- WriteBitmap: true,
- WriteMultiPackIndex: true,
- },
- RepackObjectsConfig{
- Strategy: RepackObjectsStrategyFullWithCruft,
- WriteBitmap: true,
- WriteMultiPackIndex: true,
- },
- ),
+ expectedConfig: RepackObjectsConfig{
+ Strategy: RepackObjectsStrategyGeometric,
+ WriteBitmap: true,
+ WriteMultiPackIndex: true,
+ },
},
{
desc: "larger packfiles allow more untracked packfiles",
@@ -345,15 +300,12 @@ func testHeuristicalOptimizationStrategyShouldRepackObjects(t *testing.T, ctx co
},
},
},
- expectedNeeded: geometricOrIncremental(ctx, true, false),
- expectedConfig: geometricOrIncremental(ctx,
- RepackObjectsConfig{
- Strategy: RepackObjectsStrategyGeometric,
- WriteBitmap: true,
- WriteMultiPackIndex: true,
- },
- RepackObjectsConfig{},
- ),
+ expectedNeeded: true,
+ expectedConfig: RepackObjectsConfig{
+ Strategy: RepackObjectsStrategyGeometric,
+ WriteBitmap: true,
+ WriteMultiPackIndex: true,
+ },
},
{
desc: "more tracked packfiles than exist will repack to update MIDX",
@@ -375,15 +327,12 @@ func testHeuristicalOptimizationStrategyShouldRepackObjects(t *testing.T, ctx co
},
},
},
- expectedNeeded: geometricOrIncremental(ctx, true, false),
- expectedConfig: geometricOrIncremental(ctx,
- RepackObjectsConfig{
- Strategy: RepackObjectsStrategyGeometric,
- WriteBitmap: true,
- WriteMultiPackIndex: true,
- },
- RepackObjectsConfig{},
- ),
+ expectedNeeded: true,
+ expectedConfig: RepackObjectsConfig{
+ Strategy: RepackObjectsStrategyGeometric,
+ WriteBitmap: true,
+ WriteMultiPackIndex: true,
+ },
},
{
desc: "geometric repack in object pool member with recent Git version",
@@ -404,18 +353,11 @@ func testHeuristicalOptimizationStrategyShouldRepackObjects(t *testing.T, ctx co
},
},
expectedNeeded: true,
- expectedConfig: geometricOrIncremental(ctx,
- RepackObjectsConfig{
- Strategy: RepackObjectsStrategyGeometric,
- WriteBitmap: false,
- WriteMultiPackIndex: true,
- },
- RepackObjectsConfig{
- Strategy: RepackObjectsStrategyFullWithCruft,
- WriteBitmap: false,
- WriteMultiPackIndex: true,
- },
- ),
+ expectedConfig: RepackObjectsConfig{
+ Strategy: RepackObjectsStrategyGeometric,
+ WriteBitmap: false,
+ WriteMultiPackIndex: true,
+ },
},
{
desc: "alternates modified after last full repack",
@@ -469,129 +411,6 @@ func testHeuristicalOptimizationStrategyShouldRepackObjects(t *testing.T, ctx co
}
for _, outerTC := range []struct {
- packfileSizeInMB uint64
- requiredPackfiles uint64
- requiredPackfilesForPool uint64
- }{
- {
- packfileSizeInMB: 1,
- requiredPackfiles: 5,
- requiredPackfilesForPool: 2,
- },
- {
- packfileSizeInMB: 5,
- requiredPackfiles: 6,
- requiredPackfilesForPool: 2,
- },
- {
- packfileSizeInMB: 10,
- requiredPackfiles: 8,
- requiredPackfilesForPool: 2,
- },
- {
- packfileSizeInMB: 50,
- requiredPackfiles: 14,
- requiredPackfilesForPool: 2,
- },
- {
- packfileSizeInMB: 100,
- requiredPackfiles: 17,
- requiredPackfilesForPool: 2,
- },
- {
- packfileSizeInMB: 500,
- requiredPackfiles: 23,
- requiredPackfilesForPool: 2,
- },
- {
- packfileSizeInMB: 1001,
- requiredPackfiles: 26,
- requiredPackfilesForPool: 3,
- },
- } {
- // These tests don't really make any sense in the context of geometric repacking as
- // we don't care for the size of packfiles anymore. The test can be deleted once the
- // old strategy has been removed.
- if featureflag.GeometricRepacking.IsEnabled(ctx) {
- break
- }
-
- t.Run(fmt.Sprintf("packfile with %dMB", outerTC.packfileSizeInMB), func(t *testing.T) {
- for _, tc := range []struct {
- desc string
- isPool bool
- alternates []string
- requiredPackfiles uint64
- }{
- {
- desc: "normal repository",
- isPool: false,
- requiredPackfiles: outerTC.requiredPackfiles,
- },
- {
- desc: "pooled repository",
- isPool: false,
- alternates: []string{"something"},
- requiredPackfiles: outerTC.requiredPackfiles,
- },
- {
- desc: "object pool",
- isPool: true,
- requiredPackfiles: outerTC.requiredPackfilesForPool,
- },
- } {
- t.Run(tc.desc, func(t *testing.T) {
- expireBefore := time.Now()
- strategy := HeuristicalOptimizationStrategy{
- info: stats.RepositoryInfo{
- IsObjectPool: tc.isPool,
- Packfiles: stats.PackfilesInfo{
- Size: outerTC.packfileSizeInMB * 1024 * 1024,
- Count: tc.requiredPackfiles - 1,
- Bitmap: stats.BitmapInfo{
- Exists: true,
- },
- MultiPackIndex: stats.MultiPackIndexInfo{
- Exists: true,
- },
- },
- Alternates: stats.AlternatesInfo{
- ObjectDirectories: tc.alternates,
- },
- },
- expireBefore: expireBefore,
- }
-
- if tc.isPool {
- expireBefore = time.Time{}
- }
-
- repackNeeded, _ := strategy.ShouldRepackObjects(ctx)
- require.False(t, repackNeeded)
-
- // Now we add the last packfile that should bring us across
- // the boundary of having to repack.
- strategy.info.Packfiles.Count++
-
- repackNeeded, repackCfg := strategy.ShouldRepackObjects(ctx)
- require.True(t, repackNeeded)
- require.Equal(t, RepackObjectsConfig{
- Strategy: func() RepackObjectsStrategy {
- if !tc.isPool {
- return RepackObjectsStrategyFullWithCruft
- }
- return RepackObjectsStrategyFullWithLooseUnreachable
- }(),
- WriteBitmap: len(tc.alternates) == 0,
- WriteMultiPackIndex: true,
- CruftExpireBefore: expireBefore,
- }, repackCfg)
- })
- }
- })
- }
-
- for _, outerTC := range []struct {
desc string
looseObjects uint64
expectedRepack bool
@@ -655,15 +474,10 @@ func testHeuristicalOptimizationStrategyShouldRepackObjects(t *testing.T, ctx co
require.Equal(t, RepackObjectsConfig{
Strategy: func() RepackObjectsStrategy {
if repackNeeded {
- return geometricOrIncremental(ctx,
- RepackObjectsStrategyIncrementalWithUnreachable,
- RepackObjectsStrategyIncremental,
- )
+ return RepackObjectsStrategyIncrementalWithUnreachable
}
return ""
}(),
- WriteBitmap: repackNeeded && geometricOrIncremental(ctx, false, true),
- WriteMultiPackIndex: repackNeeded && geometricOrIncremental(ctx, false, true),
}, repackCfg)
})
}
@@ -810,11 +624,7 @@ func TestHeuristicalOptimizationStrategy_ShouldRepackReferences(t *testing.T) {
func TestHeuristicalOptimizationStrategy_NeedsWriteCommitGraph(t *testing.T) {
t.Parallel()
- testhelper.NewFeatureSets(featureflag.GeometricRepacking).Run(t, testHeuristicalOptimizationStrategyNeedsWriteCommitGraph)
-}
-
-func testHeuristicalOptimizationStrategyNeedsWriteCommitGraph(t *testing.T, ctx context.Context) {
- t.Parallel()
+ ctx := testhelper.Context(t)
for _, tc := range []struct {
desc string
diff --git a/internal/git/housekeeping/optimize_repository_ext_test.go b/internal/git/housekeeping/optimize_repository_ext_test.go
index b27fe0a1a..5b99a3325 100644
--- a/internal/git/housekeeping/optimize_repository_ext_test.go
+++ b/internal/git/housekeeping/optimize_repository_ext_test.go
@@ -1,7 +1,6 @@
package housekeeping_test
import (
- "context"
"fmt"
"os"
"path/filepath"
@@ -12,7 +11,6 @@ import (
"github.com/sirupsen/logrus"
"github.com/sirupsen/logrus/hooks/test"
"github.com/stretchr/testify/require"
- "gitlab.com/gitlab-org/gitaly/v16/internal/featureflag"
"gitlab.com/gitlab-org/gitaly/v16/internal/git/gittest"
"gitlab.com/gitlab-org/gitaly/v16/internal/git/housekeeping"
"gitlab.com/gitlab-org/gitaly/v16/internal/git/localrepo"
@@ -26,12 +24,8 @@ import (
func TestPruneIfNeeded(t *testing.T) {
t.Parallel()
- testhelper.NewFeatureSets(featureflag.GeometricRepacking).Run(t, testPruneIfNeeded)
-}
-
-func testPruneIfNeeded(t *testing.T, ctx context.Context) {
- t.Parallel()
+ ctx := testhelper.Context(t)
cfg := testcfg.Build(t)
cfg.SocketPath = testserver.RunGitalyServer(t, cfg, setup.RegisterAll)
@@ -53,9 +47,9 @@ func testPruneIfNeeded(t *testing.T, ctx context.Context) {
filepath.Join("ab/12345"),
},
expectedLogEntries: map[string]string{
- geometricOrIncrementalLog(ctx): "success",
- "written_bitmap": "success",
- "written_multi_pack_index": "success",
+ "packed_objects_geometric": "success",
+ "written_bitmap": "success",
+ "written_multi_pack_index": "success",
},
},
{
@@ -64,9 +58,9 @@ func testPruneIfNeeded(t *testing.T, ctx context.Context) {
filepath.Join("17/12345"),
},
expectedLogEntries: map[string]string{
- geometricOrIncrementalLog(ctx): "success",
- "written_bitmap": "success",
- "written_multi_pack_index": "success",
+ "packed_objects_geometric": "success",
+ "written_bitmap": "success",
+ "written_multi_pack_index": "success",
},
},
{
@@ -78,9 +72,9 @@ func testPruneIfNeeded(t *testing.T, ctx context.Context) {
filepath.Join("17/12345"),
},
expectedLogEntries: map[string]string{
- geometricOrIncrementalLog(ctx): "success",
- "written_bitmap": "success",
- "written_multi_pack_index": "success",
+ "packed_objects_geometric": "success",
+ "written_bitmap": "success",
+ "written_multi_pack_index": "success",
},
},
{
@@ -98,9 +92,9 @@ func testPruneIfNeeded(t *testing.T, ctx context.Context) {
return looseObjects
}(),
expectedLogEntries: map[string]string{
- geometricOrIncrementalLog(ctx): "success",
- "written_bitmap": "success",
- "written_multi_pack_index": "success",
+ "packed_objects_geometric": "success",
+ "written_bitmap": "success",
+ "written_multi_pack_index": "success",
},
},
{
@@ -121,9 +115,9 @@ func testPruneIfNeeded(t *testing.T, ctx context.Context) {
return &t
}(),
expectedLogEntries: map[string]string{
- geometricOrIncrementalLog(ctx): "success",
- "written_bitmap": "success",
- "written_multi_pack_index": "success",
+ "packed_objects_geometric": "success",
+ "written_bitmap": "success",
+ "written_multi_pack_index": "success",
},
},
{
@@ -144,10 +138,10 @@ func testPruneIfNeeded(t *testing.T, ctx context.Context) {
return &t
}(),
expectedLogEntries: map[string]string{
- geometricOrIncrementalLog(ctx): "success",
- "pruned_objects": "success",
- "written_bitmap": "success",
- "written_multi_pack_index": "success",
+ "packed_objects_geometric": "success",
+ "pruned_objects": "success",
+ "written_bitmap": "success",
+ "written_multi_pack_index": "success",
},
},
} {
@@ -186,10 +180,3 @@ func testPruneIfNeeded(t *testing.T, ctx context.Context) {
})
}
}
-
-func geometricOrIncrementalLog(ctx context.Context) string {
- if featureflag.GeometricRepacking.IsEnabled(ctx) {
- return "packed_objects_geometric"
- }
- return "packed_objects_incremental"
-}
diff --git a/internal/git/housekeeping/optimize_repository_test.go b/internal/git/housekeeping/optimize_repository_test.go
index 84b878f29..0b96e7a02 100644
--- a/internal/git/housekeeping/optimize_repository_test.go
+++ b/internal/git/housekeeping/optimize_repository_test.go
@@ -15,7 +15,6 @@ import (
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"gitlab.com/gitlab-org/gitaly/v16/internal/command"
- "gitlab.com/gitlab-org/gitaly/v16/internal/featureflag"
"gitlab.com/gitlab-org/gitaly/v16/internal/git"
"gitlab.com/gitlab-org/gitaly/v16/internal/git/gittest"
"gitlab.com/gitlab-org/gitaly/v16/internal/git/localrepo"
@@ -448,12 +447,8 @@ func TestPackRefsIfNeeded(t *testing.T) {
func TestOptimizeRepository(t *testing.T) {
t.Parallel()
- testhelper.NewFeatureSets(featureflag.GeometricRepacking).Run(t, testOptimizeRepository)
-}
-
-func testOptimizeRepository(t *testing.T, ctx context.Context) {
- t.Parallel()
+ ctx := testhelper.Context(t)
cfg := testcfg.Build(t)
txManager := transaction.NewManager(cfg, backchannel.NewRegistry())
@@ -484,8 +479,6 @@ func testOptimizeRepository(t *testing.T, ctx context.Context) {
return packs
}
- geometricOrIncrementalMetric := geometricOrIncremental(ctx, "packed_objects_geometric", "packed_objects_incremental")
-
type metric struct {
name, status string
count int
@@ -530,7 +523,7 @@ func testOptimizeRepository(t *testing.T, ctx context.Context) {
return setupData{
repo: localrepo.NewTestRepo(t, cfg, repo),
expectedMetrics: []metric{
- {name: geometricOrIncrementalMetric, status: "success", count: 1},
+ {name: "packed_objects_geometric", status: "success", count: 1},
{name: "written_commit_graph_full", status: "success", count: 1},
{name: "written_bitmap", status: "success", count: 1},
{name: "written_multi_pack_index", status: "success", count: 1},
@@ -551,7 +544,7 @@ func testOptimizeRepository(t *testing.T, ctx context.Context) {
return setupData{
repo: localrepo.NewTestRepo(t, cfg, repo),
expectedMetrics: []metric{
- {name: geometricOrIncrementalMetric, status: "success", count: 1},
+ {name: "packed_objects_geometric", status: "success", count: 1},
{name: "written_bitmap", status: "success", count: 1},
{name: "written_commit_graph_full", status: "success", count: 1},
{name: "written_multi_pack_index", status: "success", count: 1},
@@ -573,7 +566,7 @@ func testOptimizeRepository(t *testing.T, ctx context.Context) {
return setupData{
repo: localrepo.NewTestRepo(t, cfg, repo),
expectedMetrics: []metric{
- {name: geometricOrIncrementalMetric, status: "success", count: 1},
+ {name: "packed_objects_geometric", status: "success", count: 1},
{name: "written_bitmap", status: "success", count: 1},
{name: "written_commit_graph_full", status: "success", count: 1},
{name: "written_multi_pack_index", status: "success", count: 1},
@@ -594,7 +587,7 @@ func testOptimizeRepository(t *testing.T, ctx context.Context) {
return setupData{
repo: localrepo.NewTestRepo(t, cfg, repo),
expectedMetrics: []metric{
- {name: geometricOrIncrementalMetric, status: "success", count: 1},
+ {name: "packed_objects_geometric", status: "success", count: 1},
{name: "written_bitmap", status: "success", count: 1},
{name: "written_commit_graph_full", status: "success", count: 1},
{name: "written_multi_pack_index", status: "success", count: 1},
@@ -623,38 +616,20 @@ func testOptimizeRepository(t *testing.T, ctx context.Context) {
return setupData{
repo: localrepo.NewTestRepo(t, cfg, repo),
- expectedMetrics: geometricOrIncremental(ctx,
- []metric{
- {name: "packed_objects_full_with_cruft", status: "success", count: 1},
- {name: "written_bitmap", status: "success", count: 1},
- {name: "written_commit_graph_full", status: "success", count: 1},
- {name: "written_multi_pack_index", status: "success", count: 1},
- {name: "total", status: "success", count: 1},
- },
- []metric{
- {name: geometricOrIncrementalMetric, status: "success", count: 1},
- {name: "written_bitmap", status: "success", count: 1},
- {name: "written_commit_graph_incremental", status: "success", count: 1},
- {name: "written_multi_pack_index", status: "success", count: 1},
- {name: "total", status: "success", count: 1},
- },
- ),
- expectedMetricsForPool: geometricOrIncremental(ctx,
- []metric{
- {name: "packed_objects_full_with_unreachable", status: "success", count: 1},
- {name: "written_bitmap", status: "success", count: 1},
- {name: "written_commit_graph_incremental", status: "success", count: 1},
- {name: "written_multi_pack_index", status: "success", count: 1},
- {name: "total", status: "success", count: 1},
- },
- []metric{
- {name: "packed_objects_full_with_loose_unreachable", status: "success", count: 1},
- {name: "written_bitmap", status: "success", count: 1},
- {name: "written_commit_graph_incremental", status: "success", count: 1},
- {name: "written_multi_pack_index", status: "success", count: 1},
- {name: "total", status: "success", count: 1},
- },
- ),
+ expectedMetrics: []metric{
+ {name: "packed_objects_full_with_cruft", status: "success", count: 1},
+ {name: "written_bitmap", status: "success", count: 1},
+ {name: "written_commit_graph_full", status: "success", count: 1},
+ {name: "written_multi_pack_index", status: "success", count: 1},
+ {name: "total", status: "success", count: 1},
+ },
+ expectedMetricsForPool: []metric{
+ {name: "packed_objects_full_with_unreachable", status: "success", count: 1},
+ {name: "written_bitmap", status: "success", count: 1},
+ {name: "written_commit_graph_incremental", status: "success", count: 1},
+ {name: "written_multi_pack_index", status: "success", count: 1},
+ {name: "total", status: "success", count: 1},
+ },
}
},
},
@@ -671,7 +646,7 @@ func testOptimizeRepository(t *testing.T, ctx context.Context) {
return setupData{
repo: localrepo.NewTestRepo(t, cfg, repo),
expectedMetrics: []metric{
- {name: geometricOrIncrementalMetric, status: "success", count: 1},
+ {name: "packed_objects_geometric", status: "success", count: 1},
{name: "written_bitmap", status: "success", count: 1},
{name: "written_commit_graph_incremental", status: "success", count: 1},
{name: "written_multi_pack_index", status: "success", count: 1},
@@ -727,7 +702,7 @@ func testOptimizeRepository(t *testing.T, ctx context.Context) {
return setupData{
repo: localrepo.NewTestRepo(t, cfg, repo),
expectedMetrics: []metric{
- {name: geometricOrIncrementalMetric, status: "success", count: 1},
+ {name: "packed_objects_geometric", status: "success", count: 1},
{name: "written_bitmap", status: "success", count: 1},
{name: "written_commit_graph_incremental", status: "success", count: 1},
{name: "written_multi_pack_index", status: "success", count: 1},
@@ -762,7 +737,7 @@ func testOptimizeRepository(t *testing.T, ctx context.Context) {
return setupData{
repo: localrepo.NewTestRepo(t, cfg, repo),
expectedMetrics: []metric{
- {name: geometricOrIncrementalMetric, status: "success", count: 1},
+ {name: "packed_objects_geometric", status: "success", count: 1},
{name: "pruned_objects", status: "success", count: 1},
{name: "written_commit_graph_full", status: "success", count: 1},
{name: "written_bitmap", status: "success", count: 1},
@@ -771,7 +746,7 @@ func testOptimizeRepository(t *testing.T, ctx context.Context) {
},
// Object pools never prune objects.
expectedMetricsForPool: []metric{
- {name: geometricOrIncrementalMetric, status: "success", count: 1},
+ {name: "packed_objects_geometric", status: "success", count: 1},
{name: "written_bitmap", status: "success", count: 1},
{name: "written_commit_graph_incremental", status: "success", count: 1},
{name: "written_multi_pack_index", status: "success", count: 1},
@@ -798,7 +773,7 @@ func testOptimizeRepository(t *testing.T, ctx context.Context) {
return setupData{
repo: localrepo.NewTestRepo(t, cfg, repo),
expectedMetrics: []metric{
- {name: geometricOrIncrementalMetric, status: "success", count: 1},
+ {name: "packed_objects_geometric", status: "success", count: 1},
{name: "packed_refs", status: "success", count: 1},
{name: "written_commit_graph_incremental", status: "success", count: 1},
{name: "written_bitmap", status: "success", count: 1},
@@ -828,7 +803,7 @@ func testOptimizeRepository(t *testing.T, ctx context.Context) {
return setupData{
repo: localrepo.NewTestRepo(t, cfg, repo),
expectedMetrics: []metric{
- {name: geometricOrIncrementalMetric, status: "success", count: 1},
+ {name: "packed_objects_geometric", status: "success", count: 1},
{name: "written_commit_graph_full", status: "success", count: 1},
{name: "written_multi_pack_index", status: "success", count: 1},
{name: "total", status: "success", count: 1},
@@ -884,7 +859,7 @@ func testOptimizeRepository(t *testing.T, ctx context.Context) {
return setupData{
repo: localrepo.NewTestRepo(t, cfg, repo),
expectedMetrics: []metric{
- {name: geometricOrIncrementalMetric, status: "success", count: 1},
+ {name: "packed_objects_geometric", status: "success", count: 1},
{name: "written_commit_graph_full", status: "success", count: 1},
{name: "written_multi_pack_index", status: "success", count: 1},
{name: "total", status: "success", count: 1},
@@ -921,7 +896,7 @@ func testOptimizeRepository(t *testing.T, ctx context.Context) {
},
expectedMetricsForPool: []metric{
{
- name: geometricOrIncremental(ctx, "packed_objects_full_with_unreachable", "packed_objects_full_with_loose_unreachable"),
+ name: "packed_objects_full_with_unreachable",
status: "success",
count: 1,
},
@@ -1006,7 +981,7 @@ func testOptimizeRepository(t *testing.T, ctx context.Context) {
return setupData{
repo: localrepo.NewTestRepo(t, cfg, repo),
expectedMetrics: []metric{
- {name: geometricOrIncrementalMetric, status: "success", count: 1},
+ {name: "packed_objects_geometric", status: "success", count: 1},
{name: "written_commit_graph_full", status: "success", count: 1},
{name: "written_multi_pack_index", status: "success", count: 1},
{name: "total", status: "success", count: 1},
@@ -1033,7 +1008,7 @@ func testOptimizeRepository(t *testing.T, ctx context.Context) {
return setupData{
repo: localrepo.New(config.NewLocator(cfg), gitCmdFactory, nil, repo),
expectedMetrics: []metric{
- {name: geometricOrIncrementalMetric, status: "failure", count: 1},
+ {name: "packed_objects_geometric", status: "failure", count: 1},
{name: "written_bitmap", status: "failure", count: 1},
{name: "written_multi_pack_index", status: "failure", count: 1},
{name: "total", status: "failure", count: 1},
diff --git a/internal/git/housekeeping/testhelper_test.go b/internal/git/housekeeping/testhelper_test.go
index 5334733b0..95318872b 100644
--- a/internal/git/housekeeping/testhelper_test.go
+++ b/internal/git/housekeeping/testhelper_test.go
@@ -1,11 +1,9 @@
package housekeeping
import (
- "context"
"testing"
"github.com/stretchr/testify/require"
- "gitlab.com/gitlab-org/gitaly/v16/internal/featureflag"
"gitlab.com/gitlab-org/gitaly/v16/internal/git/gittest"
"gitlab.com/gitlab-org/gitaly/v16/internal/git/localrepo"
"gitlab.com/gitlab-org/gitaly/v16/internal/git/stats"
@@ -55,10 +53,3 @@ func requireObjectsState(tb testing.TB, repo *localrepo.Repo, expectedState obje
hasMultiPackIndexBitmap: repoInfo.Packfiles.MultiPackIndexBitmap.Exists,
})
}
-
-func geometricOrIncremental[T any](ctx context.Context, geometric T, notGeometric T) T {
- if featureflag.GeometricRepacking.IsEnabled(ctx) {
- return geometric
- }
- return notGeometric
-}
diff --git a/internal/git/objectpool/fetch_test.go b/internal/git/objectpool/fetch_test.go
index d5b516d9e..395a3f8dd 100644
--- a/internal/git/objectpool/fetch_test.go
+++ b/internal/git/objectpool/fetch_test.go
@@ -1,7 +1,6 @@
package objectpool
import (
- "context"
"fmt"
"path/filepath"
"strconv"
@@ -11,7 +10,6 @@ import (
"github.com/sirupsen/logrus"
"github.com/sirupsen/logrus/hooks/test"
"github.com/stretchr/testify/require"
- "gitlab.com/gitlab-org/gitaly/v16/internal/featureflag"
"gitlab.com/gitlab-org/gitaly/v16/internal/git"
"gitlab.com/gitlab-org/gitaly/v16/internal/git/gittest"
"gitlab.com/gitlab-org/gitaly/v16/internal/git/stats"
@@ -22,12 +20,8 @@ import (
func TestFetchFromOrigin_dangling(t *testing.T) {
t.Parallel()
- testhelper.NewFeatureSets(featureflag.GeometricRepacking).Run(t, testFetchFromOriginDangling)
-}
-
-func testFetchFromOriginDangling(t *testing.T, ctx context.Context) {
- t.Parallel()
+ ctx := testhelper.Context(t)
cfg, pool, repo := setupObjectPool(t, ctx)
poolPath := gittest.RepositoryPath(t, pool)
repoPath := gittest.RepositoryPath(t, repo)
@@ -86,12 +80,8 @@ func testFetchFromOriginDangling(t *testing.T, ctx context.Context) {
func TestFetchFromOrigin_fsck(t *testing.T) {
t.Parallel()
- testhelper.NewFeatureSets(featureflag.GeometricRepacking).Run(t, testFetchFromOriginFsck)
-}
-
-func testFetchFromOriginFsck(t *testing.T, ctx context.Context) {
- t.Parallel()
+ ctx := testhelper.Context(t)
cfg, pool, repo := setupObjectPool(t, ctx)
repoPath, err := repo.Path()
require.NoError(t, err)
@@ -115,12 +105,8 @@ func testFetchFromOriginFsck(t *testing.T, ctx context.Context) {
func TestFetchFromOrigin_deltaIslands(t *testing.T) {
t.Parallel()
- testhelper.NewFeatureSets(featureflag.GeometricRepacking).Run(t, testFetchFromOriginDeltaIslands)
-}
-
-func testFetchFromOriginDeltaIslands(t *testing.T, ctx context.Context) {
- t.Parallel()
+ ctx := testhelper.Context(t)
cfg, pool, repo := setupObjectPool(t, ctx)
poolPath := gittest.RepositoryPath(t, pool)
repoPath := gittest.RepositoryPath(t, repo)
@@ -145,12 +131,8 @@ func testFetchFromOriginDeltaIslands(t *testing.T, ctx context.Context) {
func TestFetchFromOrigin_bitmapHashCache(t *testing.T) {
t.Parallel()
- testhelper.NewFeatureSets(featureflag.GeometricRepacking).Run(t, testFetchFromOriginBitmapHashCache)
-}
-
-func testFetchFromOriginBitmapHashCache(t *testing.T, ctx context.Context) {
- t.Parallel()
+ ctx := testhelper.Context(t)
cfg, pool, repo := setupObjectPool(t, ctx)
repoPath, err := repo.Path()
require.NoError(t, err)
@@ -175,12 +157,8 @@ func testFetchFromOriginBitmapHashCache(t *testing.T, ctx context.Context) {
func TestFetchFromOrigin_refUpdates(t *testing.T) {
t.Parallel()
- testhelper.NewFeatureSets(featureflag.GeometricRepacking).Run(t, testFetchFromOriginRefUpdates)
-}
-
-func testFetchFromOriginRefUpdates(t *testing.T, ctx context.Context) {
- t.Parallel()
+ ctx := testhelper.Context(t)
cfg, pool, repo := setupObjectPool(t, ctx)
repoPath, err := repo.Path()
require.NoError(t, err)
@@ -226,12 +204,8 @@ func testFetchFromOriginRefUpdates(t *testing.T, ctx context.Context) {
func TestFetchFromOrigin_refs(t *testing.T) {
t.Parallel()
- testhelper.NewFeatureSets(featureflag.GeometricRepacking).Run(t, testFetchFromOriginRefs)
-}
-
-func testFetchFromOriginRefs(t *testing.T, ctx context.Context) {
- t.Parallel()
+ ctx := testhelper.Context(t)
cfg, pool, repo := setupObjectPool(t, ctx)
repoPath, err := repo.Path()
require.NoError(t, err)
@@ -269,12 +243,8 @@ func testFetchFromOriginRefs(t *testing.T, ctx context.Context) {
func TestFetchFromOrigin_missingPool(t *testing.T) {
t.Parallel()
- testhelper.NewFeatureSets(featureflag.GeometricRepacking).Run(t, testFetchFromOriginMissingPool)
-}
-
-func testFetchFromOriginMissingPool(t *testing.T, ctx context.Context) {
- t.Parallel()
+ ctx := testhelper.Context(t)
_, pool, repo := setupObjectPool(t, ctx)
// Remove the object pool to assert that we raise an error when fetching into a non-existent
@@ -287,11 +257,8 @@ func testFetchFromOriginMissingPool(t *testing.T, ctx context.Context) {
func TestObjectPool_logStats(t *testing.T) {
t.Parallel()
- testhelper.NewFeatureSets(featureflag.GeometricRepacking).Run(t, testObjectPoolLogStats)
-}
-func testObjectPoolLogStats(t *testing.T, ctx context.Context) {
- t.Parallel()
+ ctx := testhelper.Context(t)
for _, tc := range []struct {
desc string
diff --git a/internal/gitaly/service/objectpool/fetch_into_object_pool_test.go b/internal/gitaly/service/objectpool/fetch_into_object_pool_test.go
index 176555599..a8ade916c 100644
--- a/internal/gitaly/service/objectpool/fetch_into_object_pool_test.go
+++ b/internal/gitaly/service/objectpool/fetch_into_object_pool_test.go
@@ -13,7 +13,6 @@ import (
"github.com/sirupsen/logrus/hooks/test"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
- "gitlab.com/gitlab-org/gitaly/v16/internal/featureflag"
"gitlab.com/gitlab-org/gitaly/v16/internal/git"
"gitlab.com/gitlab-org/gitaly/v16/internal/git/gittest"
"gitlab.com/gitlab-org/gitaly/v16/internal/git/stats"
@@ -37,12 +36,8 @@ import (
func TestFetchIntoObjectPool_Success(t *testing.T) {
t.Parallel()
- testhelper.NewFeatureSets(featureflag.GeometricRepacking).Run(t, testFetchIntoObjectPoolSuccess)
-}
-
-func testFetchIntoObjectPoolSuccess(t *testing.T, ctx context.Context) {
- t.Parallel()
+ ctx := testhelper.Context(t)
cfg, repo, repoPath, _, client := setup(t, ctx)
parentID := gittest.WriteCommit(t, cfg, repoPath, gittest.WithBranch("main"))
@@ -90,11 +85,8 @@ func testFetchIntoObjectPoolSuccess(t *testing.T, ctx context.Context) {
func TestFetchIntoObjectPool_transactional(t *testing.T) {
t.Parallel()
- testhelper.NewFeatureSets(featureflag.GeometricRepacking).Run(t, testFetchIntoObjectPoolTransactional)
-}
-func testFetchIntoObjectPoolTransactional(t *testing.T, ctx context.Context) {
- t.Parallel()
+ ctx := testhelper.Context(t)
var votes []voting.Vote
var votesMutex sync.Mutex
@@ -206,12 +198,8 @@ func testFetchIntoObjectPoolTransactional(t *testing.T, ctx context.Context) {
func TestFetchIntoObjectPool_CollectLogStatistics(t *testing.T) {
t.Parallel()
- testhelper.NewFeatureSets(featureflag.GeometricRepacking).Run(t, testFetchIntoObjectPoolcollectLogStatistics)
-}
-
-func testFetchIntoObjectPoolcollectLogStatistics(t *testing.T, ctx context.Context) {
- t.Parallel()
+ ctx := testhelper.Context(t)
cfg := testcfg.Build(t)
testcfg.BuildGitalyHooks(t, cfg)
@@ -302,12 +290,8 @@ func TestFetchIntoObjectPool_Failure(t *testing.T) {
func TestFetchIntoObjectPool_dfConflict(t *testing.T) {
t.Parallel()
- testhelper.NewFeatureSets(featureflag.GeometricRepacking).Run(t, testFetchIntoObjectPoolDfConflict)
-}
-
-func testFetchIntoObjectPoolDfConflict(t *testing.T, ctx context.Context) {
- t.Parallel()
+ ctx := testhelper.Context(t)
cfg, repo, repoPath, _, client := setup(t, ctx)
poolProto, _, poolPath := createObjectPool(t, ctx, cfg, repo)
diff --git a/internal/gitaly/service/repository/optimize_test.go b/internal/gitaly/service/repository/optimize_test.go
index 7228f708e..728fd472b 100644
--- a/internal/gitaly/service/repository/optimize_test.go
+++ b/internal/gitaly/service/repository/optimize_test.go
@@ -12,7 +12,6 @@ import (
"github.com/sirupsen/logrus/hooks/test"
"github.com/stretchr/testify/require"
- "gitlab.com/gitlab-org/gitaly/v16/internal/featureflag"
"gitlab.com/gitlab-org/gitaly/v16/internal/git/gittest"
"gitlab.com/gitlab-org/gitaly/v16/internal/git/housekeeping"
"gitlab.com/gitlab-org/gitaly/v16/internal/git/localrepo"
@@ -28,12 +27,8 @@ import (
func TestOptimizeRepository(t *testing.T) {
t.Parallel()
- testhelper.NewFeatureSets(featureflag.GeometricRepacking).Run(t, testOptimizeRepository)
-}
-
-func testOptimizeRepository(t *testing.T, ctx context.Context) {
- t.Parallel()
+ ctx := testhelper.Context(t)
cfg, client := setupRepositoryServiceWithoutRepo(t)
t.Run("gitconfig credentials get pruned", func(t *testing.T) {