package praefect

import (
	"context"
	"fmt"
	"time"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/sirupsen/logrus"
	"golang.org/x/sync/errgroup"
	"google.golang.org/grpc"

	"gitlab.com/gitlab-org/gitaly/internal/helper"
	"gitlab.com/gitlab-org/gitaly/internal/praefect/conn"
	"gitlab.com/gitlab-org/gitaly/internal/praefect/models"
	"gitlab.com/gitlab-org/gitaly/proto/go/gitalypb"
)

var (
	replicationLatency = prometheus.NewHistogram(
		prometheus.HistogramOpts{
			Namespace: "gitaly",
			Subsystem: "praefect",
			Name:      "replication_latency",
			Buckets:   prometheus.LinearBuckets(0, 100, 100),
		},
	)

	replicationJobsInFlight = prometheus.NewGauge(
		prometheus.GaugeOpts{
			Namespace: "gitaly",
			Subsystem: "praefect",
			Name:      "replication_jobs",
		},
	)

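	// These function variables record metric updates asynchronously so that
	// instrumentation never blocks the replication path; being variables,
	// they can also be reassigned, for example in tests.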
	recordReplicationLatency = func(d float64) {
		go replicationLatency.Observe(d)
	}

	incReplicationJobsInFlight = func() {
		go replicationJobsInFlight.Inc()
	}

	decReplicationJobsInFlight = func() {
		go replicationJobsInFlight.Dec()
	}
)

func init() {
	prometheus.MustRegister(replicationLatency)
	prometheus.MustRegister(replicationJobsInFlight)
}

// Replicator performs the actual replication logic between two nodes
type Replicator interface {
	Replicate(ctx context.Context, job ReplJob, source, target *grpc.ClientConn) error
}

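// defaultReplicator is the Replicator implementation used by ReplMgr unless
// another one is injected via WithReplicator.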
type defaultReplicator struct {
	log *logrus.Entry
}

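// Replicate creates the target repository if necessary, fetches the source
// repository into it via FetchInternalRemote and then compares the checksums
// of both repositories, logging an error if they diverge.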
func (dr defaultReplicator) Replicate(ctx context.Context, job ReplJob, sourceCC, targetCC *grpc.ClientConn) error {
	repository := &gitalypb.Repository{
		StorageName:  job.TargetNode.Storage,
		RelativePath: job.Repository.RelativePath,
	}

	remoteRepository := &gitalypb.Repository{
		StorageName:  job.SourceNode.Storage,
		RelativePath: job.Repository.RelativePath,
	}

	repositoryClient := gitalypb.NewRepositoryServiceClient(targetCC)
	remoteClient := gitalypb.NewRemoteServiceClient(targetCC)

	// CreateRepository is idempotent
	if _, err := repositoryClient.CreateRepository(ctx, &gitalypb.CreateRepositoryRequest{
		Repository: repository,
	}); err != nil {
		return fmt.Errorf("failed to create repository: %v", err)
	}

	if _, err := remoteClient.FetchInternalRemote(ctx, &gitalypb.FetchInternalRemoteRequest{
		Repository:       repository,
		RemoteRepository: remoteRepository,
	}); err != nil {
		return err
	}

	checksumsMatch, err := dr.confirmChecksums(ctx, gitalypb.NewRepositoryServiceClient(sourceCC), repositoryClient, remoteRepository, repository)
	if err != nil {
		return err
	}

	// TODO: Do something meaningful with the result of confirmChecksums if checksums do not match
	if !checksumsMatch {
		dr.log.WithFields(logrus.Fields{
			"primary": remoteRepository,
			"replica": repository,
		}).Error("checksums do not match")
	}

	// TODO: ensure attribute files are synced
	// https://gitlab.com/gitlab-org/gitaly/issues/1655

	// TODO: ensure objects/info/alternates are synced
	// https://gitlab.com/gitlab-org/gitaly/issues/1674

	return nil
}

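// getChecksumFunc returns a closure suitable for errgroup.Group.Go that
// calculates the checksum of repo and writes it to checksum.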
func getChecksumFunc(ctx context.Context, client gitalypb.RepositoryServiceClient, repo *gitalypb.Repository, checksum *string) func() error {
	return func() error {
		primaryChecksumRes, err := client.CalculateChecksum(ctx, &gitalypb.CalculateChecksumRequest{
			Repository: repo,
		})
		if err != nil {
			return err
		}
		*checksum = primaryChecksumRes.GetChecksum()
		return nil
	}
}

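// confirmChecksums calculates the checksums of the primary and replica
// repositories concurrently and reports whether they match.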
func (dr defaultReplicator) confirmChecksums(ctx context.Context, primaryClient, replicaClient gitalypb.RepositoryServiceClient, primary, replica *gitalypb.Repository) (bool, error) {
	g, gCtx := errgroup.WithContext(ctx)

	var primaryChecksum, replicaChecksum string

	g.Go(getChecksumFunc(gCtx, primaryClient, primary, &primaryChecksum))
	g.Go(getChecksumFunc(gCtx, replicaClient, replica, &replicaChecksum))

	if err := g.Wait(); err != nil {
		return false, err
	}

	dr.log.WithFields(logrus.Fields{
		"primary":          primary,
		"replica":          replica,
		"primary_checksum": primaryChecksum,
		"replica_checksum": replicaChecksum,
	}).Info("replication finished")

	return primaryChecksum == replicaChecksum, nil
}

// ReplMgr is a replication manager for handling replication jobs
type ReplMgr struct {
	log               *logrus.Entry
	datastore         Datastore
	clientConnections *conn.ClientConnections
	targetNode        string     // which replica is this replicator responsible for?
	replicator        Replicator // does the actual replication logic

	// whitelist contains the project names of the repos we wish to replicate
	whitelist map[string]struct{}
}

// ReplMgrOpt allows a replicator to be configured with additional options
type ReplMgrOpt func(*ReplMgr)

// NewReplMgr initializes a replication manager with the provided dependencies
// and options
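//
// A hypothetical usage sketch (the storage name, whitelist entry, datastore,
// connection registry and ctx values are illustrative only):
//
//	mgr := NewReplMgr("praefect-internal-1", logrus.NewEntry(logrus.New()),
//		datastore, clientConnections,
//		WithWhitelist([]string{"@hashed/ab/cd/abcd.git"}))
//	if err := mgr.ProcessBacklog(ctx); err != nil {
//		logrus.WithError(err).Error("replication backlog processing stopped")
//	}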
func NewReplMgr(targetNode string, log *logrus.Entry, datastore Datastore, c *conn.ClientConnections, opts ...ReplMgrOpt) ReplMgr {
	r := ReplMgr{
		log:               log,
		datastore:         datastore,
		whitelist:         map[string]struct{}{},
		replicator:        defaultReplicator{log},
		targetNode:        targetNode,
		clientConnections: c,
	}

	for _, opt := range opts {
		opt(&r)
	}

	return r
}

// WithWhitelist will configure a whitelist for repos to allow replication
func WithWhitelist(whitelistedRepos []string) ReplMgrOpt {
	return func(r *ReplMgr) {
		for _, repo := range whitelistedRepos {
			r.whitelist[repo] = struct{}{}
		}
	}
}

// WithReplicator overrides the default replicator
func WithReplicator(r Replicator) ReplMgrOpt {
	return func(rm *ReplMgr) {
		rm.replicator = r
	}
}

// ScheduleReplication will store a replication job in the datastore for later
// execution. It filters out projects that are not whitelisted.
// TODO: add a parameter to delay replication
func (r ReplMgr) ScheduleReplication(ctx context.Context, repo models.Repository) error {
	_, ok := r.whitelist[repo.RelativePath]
	if !ok {
		r.log.WithField(logKeyProjectPath, repo.RelativePath).
			Infof("project %q is not whitelisted for replication", repo.RelativePath)
		return nil
	}

	id, err := r.datastore.CreateReplicaReplJobs(repo.RelativePath)
	if err != nil {
		return err
	}

	r.log.WithFields(logrus.Fields{
		logWithReplJobID: id,
		"relative_path":  repo.RelativePath,
	}).Info("replication job created")

	return nil
}

const (
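	// jobFetchInterval is how long ProcessBacklog waits before polling the
	// datastore again when no replication jobs are ready.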
	jobFetchInterval = 10 * time.Millisecond
	logWithReplJobID = "replication_job_id"
)

// ProcessBacklog will process queued replication jobs. It blocks until the
// context is canceled or an error is encountered.
func (r ReplMgr) ProcessBacklog(ctx context.Context) error {
	for {
		nodes, err := r.datastore.GetStorageNodes()
		if err != nil {
			return err
		}

		for _, node := range nodes {
			jobs, err := r.datastore.GetJobs(JobStateReady, node.ID, 10)
			if err != nil {
				return err
			}

			if len(jobs) == 0 {
				r.log.WithFields(logrus.Fields{
					"node_id":          node.ID,
					"recheck_interval": jobFetchInterval,
				}).Trace("no jobs")

				select {
				// TODO: exponential backoff when no queries are returned
				case <-time.After(jobFetchInterval):
					continue

				case <-ctx.Done():
					return ctx.Err()
				}
			}

			for _, job := range jobs {
				r.log.WithFields(logrus.Fields{
					logWithReplJobID: job.ID,
					"from_storage":   job.SourceNode.Storage,
					"to_storage":     job.TargetNode.Storage,
					"relative_path":  job.Repository.RelativePath,
				}).Info("processing replication job")
				if err := r.processReplJob(ctx, job); err != nil {
					return err
				}
			}
		}
	}
}

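// processReplJob marks the job as in progress, replicates from the source to
// the target node and records the job's completion state and replication
// latency.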
func (r ReplMgr) processReplJob(ctx context.Context, job ReplJob) error {
	if err := r.datastore.UpdateReplJob(job.ID, JobStateInProgress); err != nil {
		return err
	}

	targetCC, err := r.clientConnections.GetConnection(job.TargetNode.Storage)
	if err != nil {
		return err
	}

	sourceCC, err := r.clientConnections.GetConnection(job.Repository.Primary.Storage)
	if err != nil {
		return err
	}

	injectedCtx, err := helper.InjectGitalyServers(ctx, job.Repository.Primary.Storage, job.SourceNode.Address, job.SourceNode.Token)
	if err != nil {
		return err
	}

	replStart := time.Now()
	incReplicationJobsInFlight()
	defer decReplicationJobsInFlight()

	if err := r.replicator.Replicate(injectedCtx, job, sourceCC, targetCC); err != nil {
		r.log.WithField(logWithReplJobID, job.ID).WithError(err).Error("error when replicating")
		return err
	}

	replDuration := time.Since(replStart)
	recordReplicationLatency(float64(replDuration / time.Millisecond))

	if err := r.datastore.UpdateReplJob(job.ID, JobStateComplete); err != nil {
		return err
	}
	return nil
}