package praefect

import (
	"context"
	"fmt"
	"os"
	"os/signal"
	"sync"
	"syscall"

	"github.com/golang/protobuf/protoc-gen-go/descriptor"
	"github.com/sirupsen/logrus"
	"google.golang.org/grpc"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"

	gitalyauth "gitlab.com/gitlab-org/gitaly/auth"
	"gitlab.com/gitlab-org/gitaly/client"
	gitalyconfig "gitlab.com/gitlab-org/gitaly/internal/config"
	"gitlab.com/gitlab-org/gitaly/internal/helper"
	"gitlab.com/gitlab-org/gitaly/internal/praefect/grpc-proxy/proxy"
	"gitlab.com/gitlab-org/gitaly/internal/praefect/models"
	"gitlab.com/gitlab-org/gitaly/internal/praefect/protoregistry"
)

// Coordinator takes care of directing client requests to the appropriate
// downstream server. The coordinator is thread safe; concurrent calls to
// register nodes are safe.
type Coordinator struct {
	log           *logrus.Logger
	failoverMutex sync.RWMutex
	connMutex     sync.RWMutex

	datastore Datastore

	nodes    map[string]*grpc.ClientConn
	registry *protoregistry.Registry
}

// NewCoordinator returns a new Coordinator that uses the provided logger and
// datastore, and registers the supplied proto file descriptors.
func NewCoordinator(l *logrus.Logger, datastore Datastore, fileDescriptors ...*descriptor.FileDescriptorProto) *Coordinator {
	registry := protoregistry.New()
	if err := registry.RegisterFiles(fileDescriptors...); err != nil {
		// Log registration failures so misconfigured descriptors are visible
		// instead of being silently dropped.
		l.WithError(err).Error("failed to register proto files")
	}

	return &Coordinator{
		log:       l,
		datastore: datastore,
		nodes:     make(map[string]*grpc.ClientConn),
		registry:  registry,
	}
}
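
// bootstrapCoordinatorSketch is an editor-added sketch, not part of the
// original file: it shows how the pieces above are typically wired together.
// The storage name and listen address are hypothetical placeholders; real
// values come from praefect's configuration.
func bootstrapCoordinatorSketch(l *logrus.Logger, datastore Datastore, fileDescriptors ...*descriptor.FileDescriptorProto) (*Coordinator, error) {
	// Build the coordinator and register the proto files it should know about.
	coordinator := NewCoordinator(l, datastore, fileDescriptors...)

	// Register a single Gitaly node for an example storage location.
	if err := coordinator.RegisterNode("praefect-internal-1", "tcp://gitaly-1.internal:8075"); err != nil {
		return nil, fmt.Errorf("registering gitaly node: %v", err)
	}

	return coordinator, nil
}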

// RegisterProtos allows the coordinator to register new protos on the fly
func (c *Coordinator) RegisterProtos(protos ...*descriptor.FileDescriptorProto) error {
	return c.registry.RegisterFiles(protos...)
}

// GetStorageNode returns the registered node for the given storage location
func (c *Coordinator) GetStorageNode(storage string) (Node, error) {
	cc, ok := c.getConn(storage)
	if !ok {
		return Node{}, fmt.Errorf("no node registered for storage location %q", storage)
	}

	return Node{
		Storage: storage,
		cc:      cc,
	}, nil
}

// streamDirector determines which downstream servers receive requests
func (c *Coordinator) streamDirector(ctx context.Context, fullMethodName string, peeker proxy.StreamModifier) (context.Context, *grpc.ClientConn, error) {
	// For phase 1, we need to route messages based on the storage location
	// to the appropriate Gitaly node.
	c.log.Debugf("Stream director received method %s", fullMethodName)

	c.failoverMutex.RLock()
	defer c.failoverMutex.RUnlock()

	serverConfig, err := c.datastore.GetDefaultPrimary()
	if err != nil {
		err := status.Error(
			codes.FailedPrecondition,
			"no downstream node registered",
		)
		return nil, nil, err
	}

	// We only need the primary node, as there's only one primary storage
	// location per praefect at this time
	cc, ok := c.getConn(serverConfig.Name)
	if !ok {
		return nil, nil, fmt.Errorf("unable to find existing client connection for %s", serverConfig.Name)
	}

	ctx, err = helper.InjectGitalyServers(ctx, serverConfig.Name, serverConfig.ListenAddr, serverConfig.Token)
	if err != nil {
		return nil, nil, err
	}

	return ctx, cc, nil
}
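
// proxyServerSketch is an editor-added sketch, not part of the original file:
// it illustrates how a stream director like the one above is typically
// consumed, by handing it to the grpc-proxy transparent handler so that every
// otherwise-unhandled RPC is forwarded to whichever connection the director
// returns. The exact server options praefect uses may differ.
func proxyServerSketch(c *Coordinator) *grpc.Server {
	return grpc.NewServer(
		grpc.CustomCodec(proxy.Codec()),
		grpc.UnknownServiceHandler(proxy.TransparentHandler(c.streamDirector)),
	)
}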

// RegisterNode dials the given listen address and registers the resulting
// connection under the supplied storage name, so traffic for that storage
// location can be directed to it.
func (c *Coordinator) RegisterNode(storageName, listenAddr string) error {
	conn, err := client.Dial(listenAddr,
		[]grpc.DialOption{
			grpc.WithDefaultCallOptions(grpc.CallCustomCodec(proxy.Codec())),
			grpc.WithPerRPCCredentials(gitalyauth.RPCCredentials(gitalyconfig.Config.Auth.Token)),
		},
	)
	if err != nil {
		return err
	}

	c.setConn(storageName, conn)

	return nil
}
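
// registerNodesSketch is an editor-added sketch, not part of the original
// file: it registers every configured Gitaly server with the coordinator.
// The slice of servers would normally come from praefect's configuration.
func registerNodesSketch(c *Coordinator, servers []models.GitalyServer) error {
	for _, server := range servers {
		if err := c.RegisterNode(server.Name, server.ListenAddr); err != nil {
			return fmt.Errorf("registering node %q: %v", server.Name, err)
		}
	}

	return nil
}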

func (c *Coordinator) setConn(storageName string, conn *grpc.ClientConn) {
	c.connMutex.Lock()
	c.nodes[storageName] = conn
	c.connMutex.Unlock()
}

func (c *Coordinator) getConn(storageName string) (*grpc.ClientConn, bool) {
	c.connMutex.RLock()
	cc, ok := c.nodes[storageName]
	c.connMutex.RUnlock()

	return cc, ok
}

// FailoverRotation listens for SIGUSR1; each signal promotes the first secondary of every shard to be the new primary
func (c *Coordinator) FailoverRotation() {
	c.handleSignalAndRotate()
}
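
// Editor's note, not part of the original file: FailoverRotation never
// returns, because handleSignalAndRotate loops on the signal channel, so the
// caller is expected to start it in its own goroutine and then trigger a
// rotation by signalling the praefect process, for example:
//
//	go coordinator.FailoverRotation()
//	// from a shell: kill -USR1 <praefect pid>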

func (c *Coordinator) handleSignalAndRotate() {
	failoverChan := make(chan os.Signal, 1)
	signal.Notify(failoverChan, syscall.SIGUSR1)

	for {
		<-failoverChan

		c.failoverMutex.Lock()
		primary, err := c.datastore.GetDefaultPrimary()
		if err != nil {
			c.log.Fatalf("error when getting default primary: %v", err)
		}

		if err := c.rotateSecondaryToPrimary(primary); err != nil {
			c.log.WithError(err).Error("rotating secondary")
		}
		c.failoverMutex.Unlock()
	}
}

func (c *Coordinator) rotateSecondaryToPrimary(primary models.GitalyServer) error {
	repositories, err := c.datastore.GetRepositoriesForPrimary(primary)
	if err != nil {
		return err
	}

	if len(repositories) == 0 {
		return fmt.Errorf("no repositories found for primary %q", primary.Name)
	}

	for _, repoPath := range repositories {
		secondaries, err := c.datastore.GetShardSecondaries(models.Repository{
			RelativePath: repoPath,
		})
		if err != nil {
			return fmt.Errorf("getting secondaries: %v", err)
		}

		if len(secondaries) == 0 {
			return fmt.Errorf("no secondaries registered for repository %q", repoPath)
		}

		// Promote the first secondary and demote the current primary to the
		// tail of the secondary list.
		newPrimary := secondaries[0]
		secondaries = append(secondaries[1:], primary)

		if err = c.datastore.SetShardPrimary(models.Repository{
			RelativePath: repoPath,
		}, newPrimary); err != nil {
			return fmt.Errorf("setting primary: %v", err)
		}

		if err = c.datastore.SetShardSecondaries(models.Repository{
			RelativePath: repoPath,
		}, secondaries); err != nil {
			return fmt.Errorf("setting secondaries: %v", err)
		}
	}

	// Use the first repository's new shard primary as the new default primary.
	primary, err = c.datastore.GetShardPrimary(models.Repository{
		RelativePath: repositories[0],
	})
	if err != nil {
		return fmt.Errorf("getting shard primary: %v", err)
	}

	return c.datastore.SetDefaultPrimary(primary)
}
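
// Editor's note, not part of the original file: a small worked example of the
// rotation above, assuming a repository whose shard currently has primary P
// and secondaries [S1, S2]:
//
//	newPrimary  = S1        // secondaries[0]
//	secondaries = [S2, P]   // old primary demoted to the tail of the list
//
// Every repository served by P has its shard updated this way, and the shard
// primary of the first repository then becomes the new default primary.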