1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
|
package rubyserver
import (
"fmt"
"sync"
"testing"
"time"
"github.com/stretchr/testify/require"
"gitlab.com/gitlab-org/gitaly/v15/internal/git/gittest"
"gitlab.com/gitlab-org/gitaly/v15/internal/testhelper"
"gitlab.com/gitlab-org/gitaly/v15/internal/testhelper/testcfg"
"google.golang.org/grpc/codes"
healthpb "google.golang.org/grpc/health/grpc_health_v1"
"google.golang.org/grpc/status"
)
// waitPing blocks until the gitaly-ruby server answers a health-check
// request, failing the test if it does not become responsive within
// ConnectTimeout. Readiness is polled every 100ms.
func waitPing(tb testing.TB, s *Server) {
	ready := func() bool {
		return makeRequest(tb, s) == nil
	}
	require.Eventually(tb, ready, ConnectTimeout, 100*time.Millisecond)
}
// This benchmark lets you see what happens when you throw a lot of
// concurrent traffic at gitaly-ruby.
func BenchmarkConcurrency(b *testing.B) {
	cfg := testcfg.Build(b)
	// Deliberately few workers so the benchmark exercises contention on
	// the worker pool rather than scaling with it.
	cfg.Ruby.NumWorkers = 2
	s := New(cfg, gittest.NewCommandFactory(b, cfg))
	require.NoError(b, s.Start())
	defer s.Stop()
	// Wait until the server answers health checks before hammering it,
	// so startup latency does not pollute the measurement.
	waitPing(b, s)
	concurrency := 100
	b.Run(fmt.Sprintf("concurrency %d", concurrency), func(b *testing.B) {
		errCh := make(chan error)
		errCount := make(chan int)
		// Collector goroutine: logs every error sent by the workers and,
		// once errCh is closed, reports the total via errCount. Funnelling
		// all logging through one goroutine keeps b.Log calls serialized.
		go func() {
			count := 0
			for err := range errCh {
				b.Log(err)
				count++
			}
			errCount <- count
		}()
		wg := &sync.WaitGroup{}
		for i := 0; i < concurrency; i++ {
			wg.Add(1)
			go func() {
				defer wg.Done()
				// NOTE: the request volume is fixed (concurrency * 1000)
				// and does not scale with b.N.
				for j := 0; j < 1000; j++ {
					err := makeRequest(b, s)
					if err != nil {
						errCh <- err
					}
					// Give up early on errors that indicate the server is
					// gone or overloaded; retrying would only pile on.
					switch status.Code(err) {
					case codes.Unavailable:
						return
					case codes.DeadlineExceeded:
						return
					}
				}
			}()
		}
		// Ordering matters: all senders must be done before errCh is
		// closed, and the close is what lets the collector publish its
		// count on errCount.
		wg.Wait()
		close(errCh)
		if count := <-errCount; count != 0 {
			b.Fatalf("received %d errors", count)
		}
	})
}
// makeRequest issues a single gRPC health-check request against the
// gitaly-ruby server and returns the connection or RPC error, if any.
// A nil return means the server is up and serving.
func makeRequest(tb testing.TB, s *Server) error {
	ctx := testhelper.Context(tb)

	conn, err := s.getConnection(ctx)
	if err != nil {
		return err
	}

	_, err = healthpb.NewHealthClient(conn).Check(ctx, &healthpb.HealthCheckRequest{})
	return err
}
|