gitlab.com/gitlab-org/gitaly.git
author     Jacob Vosmaer <jacob@gitlab.com>   2018-11-19 14:55:23 +0300
committer  Jacob Vosmaer <jacob@gitlab.com>   2018-11-19 14:55:23 +0300
commit     2af2321bf60df9bd5a13cf2a0bdde3d8656bd4f2 (patch)
tree       0095b91955cefacfb3c519e65c9b147483d45195
parent     b18e75d041444711a5b776a549b40cb09df18df5 (diff)

Revert "Merge branch 'an-upgrade-grpc' into 'master'" (revert-61f6c927)
This reverts merge request !972
-rw-r--r--  client/dial.go | 44
-rw-r--r--  client/dial_test.go | 50
-rw-r--r--  internal/middleware/limithandler/limithandler_test.go | 5
-rw-r--r--  internal/rubyserver/health.go | 4
-rw-r--r--  internal/rubyserver/rubyserver.go | 11
-rw-r--r--  internal/server/auth_test.go | 7
-rw-r--r--  internal/service/blob/testhelper_test.go | 6
-rw-r--r--  internal/service/commit/testhelper_test.go | 6
-rw-r--r--  internal/service/conflicts/resolve_conflicts_test.go | 2
-rw-r--r--  internal/service/conflicts/testhelper_test.go | 6
-rw-r--r--  internal/service/diff/testhelper_test.go | 6
-rw-r--r--  internal/service/namespace/testhelper_test.go | 6
-rw-r--r--  internal/service/operations/cherry_pick_test.go | 2
-rw-r--r--  internal/service/operations/testhelper_test.go | 6
-rw-r--r--  internal/service/ref/testhelper_test.go | 6
-rw-r--r--  internal/service/remote/fetch_internal_remote_test.go | 2
-rw-r--r--  internal/service/remote/testhelper_test.go | 6
-rw-r--r--  internal/service/repository/fetch_test.go | 6
-rw-r--r--  internal/service/repository/testhelper_test.go | 5
-rw-r--r--  internal/service/server/info_test.go | 6
-rw-r--r--  internal/service/smarthttp/testhelper_test.go | 6
-rw-r--r--  internal/service/ssh/receive_pack_test.go | 2
-rw-r--r--  internal/service/ssh/testhelper_test.go | 6
-rw-r--r--  internal/service/ssh/upload_archive_test.go | 2
-rw-r--r--  internal/service/ssh/upload_pack_test.go | 2
-rw-r--r--  internal/service/storage/testhelper_test.go | 6
-rw-r--r--  internal/service/wiki/testhelper_test.go | 6
-rw-r--r--  internal/testhelper/testhelper.go | 2
-rw-r--r--  vendor/github.com/grpc-ecosystem/go-grpc-middleware/CHANGELOG.md | 30
-rw-r--r--  vendor/google.golang.org/grpc/CONTRIBUTING.md | 4
-rw-r--r--  vendor/google.golang.org/grpc/Makefile | 59
-rw-r--r--  vendor/google.golang.org/grpc/README.md | 25
-rw-r--r--  vendor/google.golang.org/grpc/backoff.go | 66
-rw-r--r--  vendor/google.golang.org/grpc/balancer.go | 38
-rw-r--r--  vendor/google.golang.org/grpc/balancer/balancer.go | 76
-rw-r--r--  vendor/google.golang.org/grpc/balancer/base/balancer.go | 1
-rw-r--r--  vendor/google.golang.org/grpc/balancer_conn_wrappers.go | 7
-rw-r--r--  vendor/google.golang.org/grpc/balancer_v1_wrapper.go | 59
-rw-r--r--  vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go | 900
-rw-r--r--  vendor/google.golang.org/grpc/call.go | 328
-rw-r--r--  vendor/google.golang.org/grpc/clientconn.go | 1351
-rw-r--r--  vendor/google.golang.org/grpc/codec.go | 98
-rw-r--r--  vendor/google.golang.org/grpc/codes/codes.go | 36
-rw-r--r--  vendor/google.golang.org/grpc/credentials/credentials.go | 101
-rw-r--r--  vendor/google.golang.org/grpc/credentials/credentials_util_go17.go (renamed from vendor/google.golang.org/grpc/credentials/go17.go) | 3
-rw-r--r--  vendor/google.golang.org/grpc/credentials/credentials_util_go18.go (renamed from vendor/google.golang.org/grpc/credentials/go18.go) | 8
-rw-r--r--  vendor/google.golang.org/grpc/credentials/credentials_util_pre_go17.go (renamed from vendor/google.golang.org/grpc/credentials/go16.go) | 0
-rw-r--r--  vendor/google.golang.org/grpc/credentials/go19.go | 35
-rw-r--r--  vendor/google.golang.org/grpc/dialoptions.go | 465
-rw-r--r--  vendor/google.golang.org/grpc/encoding/encoding.go | 99
-rw-r--r--  vendor/google.golang.org/grpc/encoding/proto/proto.go | 110
-rw-r--r--  vendor/google.golang.org/grpc/go.mod | 21
-rw-r--r--  vendor/google.golang.org/grpc/go.sum | 34
-rw-r--r--  vendor/google.golang.org/grpc/go16.go | 41
-rw-r--r--  vendor/google.golang.org/grpc/go17.go | 41
-rw-r--r--  vendor/google.golang.org/grpc/grpclb.go | 342
-rw-r--r--  vendor/google.golang.org/grpc/grpclb/grpc_lb_v1/messages/messages.pb.go | 615
-rw-r--r--  vendor/google.golang.org/grpc/grpclb/grpc_lb_v1/messages/messages.proto | 155
-rw-r--r--  vendor/google.golang.org/grpc/grpclb_picker.go | 159
-rw-r--r--  vendor/google.golang.org/grpc/grpclb_remote_balancer.go | 254
-rw-r--r--  vendor/google.golang.org/grpc/grpclb_util.go | 90
-rw-r--r--  vendor/google.golang.org/grpc/grpclog/grpclog.go | 3
-rw-r--r--  vendor/google.golang.org/grpc/grpclog/logger.go | 2
-rw-r--r--  vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go | 239
-rw-r--r--  vendor/google.golang.org/grpc/health/grpc_health_v1/health.proto | 34
-rw-r--r--  vendor/google.golang.org/grpc/health/health.go | 69
-rwxr-xr-x  vendor/google.golang.org/grpc/health/regenerate.sh | 33
-rwxr-xr-x  vendor/google.golang.org/grpc/install_gae.sh | 6
-rw-r--r--  vendor/google.golang.org/grpc/interceptor.go | 4
-rw-r--r--  vendor/google.golang.org/grpc/internal/backoff/backoff.go | 78
-rw-r--r--  vendor/google.golang.org/grpc/internal/binarylog/binarylog.go | 141
-rw-r--r--  vendor/google.golang.org/grpc/internal/binarylog/env_config.go | 206
-rw-r--r--  vendor/google.golang.org/grpc/internal/binarylog/method_logger.go | 426
-rwxr-xr-x  vendor/google.golang.org/grpc/internal/binarylog/regenerate.sh | 33
-rw-r--r--  vendor/google.golang.org/grpc/internal/binarylog/sink.go | 64
-rw-r--r--  vendor/google.golang.org/grpc/internal/binarylog/util.go | 41
-rw-r--r--  vendor/google.golang.org/grpc/internal/channelz/funcs.go | 662
-rw-r--r--  vendor/google.golang.org/grpc/internal/channelz/types.go | 702
-rw-r--r--  vendor/google.golang.org/grpc/internal/channelz/types_linux.go | 53
-rw-r--r--  vendor/google.golang.org/grpc/internal/channelz/types_nonlinux.go | 44
-rw-r--r--  vendor/google.golang.org/grpc/internal/channelz/util_linux_go19.go | 39
-rw-r--r--  vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go | 56
-rw-r--r--  vendor/google.golang.org/grpc/internal/grpcsync/event.go | 61
-rw-r--r--  vendor/google.golang.org/grpc/internal/internal.go | 32
-rw-r--r--  vendor/google.golang.org/grpc/internal/syscall/syscall_linux.go | 67
-rw-r--r--  vendor/google.golang.org/grpc/internal/syscall/syscall_nonlinux.go | 47
-rw-r--r--  vendor/google.golang.org/grpc/internal/transport/controlbuf.go | 852
-rw-r--r--  vendor/google.golang.org/grpc/internal/transport/defaults.go | 49
-rw-r--r--  vendor/google.golang.org/grpc/internal/transport/flowcontrol.go | 218
-rw-r--r--  vendor/google.golang.org/grpc/keepalive/keepalive.go | 62
-rw-r--r--  vendor/google.golang.org/grpc/metadata/metadata.go | 89
-rw-r--r--  vendor/google.golang.org/grpc/naming/dns_resolver.go | 6
-rw-r--r--  vendor/google.golang.org/grpc/naming/naming.go | 12
-rw-r--r--  vendor/google.golang.org/grpc/picker_wrapper.go | 43
-rw-r--r--  vendor/google.golang.org/grpc/pickfirst.go | 1
-rw-r--r--  vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection.pb.go | 406
-rw-r--r--  vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection.proto | 12
-rw-r--r--  vendor/google.golang.org/grpc/reflection/serverreflection.go | 223
-rw-r--r--  vendor/google.golang.org/grpc/resolver/dns/dns_resolver.go | 133
-rw-r--r--  vendor/google.golang.org/grpc/resolver/dns/go17.go (renamed from vendor/google.golang.org/grpc/internal/envconfig/envconfig.go) | 22
-rw-r--r--  vendor/google.golang.org/grpc/resolver/dns/go18.go (renamed from vendor/google.golang.org/grpc/internal/channelz/util_nonlinux_pre_go19.go) | 17
-rw-r--r--  vendor/google.golang.org/grpc/resolver/dns/go19.go | 54
-rw-r--r--  vendor/google.golang.org/grpc/resolver/dns/pre_go19.go | 51
-rw-r--r--  vendor/google.golang.org/grpc/resolver/resolver.go | 39
-rw-r--r--  vendor/google.golang.org/grpc/resolver_conn_wrapper.go | 68
-rw-r--r--  vendor/google.golang.org/grpc/rpc_util.go | 520
-rw-r--r--  vendor/google.golang.org/grpc/server.go | 484
-rw-r--r--  vendor/google.golang.org/grpc/service_config.go | 151
-rw-r--r--  vendor/google.golang.org/grpc/stats/stats.go | 2
-rw-r--r--  vendor/google.golang.org/grpc/status/go16.go | 42
-rw-r--r--  vendor/google.golang.org/grpc/status/go17.go | 44
-rw-r--r--  vendor/google.golang.org/grpc/status/status.go | 26
-rw-r--r--  vendor/google.golang.org/grpc/stream.go | 1020
-rw-r--r--  vendor/google.golang.org/grpc/transport/bdp_estimator.go (renamed from vendor/google.golang.org/grpc/internal/transport/bdp_estimator.go) | 0
-rw-r--r--  vendor/google.golang.org/grpc/transport/control.go | 334
-rw-r--r--  vendor/google.golang.org/grpc/transport/go16.go (renamed from vendor/google.golang.org/grpc/internal/transport/go16.go) | 11
-rw-r--r--  vendor/google.golang.org/grpc/transport/go17.go (renamed from vendor/google.golang.org/grpc/internal/transport/go17.go) | 11
-rw-r--r--  vendor/google.golang.org/grpc/transport/handler_server.go (renamed from vendor/google.golang.org/grpc/internal/transport/handler_server.go) | 100
-rw-r--r--  vendor/google.golang.org/grpc/transport/http2_client.go (renamed from vendor/google.golang.org/grpc/internal/transport/http2_client.go) | 1254
-rw-r--r--  vendor/google.golang.org/grpc/transport/http2_server.go (renamed from vendor/google.golang.org/grpc/internal/transport/http2_server.go) | 887
-rw-r--r--  vendor/google.golang.org/grpc/transport/http_util.go (renamed from vendor/google.golang.org/grpc/internal/transport/http_util.go) | 252
-rw-r--r--  vendor/google.golang.org/grpc/transport/log.go (renamed from vendor/google.golang.org/grpc/internal/transport/log.go) | 6
-rw-r--r--  vendor/google.golang.org/grpc/transport/transport.go (renamed from vendor/google.golang.org/grpc/internal/transport/transport.go) | 405
-rw-r--r--  vendor/google.golang.org/grpc/version.go | 22
-rwxr-xr-x  vendor/google.golang.org/grpc/vet.sh | 117
-rw-r--r--  vendor/vendor.json | 372
126 files changed, 6248 insertions, 11186 deletions
diff --git a/client/dial.go b/client/dial.go
index b92e5a0b2..89f2a10b0 100644
--- a/client/dial.go
+++ b/client/dial.go
@@ -1,6 +1,12 @@
package client
import (
+ "fmt"
+ "net"
+ "net/url"
+ "strings"
+ "time"
+
"google.golang.org/grpc"
)
@@ -10,12 +16,46 @@ var DefaultDialOpts = []grpc.DialOption{
}
// Dial gitaly
-// Deprecated: Use grpc.Dial directly instead
func Dial(rawAddress string, connOpts []grpc.DialOption) (*grpc.ClientConn, error) {
- conn, err := grpc.Dial(rawAddress, connOpts...)
+ network, addr, err := parseAddress(rawAddress)
+ if err != nil {
+ return nil, err
+ }
+
+ connOpts = append(connOpts,
+ grpc.WithDialer(func(a string, timeout time.Duration) (net.Conn, error) {
+ return net.DialTimeout(network, a, timeout)
+ }))
+ conn, err := grpc.Dial(addr, connOpts...)
if err != nil {
return nil, err
}
return conn, nil
}
+
+func parseAddress(rawAddress string) (network, addr string, err error) {
+ // Parsing unix:// URL's with url.Parse does not give the result we want
+ // so we do it manually.
+ for _, prefix := range []string{"unix://", "unix:"} {
+ if strings.HasPrefix(rawAddress, prefix) {
+ return "unix", strings.TrimPrefix(rawAddress, prefix), nil
+ }
+ }
+
+ u, err := url.Parse(rawAddress)
+ if err != nil {
+ return "", "", err
+ }
+
+ if u.Scheme != "tcp" {
+ return "", "", fmt.Errorf("unknown scheme: %q", rawAddress)
+ }
+ if u.Host == "" {
+ return "", "", fmt.Errorf("network tcp requires host: %q", rawAddress)
+ }
+ if u.Path != "" {
+ return "", "", fmt.Errorf("network tcp should have no path: %q", rawAddress)
+ }
+ return "tcp", u.Host, nil
+}
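For context, a minimal usage sketch of the restored client.Dial, showing the two address forms parseAddress accepts after this revert. The socket path and TCP endpoint below are illustrative only, not taken from the change itself.

```go
package main

import (
	"log"

	"gitlab.com/gitlab-org/gitaly/client"
)

func main() {
	// Unix socket form: "unix:/path" and "unix:///path" both resolve to
	// network "unix" and are dialed via the custom net.DialTimeout dialer.
	conn, err := client.Dial("unix:/tmp/gitaly.socket", client.DefaultDialOpts)
	if err != nil {
		log.Fatalf("dial unix: %v", err)
	}
	defer conn.Close()

	// TCP form: the scheme must be "tcp", a host is required, and a path is rejected.
	tcpConn, err := client.Dial("tcp://localhost:9999", client.DefaultDialOpts)
	if err != nil {
		log.Fatalf("dial tcp: %v", err)
	}
	defer tcpConn.Close()
}
```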
diff --git a/client/dial_test.go b/client/dial_test.go
new file mode 100644
index 000000000..d0513fc81
--- /dev/null
+++ b/client/dial_test.go
@@ -0,0 +1,50 @@
+package client
+
+import (
+ "testing"
+)
+
+func TestParseAddress(t *testing.T) {
+ testCases := []struct {
+ raw string
+ network string
+ addr string
+ invalid bool
+ }{
+ {raw: "unix:/foo/bar.socket", network: "unix", addr: "/foo/bar.socket"},
+ {raw: "unix:///foo/bar.socket", network: "unix", addr: "/foo/bar.socket"},
+ // Mainly for test purposes we explicitly want to support relative paths
+ {raw: "unix://foo/bar.socket", network: "unix", addr: "foo/bar.socket"},
+ {raw: "unix:foo/bar.socket", network: "unix", addr: "foo/bar.socket"},
+ {raw: "tcp://1.2.3.4", network: "tcp", addr: "1.2.3.4"},
+ {raw: "tcp://1.2.3.4:567", network: "tcp", addr: "1.2.3.4:567"},
+ {raw: "tcp://foobar", network: "tcp", addr: "foobar"},
+ {raw: "tcp://foobar:567", network: "tcp", addr: "foobar:567"},
+ {raw: "tcp://1.2.3.4/foo/bar.socket", invalid: true},
+ {raw: "tcp:///foo/bar.socket", invalid: true},
+ {raw: "tcp:/foo/bar.socket", invalid: true},
+ }
+
+ for _, tc := range testCases {
+ network, addr, err := parseAddress(tc.raw)
+
+ if err == nil && tc.invalid {
+ t.Errorf("%v: expected error, got none", tc)
+ } else if err != nil && !tc.invalid {
+ t.Errorf("%v: parse error: %v", tc, err)
+ continue
+ }
+
+ if tc.invalid {
+ continue
+ }
+
+ if tc.network != network {
+ t.Errorf("%v: expected %q, got %q", tc, tc.network, network)
+ }
+
+ if tc.addr != addr {
+ t.Errorf("%v: expected %q, got %q", tc, tc.addr, addr)
+ }
+ }
+}
diff --git a/internal/middleware/limithandler/limithandler_test.go b/internal/middleware/limithandler/limithandler_test.go
index 4a6e6ffc8..ee5e24b10 100644
--- a/internal/middleware/limithandler/limithandler_test.go
+++ b/internal/middleware/limithandler/limithandler_test.go
@@ -207,12 +207,15 @@ func runServer(t *testing.T, s *server, opt ...grpc.ServerOption) (*grpc.Server,
go grpcServer.Serve(lis)
- return grpcServer, "unix://" + serverSocketPath
+ return grpcServer, serverSocketPath
}
func newClient(t *testing.T, serverSocketPath string) (pb.TestClient, *grpc.ClientConn) {
connOpts := []grpc.DialOption{
grpc.WithInsecure(),
+ grpc.WithDialer(func(addr string, timeout time.Duration) (net.Conn, error) {
+ return net.DialTimeout("unix", addr, timeout)
+ }),
}
conn, err := grpc.Dial(serverSocketPath, connOpts...)
if err != nil {
diff --git a/internal/rubyserver/health.go b/internal/rubyserver/health.go
index 10550d7dc..fd7b32747 100644
--- a/internal/rubyserver/health.go
+++ b/internal/rubyserver/health.go
@@ -1,6 +1,7 @@
package rubyserver
import (
+ "net"
"time"
"golang.org/x/net/context"
@@ -12,6 +13,9 @@ func ping(address string) error {
conn, err := grpc.Dial(
address,
grpc.WithInsecure(),
+ grpc.WithDialer(func(addr string, timeout time.Duration) (net.Conn, error) {
+ return net.DialTimeout("unix", addr, timeout)
+ }),
)
if err != nil {
return err
diff --git a/internal/rubyserver/rubyserver.go b/internal/rubyserver/rubyserver.go
index 99cb290a5..3fdfc31f7 100644
--- a/internal/rubyserver/rubyserver.go
+++ b/internal/rubyserver/rubyserver.go
@@ -3,6 +3,7 @@ package rubyserver
import (
"fmt"
"io/ioutil"
+ "net"
"os"
"path"
"path/filepath"
@@ -125,7 +126,6 @@ func Start() (*Server, error) {
for i := 0; i < numWorkers; i++ {
name := fmt.Sprintf("gitaly-ruby.%d", i)
socketPath := socketPath(i)
- address := "unix://" + socketPath
// Use 'ruby-cd' to make sure gitaly-ruby has the same working directory
// as the current process. This is a hack to sort-of support relative
@@ -133,13 +133,13 @@ func Start() (*Server, error) {
args := []string{"bundle", "exec", "bin/ruby-cd", wd, gitalyRuby, strconv.Itoa(os.Getpid()), socketPath}
events := make(chan supervisor.Event)
- check := func() error { return ping(address) }
+ check := func() error { return ping(socketPath) }
p, err := supervisor.New(name, env, args, cfg.Ruby.Dir, cfg.Ruby.MaxRSS, events, check)
if err != nil {
return nil, err
}
- s.workers = append(s.workers, newWorker(p, address, events, false))
+ s.workers = append(s.workers, newWorker(p, socketPath, events, false))
}
return s, nil
@@ -240,7 +240,7 @@ func (s *Server) createConnection(ctx context.Context) (*grpc.ClientConn, error)
dialCtx, cancel := context.WithTimeout(ctx, ConnectTimeout)
defer cancel()
- conn, err := grpc.DialContext(dialCtx, balancer.Scheme+":///gitaly-ruby", dialOptions()...)
+ conn, err := grpc.DialContext(dialCtx, balancer.Scheme+"://gitaly-ruby", dialOptions()...)
if err != nil {
return nil, err
}
@@ -253,6 +253,9 @@ func dialOptions() []grpc.DialOption {
return []grpc.DialOption{
grpc.WithBlock(), // With this we get retries. Without, connections fail fast.
grpc.WithInsecure(),
+ grpc.WithDialer(func(addr string, timeout time.Duration) (net.Conn, error) {
+ return net.DialTimeout("unix", addr, timeout)
+ }),
grpc.WithUnaryInterceptor(grpc_prometheus.UnaryClientInterceptor),
grpc.WithStreamInterceptor(grpc_prometheus.StreamClientInterceptor),
}
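The grpc.WithDialer closure added here is repeated verbatim in the test helpers further down. A hedged sketch of what a shared helper could look like; no such helper exists in this change, which simply inlines the closure at every call site.

```go
package grpcutil // hypothetical helper package, for illustration only

import (
	"net"
	"time"

	"google.golang.org/grpc"
)

// UnixDialOption mirrors the inlined closure: it treats the dial target as a
// raw Unix socket path and dials it with the per-attempt timeout gRPC passes in.
func UnixDialOption() grpc.DialOption {
	return grpc.WithDialer(func(addr string, timeout time.Duration) (net.Conn, error) {
		return net.DialTimeout("unix", addr, timeout)
	})
}
```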
diff --git a/internal/server/auth_test.go b/internal/server/auth_test.go
index be711ac3b..b50445a84 100644
--- a/internal/server/auth_test.go
+++ b/internal/server/auth_test.go
@@ -3,6 +3,7 @@ package server
import (
"net"
"testing"
+ "time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
@@ -145,6 +146,10 @@ func (brokenAuth) GetRequestMetadata(netctx.Context, ...string) (map[string]stri
}
func dial(serverSocketPath string, opts []grpc.DialOption) (*grpc.ClientConn, error) {
+ opts = append(opts, grpc.WithDialer(func(addr string, timeout time.Duration) (net.Conn, error) {
+ return net.DialTimeout("unix", addr, timeout)
+ }))
+
return grpc.Dial(serverSocketPath, opts...)
}
@@ -166,5 +171,5 @@ func runServer(t *testing.T) (*grpc.Server, string) {
require.NoError(t, err)
go srv.Serve(listener)
- return srv, "unix://" + serverSocketPath
+ return srv, serverSocketPath
}
diff --git a/internal/service/blob/testhelper_test.go b/internal/service/blob/testhelper_test.go
index 95cc08caf..19a7bc534 100644
--- a/internal/service/blob/testhelper_test.go
+++ b/internal/service/blob/testhelper_test.go
@@ -5,6 +5,7 @@ import (
"net"
"os"
"testing"
+ "time"
"gitlab.com/gitlab-org/gitaly-proto/go/gitalypb"
"gitlab.com/gitlab-org/gitaly/internal/rubyserver"
@@ -48,12 +49,15 @@ func runBlobServer(t *testing.T) (*grpc.Server, string) {
go grpcServer.Serve(listener)
- return grpcServer, "unix://" + serverSocketPath
+ return grpcServer, serverSocketPath
}
func newBlobClient(t *testing.T, serverSocketPath string) (gitalypb.BlobServiceClient, *grpc.ClientConn) {
connOpts := []grpc.DialOption{
grpc.WithInsecure(),
+ grpc.WithDialer(func(addr string, timeout time.Duration) (net.Conn, error) {
+ return net.DialTimeout("unix", addr, timeout)
+ }),
}
conn, err := grpc.Dial(serverSocketPath, connOpts...)
if err != nil {
diff --git a/internal/service/commit/testhelper_test.go b/internal/service/commit/testhelper_test.go
index caa3a29b9..4747082cc 100644
--- a/internal/service/commit/testhelper_test.go
+++ b/internal/service/commit/testhelper_test.go
@@ -4,6 +4,7 @@ import (
"net"
"os"
"testing"
+ "time"
"github.com/golang/protobuf/ptypes/timestamp"
log "github.com/sirupsen/logrus"
@@ -53,12 +54,15 @@ func startTestServices(t *testing.T) (*grpc.Server, string) {
reflection.Register(server)
go server.Serve(listener)
- return server, "unix://" + serverSocketPath
+ return server, serverSocketPath
}
func newCommitServiceClient(t *testing.T, serviceSocketPath string) (gitalypb.CommitServiceClient, *grpc.ClientConn) {
connOpts := []grpc.DialOption{
grpc.WithInsecure(),
+ grpc.WithDialer(func(addr string, timeout time.Duration) (net.Conn, error) {
+ return net.DialTimeout("unix", addr, timeout)
+ }),
}
conn, err := grpc.Dial(serviceSocketPath, connOpts...)
if err != nil {
diff --git a/internal/service/conflicts/resolve_conflicts_test.go b/internal/service/conflicts/resolve_conflicts_test.go
index 20bb2c298..02c593c68 100644
--- a/internal/service/conflicts/resolve_conflicts_test.go
+++ b/internal/service/conflicts/resolve_conflicts_test.go
@@ -318,5 +318,5 @@ func runFullServer(t *testing.T) (*grpc.Server, string) {
go server.Serve(listener)
- return server, "unix://" + serverSocketPath
+ return server, serverSocketPath
}
diff --git a/internal/service/conflicts/testhelper_test.go b/internal/service/conflicts/testhelper_test.go
index 713a971b8..e58298096 100644
--- a/internal/service/conflicts/testhelper_test.go
+++ b/internal/service/conflicts/testhelper_test.go
@@ -4,6 +4,7 @@ import (
"net"
"os"
"testing"
+ "time"
log "github.com/sirupsen/logrus"
"gitlab.com/gitlab-org/gitaly-proto/go/gitalypb"
@@ -48,12 +49,15 @@ func runConflictsServer(t *testing.T) (*grpc.Server, string) {
go server.Serve(listener)
- return server, "unix://" + serverSocketPath
+ return server, serverSocketPath
}
func NewConflictsClient(t *testing.T, serverSocketPath string) (gitalypb.ConflictsServiceClient, *grpc.ClientConn) {
connOpts := []grpc.DialOption{
grpc.WithInsecure(),
+ grpc.WithDialer(func(addr string, timeout time.Duration) (net.Conn, error) {
+ return net.DialTimeout("unix", addr, timeout)
+ }),
}
conn, err := grpc.Dial(serverSocketPath, connOpts...)
diff --git a/internal/service/diff/testhelper_test.go b/internal/service/diff/testhelper_test.go
index bd5d79264..29a370429 100644
--- a/internal/service/diff/testhelper_test.go
+++ b/internal/service/diff/testhelper_test.go
@@ -4,6 +4,7 @@ import (
"net"
"os"
"testing"
+ "time"
log "github.com/sirupsen/logrus"
"gitlab.com/gitlab-org/gitaly-proto/go/gitalypb"
@@ -48,12 +49,15 @@ func runDiffServer(t *testing.T) (*grpc.Server, string) {
go server.Serve(listener)
- return server, "unix://" + serverSocketPath
+ return server, serverSocketPath
}
func newDiffClient(t *testing.T, serverSocketPath string) (gitalypb.DiffServiceClient, *grpc.ClientConn) {
connOpts := []grpc.DialOption{
grpc.WithInsecure(),
+ grpc.WithDialer(func(addr string, timeout time.Duration) (net.Conn, error) {
+ return net.DialTimeout("unix", addr, timeout)
+ }),
}
conn, err := grpc.Dial(serverSocketPath, connOpts...)
diff --git a/internal/service/namespace/testhelper_test.go b/internal/service/namespace/testhelper_test.go
index 8d76d82ef..3e08713d0 100644
--- a/internal/service/namespace/testhelper_test.go
+++ b/internal/service/namespace/testhelper_test.go
@@ -3,6 +3,7 @@ package namespace
import (
"net"
"testing"
+ "time"
"gitlab.com/gitlab-org/gitaly-proto/go/gitalypb"
"gitlab.com/gitlab-org/gitaly/internal/testhelper"
@@ -24,12 +25,15 @@ func runNamespaceServer(t *testing.T) (*grpc.Server, string) {
go server.Serve(listener)
- return server, "unix://" + serverSocketPath
+ return server, serverSocketPath
}
func newNamespaceClient(t *testing.T, serverSocketPath string) (gitalypb.NamespaceServiceClient, *grpc.ClientConn) {
connOpts := []grpc.DialOption{
grpc.WithInsecure(),
+ grpc.WithDialer(func(addr string, timeout time.Duration) (net.Conn, error) {
+ return net.DialTimeout("unix", addr, timeout)
+ }),
}
conn, err := grpc.Dial(serverSocketPath, connOpts...)
if err != nil {
diff --git a/internal/service/operations/cherry_pick_test.go b/internal/service/operations/cherry_pick_test.go
index dbdc04c27..d9440d2cb 100644
--- a/internal/service/operations/cherry_pick_test.go
+++ b/internal/service/operations/cherry_pick_test.go
@@ -409,5 +409,5 @@ func runFullServer(t *testing.T) (*grpc.Server, string) {
go server.Serve(listener)
- return server, "unix://" + serverSocketPath
+ return server, serverSocketPath
}
diff --git a/internal/service/operations/testhelper_test.go b/internal/service/operations/testhelper_test.go
index 3a5408f6b..dc182a93e 100644
--- a/internal/service/operations/testhelper_test.go
+++ b/internal/service/operations/testhelper_test.go
@@ -7,6 +7,7 @@ import (
"os"
"path"
"testing"
+ "time"
log "github.com/sirupsen/logrus"
"github.com/stretchr/testify/require"
@@ -86,12 +87,15 @@ func runOperationServiceServer(t *testing.T) (*grpc.Server, string) {
go grpcServer.Serve(listener)
- return grpcServer, "unix://" + serverSocketPath
+ return grpcServer, serverSocketPath
}
func newOperationClient(t *testing.T, serverSocketPath string) (gitalypb.OperationServiceClient, *grpc.ClientConn) {
connOpts := []grpc.DialOption{
grpc.WithInsecure(),
+ grpc.WithDialer(func(addr string, timeout time.Duration) (net.Conn, error) {
+ return net.DialTimeout("unix", addr, timeout)
+ }),
}
conn, err := grpc.Dial(serverSocketPath, connOpts...)
if err != nil {
diff --git a/internal/service/ref/testhelper_test.go b/internal/service/ref/testhelper_test.go
index 3d7da42bb..10cfa05e7 100644
--- a/internal/service/ref/testhelper_test.go
+++ b/internal/service/ref/testhelper_test.go
@@ -5,6 +5,7 @@ import (
"net"
"os"
"testing"
+ "time"
"github.com/golang/protobuf/ptypes/timestamp"
log "github.com/sirupsen/logrus"
@@ -112,12 +113,15 @@ func runRefServiceServer(t *testing.T) (*grpc.Server, string) {
go grpcServer.Serve(listener)
- return grpcServer, "unix://" + serverSocketPath
+ return grpcServer, serverSocketPath
}
func newRefServiceClient(t *testing.T, serverSocketPath string) (gitalypb.RefServiceClient, *grpc.ClientConn) {
connOpts := []grpc.DialOption{
grpc.WithInsecure(),
+ grpc.WithDialer(func(addr string, timeout time.Duration) (net.Conn, error) {
+ return net.DialTimeout("unix", addr, timeout)
+ }),
}
conn, err := grpc.Dial(serverSocketPath, connOpts...)
if err != nil {
diff --git a/internal/service/remote/fetch_internal_remote_test.go b/internal/service/remote/fetch_internal_remote_test.go
index 2d9bfc4d5..22bfab29c 100644
--- a/internal/service/remote/fetch_internal_remote_test.go
+++ b/internal/service/remote/fetch_internal_remote_test.go
@@ -124,5 +124,5 @@ func runFullServer(t *testing.T) (*grpc.Server, string) {
go server.Serve(listener)
- return server, "unix://" + serverSocketPath
+ return server, serverSocketPath
}
diff --git a/internal/service/remote/testhelper_test.go b/internal/service/remote/testhelper_test.go
index 3f37b1c60..152c5b2be 100644
--- a/internal/service/remote/testhelper_test.go
+++ b/internal/service/remote/testhelper_test.go
@@ -5,6 +5,7 @@ import (
"net"
"os"
"testing"
+ "time"
"gitlab.com/gitlab-org/gitaly-proto/go/gitalypb"
"gitlab.com/gitlab-org/gitaly/internal/rubyserver"
@@ -50,12 +51,15 @@ func runRemoteServiceServer(t *testing.T) (*grpc.Server, string) {
go grpcServer.Serve(listener)
- return grpcServer, "unix://" + serverSocketPath
+ return grpcServer, serverSocketPath
}
func NewRemoteClient(t *testing.T, serverSocketPath string) (gitalypb.RemoteServiceClient, *grpc.ClientConn) {
connOpts := []grpc.DialOption{
grpc.WithInsecure(),
+ grpc.WithDialer(func(addr string, timeout time.Duration) (net.Conn, error) {
+ return net.DialTimeout("unix", addr, timeout)
+ }),
}
conn, err := grpc.Dial(serverSocketPath, connOpts...)
if err != nil {
diff --git a/internal/service/repository/fetch_test.go b/internal/service/repository/fetch_test.go
index 4d1d9756d..a20160ad0 100644
--- a/internal/service/repository/fetch_test.go
+++ b/internal/service/repository/fetch_test.go
@@ -4,6 +4,7 @@ import (
"net"
"os"
"testing"
+ "time"
"github.com/stretchr/testify/require"
"gitlab.com/gitlab-org/gitaly-proto/go/gitalypb"
@@ -153,6 +154,9 @@ func TestFetchFullServerRequiresAuthentication(t *testing.T) {
connOpts := []grpc.DialOption{
grpc.WithInsecure(),
+ grpc.WithDialer(func(addr string, timeout time.Duration) (net.Conn, error) {
+ return net.DialTimeout("unix", addr, timeout)
+ }),
}
conn, err := grpc.Dial(serverSocketPath, connOpts...)
@@ -193,5 +197,5 @@ func runFullServer(t *testing.T) (*grpc.Server, string) {
go server.Serve(listener)
- return server, "unix://" + serverSocketPath
+ return server, serverSocketPath
}
diff --git a/internal/service/repository/testhelper_test.go b/internal/service/repository/testhelper_test.go
index e2a8c1f95..20a8b6ef5 100644
--- a/internal/service/repository/testhelper_test.go
+++ b/internal/service/repository/testhelper_test.go
@@ -30,6 +30,9 @@ var (
func newRepositoryClient(t *testing.T, serverSocketPath string) (gitalypb.RepositoryServiceClient, *grpc.ClientConn) {
connOpts := []grpc.DialOption{
grpc.WithInsecure(),
+ grpc.WithDialer(func(addr string, timeout time.Duration) (net.Conn, error) {
+ return net.DialTimeout("unix", addr, timeout)
+ }),
grpc.WithPerRPCCredentials(gitalyauth.RPCCredentials(testhelper.RepositoryAuthToken)),
}
conn, err := grpc.Dial(serverSocketPath, connOpts...)
@@ -59,7 +62,7 @@ func runRepoServer(t *testing.T) (*grpc.Server, string) {
go server.Serve(listener)
- return server, "unix://" + serverSocketPath
+ return server, serverSocketPath
}
func assertModTimeAfter(t *testing.T, afterTime time.Time, paths ...string) bool {
diff --git a/internal/service/server/info_test.go b/internal/service/server/info_test.go
index 374b4c785..5f10b0774 100644
--- a/internal/service/server/info_test.go
+++ b/internal/service/server/info_test.go
@@ -3,6 +3,7 @@ package server
import (
"net"
"testing"
+ "time"
"github.com/stretchr/testify/require"
"gitlab.com/gitlab-org/gitaly-proto/go/gitalypb"
@@ -70,12 +71,15 @@ func runServer(t *testing.T) (*grpc.Server, string) {
go server.Serve(listener)
- return server, "unix://" + serverSocketPath
+ return server, serverSocketPath
}
func newServerClient(t *testing.T, serverSocketPath string) (gitalypb.ServerServiceClient, *grpc.ClientConn) {
connOpts := []grpc.DialOption{
grpc.WithInsecure(),
+ grpc.WithDialer(func(addr string, timeout time.Duration) (net.Conn, error) {
+ return net.DialTimeout("unix", addr, timeout)
+ }),
grpc.WithPerRPCCredentials(gitalyauth.RPCCredentials(testhelper.RepositoryAuthToken)),
}
conn, err := grpc.Dial(serverSocketPath, connOpts...)
diff --git a/internal/service/smarthttp/testhelper_test.go b/internal/service/smarthttp/testhelper_test.go
index 23db1f2ac..ee2642b2e 100644
--- a/internal/service/smarthttp/testhelper_test.go
+++ b/internal/service/smarthttp/testhelper_test.go
@@ -3,6 +3,7 @@ package smarthttp
import (
"net"
"testing"
+ "time"
"gitlab.com/gitlab-org/gitaly-proto/go/gitalypb"
"gitlab.com/gitlab-org/gitaly/internal/testhelper"
@@ -28,12 +29,15 @@ func runSmartHTTPServer(t *testing.T) (*grpc.Server, string) {
go server.Serve(listener)
- return server, "unix://" + serverSocketPath
+ return server, serverSocketPath
}
func newSmartHTTPClient(t *testing.T, serverSocketPath string) (gitalypb.SmartHTTPServiceClient, *grpc.ClientConn) {
connOpts := []grpc.DialOption{
grpc.WithInsecure(),
+ grpc.WithDialer(func(addr string, timeout time.Duration) (net.Conn, error) {
+ return net.DialTimeout("unix", addr, timeout)
+ }),
}
conn, err := grpc.Dial(serverSocketPath, connOpts...)
if err != nil {
diff --git a/internal/service/ssh/receive_pack_test.go b/internal/service/ssh/receive_pack_test.go
index 5b880cebc..55f1e4740 100644
--- a/internal/service/ssh/receive_pack_test.go
+++ b/internal/service/ssh/receive_pack_test.go
@@ -201,7 +201,7 @@ func testCloneAndPush(t *testing.T, serverSocketPath string, params pushParams)
cmd := exec.Command("git", "-C", localRepoPath, "push", "-v", "git@localhost:test/test.git", "master")
cmd.Env = []string{
fmt.Sprintf("GITALY_PAYLOAD=%s", payload),
- fmt.Sprintf("GITALY_ADDRESS=%s", serverSocketPath),
+ fmt.Sprintf("GITALY_ADDRESS=unix:%s", serverSocketPath),
fmt.Sprintf("PATH=%s", ".:"+os.Getenv("PATH")),
fmt.Sprintf(`GIT_SSH_COMMAND=%s receive-pack`, gitalySSHPath),
}
diff --git a/internal/service/ssh/testhelper_test.go b/internal/service/ssh/testhelper_test.go
index b22fd7c33..68ced139c 100644
--- a/internal/service/ssh/testhelper_test.go
+++ b/internal/service/ssh/testhelper_test.go
@@ -5,6 +5,7 @@ import (
"os"
"path"
"testing"
+ "time"
log "github.com/sirupsen/logrus"
"gitlab.com/gitlab-org/gitaly-proto/go/gitalypb"
@@ -71,12 +72,15 @@ func runSSHServer(t *testing.T) (*grpc.Server, string) {
go server.Serve(listener)
- return server, "unix://" + serverSocketPath
+ return server, serverSocketPath
}
func newSSHClient(t *testing.T, serverSocketPath string) (gitalypb.SSHServiceClient, *grpc.ClientConn) {
connOpts := []grpc.DialOption{
grpc.WithInsecure(),
+ grpc.WithDialer(func(addr string, timeout time.Duration) (net.Conn, error) {
+ return net.DialTimeout("unix", addr, timeout)
+ }),
}
conn, err := grpc.Dial(serverSocketPath, connOpts...)
if err != nil {
diff --git a/internal/service/ssh/upload_archive_test.go b/internal/service/ssh/upload_archive_test.go
index 8264b6314..c639c003b 100644
--- a/internal/service/ssh/upload_archive_test.go
+++ b/internal/service/ssh/upload_archive_test.go
@@ -81,7 +81,7 @@ func testArchive(t *testing.T, serverSocketPath string, testRepo *gitalypb.Repos
require.NoError(t, err)
cmd.Env = []string{
- fmt.Sprintf("GITALY_ADDRESS=%s", serverSocketPath),
+ fmt.Sprintf("GITALY_ADDRESS=unix:%s", serverSocketPath),
fmt.Sprintf("GITALY_PAYLOAD=%s", payload),
fmt.Sprintf("PATH=%s", ".:"+os.Getenv("PATH")),
fmt.Sprintf(`GIT_SSH_COMMAND=%s upload-archive`, gitalySSHPath),
diff --git a/internal/service/ssh/upload_pack_test.go b/internal/service/ssh/upload_pack_test.go
index a52cc99ae..d62734e2f 100644
--- a/internal/service/ssh/upload_pack_test.go
+++ b/internal/service/ssh/upload_pack_test.go
@@ -188,7 +188,7 @@ func testClone(t *testing.T, serverSocketPath, storageName, relativePath, localR
require.NoError(t, err)
cmd.Env = []string{
- fmt.Sprintf("GITALY_ADDRESS=%s", serverSocketPath),
+ fmt.Sprintf("GITALY_ADDRESS=unix:%s", serverSocketPath),
fmt.Sprintf("GITALY_PAYLOAD=%s", payload),
fmt.Sprintf("PATH=%s", ".:"+os.Getenv("PATH")),
fmt.Sprintf(`GIT_SSH_COMMAND=%s upload-pack`, gitalySSHPath),
diff --git a/internal/service/storage/testhelper_test.go b/internal/service/storage/testhelper_test.go
index 4694edea0..aeb405ca6 100644
--- a/internal/service/storage/testhelper_test.go
+++ b/internal/service/storage/testhelper_test.go
@@ -5,6 +5,7 @@ import (
"os"
"path/filepath"
"testing"
+ "time"
"gitlab.com/gitlab-org/gitaly-proto/go/gitalypb"
"gitlab.com/gitlab-org/gitaly/internal/config"
@@ -53,12 +54,15 @@ func runStorageServer(t *testing.T) (*grpc.Server, string) {
go server.Serve(listener)
- return server, "unix://" + serverSocketPath
+ return server, serverSocketPath
}
func newStorageClient(t *testing.T, serverSocketPath string) (gitalypb.StorageServiceClient, *grpc.ClientConn) {
connOpts := []grpc.DialOption{
grpc.WithInsecure(),
+ grpc.WithDialer(func(addr string, timeout time.Duration) (net.Conn, error) {
+ return net.DialTimeout("unix", addr, timeout)
+ }),
}
conn, err := grpc.Dial(serverSocketPath, connOpts...)
if err != nil {
diff --git a/internal/service/wiki/testhelper_test.go b/internal/service/wiki/testhelper_test.go
index 1637d1ffc..7d6b2e503 100644
--- a/internal/service/wiki/testhelper_test.go
+++ b/internal/service/wiki/testhelper_test.go
@@ -7,6 +7,7 @@ import (
"path"
"strings"
"testing"
+ "time"
log "github.com/sirupsen/logrus"
"github.com/stretchr/testify/require"
@@ -64,12 +65,15 @@ func runWikiServiceServer(t *testing.T) (*grpc.Server, string) {
go grpcServer.Serve(listener)
- return grpcServer, "unix://" + serverSocketPath
+ return grpcServer, serverSocketPath
}
func newWikiClient(t *testing.T, serverSocketPath string) (gitalypb.WikiServiceClient, *grpc.ClientConn) {
connOpts := []grpc.DialOption{
grpc.WithInsecure(),
+ grpc.WithDialer(func(addr string, timeout time.Duration) (net.Conn, error) {
+ return net.DialTimeout("unix", addr, timeout)
+ }),
}
conn, err := grpc.Dial(serverSocketPath, connOpts...)
if err != nil {
diff --git a/internal/testhelper/testhelper.go b/internal/testhelper/testhelper.go
index a612dd3a2..285de96bf 100644
--- a/internal/testhelper/testhelper.go
+++ b/internal/testhelper/testhelper.go
@@ -86,7 +86,7 @@ func GitlabTestStoragePath() string {
func GitalyServersMetadata(t *testing.T, serverSocketPath string) metadata.MD {
gitalyServers := storage.GitalyServers{
"default": {
- "address": serverSocketPath,
+ "address": "unix:" + serverSocketPath,
"token": RepositoryAuthToken,
},
}
diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/CHANGELOG.md b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/CHANGELOG.md
deleted file mode 100644
index 0e64822d2..000000000
--- a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/CHANGELOG.md
+++ /dev/null
@@ -1,30 +0,0 @@
-# Changelog
-All notable changes to this project will be documented in this file.
-
-The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/)
-and this project adheres to [Semantic Versioning](http://semver.org/spec/v2.0.0.html).
-
-Types of changes:
-- `Added` for new features.
-- `Changed` for changes in existing functionality.
-- `Deprecated` for soon-to-be removed features.
-- `Removed` for now removed features.
-- `Fixed` for any bug fixes.
-- `Security` in case of vulnerabilities.
-
-## [Unreleased]
-### Added
-- This CHANGELOG file to keep track of changes.
-
-## 1.0.0 - 2018-05-08
-### Added
-- grpc_auth
-- grpc_ctxtags
-- grpc_zap
-- grpc_logrus
-- grpc_opentracing
-- grpc_retry
-- grpc_validator
-- grpc_recovery
-
-[Unreleased]: https://github.com/grpc-ecosystem/go-grpc-middleware/compare/v1.0.0...HEAD
diff --git a/vendor/google.golang.org/grpc/CONTRIBUTING.md b/vendor/google.golang.org/grpc/CONTRIBUTING.md
index 0863eb26b..8ec6c9574 100644
--- a/vendor/google.golang.org/grpc/CONTRIBUTING.md
+++ b/vendor/google.golang.org/grpc/CONTRIBUTING.md
@@ -27,10 +27,6 @@ How to get your contributions merged smoothly and quickly.
- Keep your PR up to date with upstream/master (if there are merge conflicts, we can't really merge your change).
- **All tests need to be passing** before your change can be merged. We recommend you **run tests locally** before creating your PR to catch breakages early on.
- - `make all` to test everything, OR
- - `make vet` to catch vet errors
- - `make test` to run the tests
- - `make testrace` to run tests in race mode
- Exceptions to the rules can be made if there's a compelling reason for doing so.
diff --git a/vendor/google.golang.org/grpc/Makefile b/vendor/google.golang.org/grpc/Makefile
index 41a754f97..c44534376 100644
--- a/vendor/google.golang.org/grpc/Makefile
+++ b/vendor/google.golang.org/grpc/Makefile
@@ -1,14 +1,20 @@
-all: vet test testrace testappengine
-
-build: deps
- go build google.golang.org/grpc/...
-
-clean:
- go clean -i google.golang.org/grpc/...
+all: test testrace
deps:
go get -d -v google.golang.org/grpc/...
+updatedeps:
+ go get -d -v -u -f google.golang.org/grpc/...
+
+testdeps:
+ go get -d -v -t google.golang.org/grpc/...
+
+updatetestdeps:
+ go get -d -v -t -u -f google.golang.org/grpc/...
+
+build: deps
+ go build google.golang.org/grpc/...
+
proto:
@ if ! which protoc > /dev/null; then \
echo "error: protoc not installed" >&2; \
@@ -17,44 +23,23 @@ proto:
go generate google.golang.org/grpc/...
test: testdeps
- go test -cpu 1,4 -timeout 7m google.golang.org/grpc/...
-
-testappengine: testappenginedeps
- goapp test -cpu 1,4 -timeout 7m google.golang.org/grpc/...
-
-testappenginedeps:
- goapp get -d -v -t -tags 'appengine appenginevm' google.golang.org/grpc/...
-
-testdeps:
- go get -d -v -t google.golang.org/grpc/...
+ go test -cpu 1,4 -timeout 5m google.golang.org/grpc/...
testrace: testdeps
go test -race -cpu 1,4 -timeout 7m google.golang.org/grpc/...
-updatedeps:
- go get -d -v -u -f google.golang.org/grpc/...
-
-updatetestdeps:
- go get -d -v -t -u -f google.golang.org/grpc/...
-
-vet: vetdeps
- ./vet.sh
-
-vetdeps:
- ./vet.sh -install
+clean:
+ go clean -i google.golang.org/grpc/...
.PHONY: \
all \
- build \
- clean \
deps \
+ updatedeps \
+ testdeps \
+ updatetestdeps \
+ build \
proto \
test \
- testappengine \
- testappenginedeps \
- testdeps \
testrace \
- updatedeps \
- updatetestdeps \
- vet \
- vetdeps
+ clean \
+ coverage
diff --git a/vendor/google.golang.org/grpc/README.md b/vendor/google.golang.org/grpc/README.md
index 29ffb00d3..118327bb1 100644
--- a/vendor/google.golang.org/grpc/README.md
+++ b/vendor/google.golang.org/grpc/README.md
@@ -16,7 +16,8 @@ $ go get -u google.golang.org/grpc
Prerequisites
-------------
-This requires Go 1.6 or later. Go 1.7 will be required soon.
+This requires Go 1.6 or later. Go 1.7 will be required as of the next gRPC-Go
+release (1.8).
Constraints
-----------
@@ -43,25 +44,3 @@ Please update proto package, gRPC package and rebuild the proto files:
- `go get -u github.com/golang/protobuf/{proto,protoc-gen-go}`
- `go get -u google.golang.org/grpc`
- `protoc --go_out=plugins=grpc:. *.proto`
-
-#### How to turn on logging
-
-The default logger is controlled by the environment variables. Turn everything
-on by setting:
-
-```
-GRPC_GO_LOG_VERBOSITY_LEVEL=99 GRPC_GO_LOG_SEVERITY_LEVEL=info
-```
-
-#### The RPC failed with error `"code = Unavailable desc = transport is closing"`
-
-This error means the connection the RPC is using was closed, and there are many
-possible reasons, including:
- 1. mis-configured transport credentials, connection failed on handshaking
- 1. bytes disrupted, possibly by a proxy in between
- 1. server shutdown
-
-It can be tricky to debug this because the error happens on the client side but
-the root cause of the connection being closed is on the server side. Turn on
-logging on __both client and server__, and see if there are any transport
-errors.
diff --git a/vendor/google.golang.org/grpc/backoff.go b/vendor/google.golang.org/grpc/backoff.go
index fa31565fd..c40facce5 100644
--- a/vendor/google.golang.org/grpc/backoff.go
+++ b/vendor/google.golang.org/grpc/backoff.go
@@ -16,23 +16,81 @@
*
*/
-// See internal/backoff package for the backoff implementation. This file is
-// kept for the exported types and API backward compatility.
-
package grpc
import (
+ "math/rand"
"time"
)
// DefaultBackoffConfig uses values specified for backoff in
// https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md.
var DefaultBackoffConfig = BackoffConfig{
- MaxDelay: 120 * time.Second,
+ MaxDelay: 120 * time.Second,
+ baseDelay: 1.0 * time.Second,
+ factor: 1.6,
+ jitter: 0.2,
+}
+
+// backoffStrategy defines the methodology for backing off after a grpc
+// connection failure.
+//
+// This is unexported until the gRPC project decides whether or not to allow
+// alternative backoff strategies. Once a decision is made, this type and its
+// method may be exported.
+type backoffStrategy interface {
+ // backoff returns the amount of time to wait before the next retry given
+ // the number of consecutive failures.
+ backoff(retries int) time.Duration
}
// BackoffConfig defines the parameters for the default gRPC backoff strategy.
type BackoffConfig struct {
// MaxDelay is the upper bound of backoff delay.
MaxDelay time.Duration
+
+ // TODO(stevvooe): The following fields are not exported, as allowing
+ // changes would violate the current gRPC specification for backoff. If
+ // gRPC decides to allow more interesting backoff strategies, these fields
+ // may be opened up in the future.
+
+ // baseDelay is the amount of time to wait before retrying after the first
+ // failure.
+ baseDelay time.Duration
+
+ // factor is applied to the backoff after each retry.
+ factor float64
+
+ // jitter provides a range to randomize backoff delays.
+ jitter float64
+}
+
+func setDefaults(bc *BackoffConfig) {
+ md := bc.MaxDelay
+ *bc = DefaultBackoffConfig
+
+ if md > 0 {
+ bc.MaxDelay = md
+ }
+}
+
+func (bc BackoffConfig) backoff(retries int) time.Duration {
+ if retries == 0 {
+ return bc.baseDelay
+ }
+ backoff, max := float64(bc.baseDelay), float64(bc.MaxDelay)
+ for backoff < max && retries > 0 {
+ backoff *= bc.factor
+ retries--
+ }
+ if backoff > max {
+ backoff = max
+ }
+ // Randomize backoff delays so that if a cluster of requests start at
+ // the same time, they won't operate in lockstep.
+ backoff *= 1 + bc.jitter*(rand.Float64()*2-1)
+ if backoff < 0 {
+ return 0
+ }
+ return time.Duration(backoff)
}
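To illustrate the restored backoff arithmetic above, a small standalone sketch that re-implements the same calculation with the DefaultBackoffConfig values (1s base, factor 1.6, jitter 0.2, 120s cap). This is an illustration only; the real method is unexported inside the grpc package.

```go
package main

import (
	"fmt"
	"math/rand"
	"time"
)

// backoff mirrors BackoffConfig.backoff from the diff above: grow the delay
// exponentially from baseDelay by factor, cap it at maxDelay, then apply jitter.
func backoff(retries int, baseDelay, maxDelay time.Duration, factor, jitter float64) time.Duration {
	if retries == 0 {
		return baseDelay
	}
	cur, max := float64(baseDelay), float64(maxDelay)
	for cur < max && retries > 0 {
		cur *= factor
		retries--
	}
	if cur > max {
		cur = max
	}
	// Randomize so a cluster of retries does not operate in lockstep.
	cur *= 1 + jitter*(rand.Float64()*2-1)
	if cur < 0 {
		return 0
	}
	return time.Duration(cur)
}

func main() {
	for i := 0; i < 5; i++ {
		fmt.Printf("retry %d: %v\n", i, backoff(i, time.Second, 120*time.Second, 1.6, 0.2))
	}
}
```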
diff --git a/vendor/google.golang.org/grpc/balancer.go b/vendor/google.golang.org/grpc/balancer.go
index 5aeb646d1..300da6c5e 100644
--- a/vendor/google.golang.org/grpc/balancer.go
+++ b/vendor/google.golang.org/grpc/balancer.go
@@ -19,6 +19,7 @@
package grpc
import (
+ "fmt"
"net"
"sync"
@@ -31,8 +32,7 @@ import (
)
// Address represents a server the client connects to.
-//
-// Deprecated: please use package balancer.
+// This is the EXPERIMENTAL API and may be changed or extended in the future.
type Address struct {
// Addr is the server address on which a connection will be established.
Addr string
@@ -42,8 +42,6 @@ type Address struct {
}
// BalancerConfig specifies the configurations for Balancer.
-//
-// Deprecated: please use package balancer.
type BalancerConfig struct {
// DialCreds is the transport credential the Balancer implementation can
// use to dial to a remote load balancer server. The Balancer implementations
@@ -56,8 +54,7 @@ type BalancerConfig struct {
}
// BalancerGetOptions configures a Get call.
-//
-// Deprecated: please use package balancer.
+// This is the EXPERIMENTAL API and may be changed or extended in the future.
type BalancerGetOptions struct {
// BlockingWait specifies whether Get should block when there is no
// connected address.
@@ -65,8 +62,7 @@ type BalancerGetOptions struct {
}
// Balancer chooses network addresses for RPCs.
-//
-// Deprecated: please use package balancer.
+// This is the EXPERIMENTAL API and may be changed or extended in the future.
type Balancer interface {
// Start does the initialization work to bootstrap a Balancer. For example,
// this function may start the name resolution and watch the updates. It will
@@ -117,10 +113,28 @@ type Balancer interface {
Close() error
}
+// downErr implements net.Error. It is constructed by gRPC internals and passed to the down
+// call of Balancer.
+type downErr struct {
+ timeout bool
+ temporary bool
+ desc string
+}
+
+func (e downErr) Error() string { return e.desc }
+func (e downErr) Timeout() bool { return e.timeout }
+func (e downErr) Temporary() bool { return e.temporary }
+
+func downErrorf(timeout, temporary bool, format string, a ...interface{}) downErr {
+ return downErr{
+ timeout: timeout,
+ temporary: temporary,
+ desc: fmt.Sprintf(format, a...),
+ }
+}
+
// RoundRobin returns a Balancer that selects addresses round-robin. It uses r to watch
// the name resolution updates and updates the addresses available correspondingly.
-//
-// Deprecated: please use package balancer/roundrobin.
func RoundRobin(r naming.Resolver) Balancer {
return &roundRobin{r: r}
}
@@ -389,3 +403,7 @@ func (rr *roundRobin) Close() error {
type pickFirst struct {
*roundRobin
}
+
+func pickFirstBalancerV1(r naming.Resolver) Balancer {
+ return &pickFirst{&roundRobin{r: r}}
+}
diff --git a/vendor/google.golang.org/grpc/balancer/balancer.go b/vendor/google.golang.org/grpc/balancer/balancer.go
index ee1703f03..219a2940c 100644
--- a/vendor/google.golang.org/grpc/balancer/balancer.go
+++ b/vendor/google.golang.org/grpc/balancer/balancer.go
@@ -28,7 +28,6 @@ import (
"golang.org/x/net/context"
"google.golang.org/grpc/connectivity"
"google.golang.org/grpc/credentials"
- "google.golang.org/grpc/metadata"
"google.golang.org/grpc/resolver"
)
@@ -37,12 +36,9 @@ var (
m = make(map[string]Builder)
)
-// Register registers the balancer builder to the balancer map. b.Name
-// (lowercased) will be used as the name registered with this builder.
-//
-// NOTE: this function must only be called during initialization time (i.e. in
-// an init() function), and is not thread-safe. If multiple Balancers are
-// registered with the same name, the one registered last will take effect.
+// Register registers the balancer builder to the balancer map.
+// b.Name (lowercased) will be used as the name registered with
+// this builder.
func Register(b Builder) {
m[strings.ToLower(b.Name())] = b
}
@@ -89,12 +85,7 @@ type SubConn interface {
}
// NewSubConnOptions contains options to create new SubConn.
-type NewSubConnOptions struct {
- // CredsBundle is the credentials bundle that will be used in the created
- // SubConn. If it's nil, the original creds from grpc DialOptions will be
- // used.
- CredsBundle credentials.Bundle
-}
+type NewSubConnOptions struct{}
// ClientConn represents a gRPC ClientConn.
//
@@ -131,14 +122,10 @@ type BuildOptions struct {
// use to dial to a remote load balancer server. The Balancer implementations
// can ignore this if it does not need to talk to another party securely.
DialCreds credentials.TransportCredentials
- // CredsBundle is the credentials bundle that the Balancer can use.
- CredsBundle credentials.Bundle
// Dialer is the custom dialer the Balancer implementation can use to dial
// to a remote load balancer server. The Balancer implementations
// can ignore this if it doesn't need to talk to remote balancer.
Dialer func(context.Context, string) (net.Conn, error)
- // ChannelzParentID is the entity parent's channelz unique identification number.
- ChannelzParentID int64
}
// Builder creates a balancer.
@@ -151,21 +138,12 @@ type Builder interface {
}
// PickOptions contains addition information for the Pick operation.
-type PickOptions struct {
- // FullMethodName is the method name that NewClientStream() is called
- // with. The canonical format is /service/Method.
- FullMethodName string
- // Header contains the metadata from the RPC's client header. The metadata
- // should not be modified; make a copy first if needed.
- Header metadata.MD
-}
+type PickOptions struct{}
// DoneInfo contains additional information for done.
type DoneInfo struct {
// Err is the rpc error the RPC finished with. It could be nil.
Err error
- // Trailer contains the metadata from the RPC's trailer, if present.
- Trailer metadata.MD
// BytesSent indicates if any bytes have been sent to the server.
BytesSent bool
// BytesReceived indicates if any byte has been received from the server.
@@ -182,7 +160,7 @@ var (
)
// Picker is used by gRPC to pick a SubConn to send an RPC.
-// Balancer is expected to generate a new picker from its snapshot every time its
+// Balancer is expected to generate a new picker from its snapshot everytime its
// internal state has changed.
//
// The pickers used by gRPC can be updated by ClientConn.UpdateBalancerState().
@@ -243,45 +221,3 @@ type Balancer interface {
// ClientConn.RemoveSubConn for its existing SubConns.
Close()
}
-
-// ConnectivityStateEvaluator takes the connectivity states of multiple SubConns
-// and returns one aggregated connectivity state.
-//
-// It's not thread safe.
-type ConnectivityStateEvaluator struct {
- numReady uint64 // Number of addrConns in ready state.
- numConnecting uint64 // Number of addrConns in connecting state.
- numTransientFailure uint64 // Number of addrConns in transientFailure.
-}
-
-// RecordTransition records state change happening in subConn and based on that
-// it evaluates what aggregated state should be.
-//
-// - If at least one SubConn in Ready, the aggregated state is Ready;
-// - Else if at least one SubConn in Connecting, the aggregated state is Connecting;
-// - Else the aggregated state is TransientFailure.
-//
-// Idle and Shutdown are not considered.
-func (cse *ConnectivityStateEvaluator) RecordTransition(oldState, newState connectivity.State) connectivity.State {
- // Update counters.
- for idx, state := range []connectivity.State{oldState, newState} {
- updateVal := 2*uint64(idx) - 1 // -1 for oldState and +1 for new.
- switch state {
- case connectivity.Ready:
- cse.numReady += updateVal
- case connectivity.Connecting:
- cse.numConnecting += updateVal
- case connectivity.TransientFailure:
- cse.numTransientFailure += updateVal
- }
- }
-
- // Evaluate.
- if cse.numReady > 0 {
- return connectivity.Ready
- }
- if cse.numConnecting > 0 {
- return connectivity.Connecting
- }
- return connectivity.TransientFailure
-}
diff --git a/vendor/google.golang.org/grpc/balancer/base/balancer.go b/vendor/google.golang.org/grpc/balancer/base/balancer.go
index 23d13511b..1e962b724 100644
--- a/vendor/google.golang.org/grpc/balancer/base/balancer.go
+++ b/vendor/google.golang.org/grpc/balancer/base/balancer.go
@@ -146,6 +146,7 @@ func (b *baseBalancer) HandleSubConnStateChange(sc balancer.SubConn, s connectiv
}
b.cc.UpdateBalancerState(b.state, b.picker)
+ return
}
// Close is a nop because base balancer doesn't have internal state to clean up,
diff --git a/vendor/google.golang.org/grpc/balancer_conn_wrappers.go b/vendor/google.golang.org/grpc/balancer_conn_wrappers.go
index 1ab95fde2..db6f0ae3f 100644
--- a/vendor/google.golang.org/grpc/balancer_conn_wrappers.go
+++ b/vendor/google.golang.org/grpc/balancer_conn_wrappers.go
@@ -115,7 +115,7 @@ func newCCBalancerWrapper(cc *ClientConn, b balancer.Builder, bopts balancer.Bui
return ccb
}
-// watcher balancer functions sequentially, so the balancer can be implemented
+// watcher balancer functions sequencially, so the balancer can be implemeneted
// lock-free.
func (ccb *ccBalancerWrapper) watcher() {
for {
@@ -197,7 +197,7 @@ func (ccb *ccBalancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer
if ccb.subConns == nil {
return nil, fmt.Errorf("grpc: ClientConn balancer wrapper was closed")
}
- ac, err := ccb.cc.newAddrConn(addrs, opts)
+ ac, err := ccb.cc.newAddrConn(addrs)
if err != nil {
return nil, err
}
@@ -257,7 +257,6 @@ func (acbw *acBalancerWrapper) UpdateAddresses(addrs []resolver.Address) {
}
if !acbw.ac.tryUpdateAddrs(addrs) {
cc := acbw.ac.cc
- opts := acbw.ac.scopts
acbw.ac.mu.Lock()
// Set old ac.acbw to nil so the Shutdown state update will be ignored
// by balancer.
@@ -273,7 +272,7 @@ func (acbw *acBalancerWrapper) UpdateAddresses(addrs []resolver.Address) {
return
}
- ac, err := cc.newAddrConn(addrs, opts)
+ ac, err := cc.newAddrConn(addrs)
if err != nil {
grpclog.Warningf("acBalancerWrapper: UpdateAddresses: failed to newAddrConn: %v", err)
return
diff --git a/vendor/google.golang.org/grpc/balancer_v1_wrapper.go b/vendor/google.golang.org/grpc/balancer_v1_wrapper.go
index e0ce32cfb..faabf87d0 100644
--- a/vendor/google.golang.org/grpc/balancer_v1_wrapper.go
+++ b/vendor/google.golang.org/grpc/balancer_v1_wrapper.go
@@ -55,7 +55,7 @@ func (bwb *balancerWrapperBuilder) Build(cc balancer.ClientConn, opts balancer.B
startCh: make(chan struct{}),
conns: make(map[resolver.Address]balancer.SubConn),
connSt: make(map[balancer.SubConn]*scState),
- csEvltr: &balancer.ConnectivityStateEvaluator{},
+ csEvltr: &connectivityStateEvaluator{},
state: connectivity.Idle,
}
cc.UpdateBalancerState(connectivity.Idle, bw)
@@ -80,6 +80,10 @@ type balancerWrapper struct {
cc balancer.ClientConn
targetAddr string // Target without the scheme.
+ // To aggregate the connectivity state.
+ csEvltr *connectivityStateEvaluator
+ state connectivity.State
+
mu sync.Mutex
conns map[resolver.Address]balancer.SubConn
connSt map[balancer.SubConn]*scState
@@ -88,10 +92,6 @@ type balancerWrapper struct {
// - NewSubConn is created, cc wants to notify balancer of state changes;
// - Build hasn't return, cc doesn't have access to balancer.
startCh chan struct{}
-
- // To aggregate the connectivity state.
- csEvltr *balancer.ConnectivityStateEvaluator
- state connectivity.State
}
// lbWatcher watches the Notify channel of the balancer and manages
@@ -248,7 +248,7 @@ func (bw *balancerWrapper) HandleSubConnStateChange(sc balancer.SubConn, s conne
scSt.down(errConnClosing)
}
}
- sa := bw.csEvltr.RecordTransition(oldS, s)
+ sa := bw.csEvltr.recordTransition(oldS, s)
if bw.state != sa {
bw.state = sa
}
@@ -257,6 +257,7 @@ func (bw *balancerWrapper) HandleSubConnStateChange(sc balancer.SubConn, s conne
// Remove state for this sc.
delete(bw.connSt, sc)
}
+ return
}
func (bw *balancerWrapper) HandleResolvedAddrs([]resolver.Address, error) {
@@ -269,6 +270,7 @@ func (bw *balancerWrapper) HandleResolvedAddrs([]resolver.Address, error) {
}
// There should be a resolver inside the balancer.
// All updates here, if any, are ignored.
+ return
}
func (bw *balancerWrapper) Close() {
@@ -280,6 +282,7 @@ func (bw *balancerWrapper) Close() {
close(bw.startCh)
}
bw.balancer.Close()
+ return
}
// The picker is the balancerWrapper itself.
@@ -326,3 +329,47 @@ func (bw *balancerWrapper) Pick(ctx context.Context, opts balancer.PickOptions)
return sc, done, nil
}
+
+// connectivityStateEvaluator gets updated by addrConns when their
+// states transition, based on which it evaluates the state of
+// ClientConn.
+type connectivityStateEvaluator struct {
+ mu sync.Mutex
+ numReady uint64 // Number of addrConns in ready state.
+ numConnecting uint64 // Number of addrConns in connecting state.
+ numTransientFailure uint64 // Number of addrConns in transientFailure.
+}
+
+// recordTransition records state change happening in every subConn and based on
+// that it evaluates what aggregated state should be.
+// It can only transition between Ready, Connecting and TransientFailure. Other states,
+// Idle and Shutdown are transitioned into by ClientConn; in the beginning of the connection
+// before any subConn is created ClientConn is in idle state. In the end when ClientConn
+// closes it is in Shutdown state.
+// TODO Note that in later releases, a ClientConn with no activity will be put into an Idle state.
+func (cse *connectivityStateEvaluator) recordTransition(oldState, newState connectivity.State) connectivity.State {
+ cse.mu.Lock()
+ defer cse.mu.Unlock()
+
+ // Update counters.
+ for idx, state := range []connectivity.State{oldState, newState} {
+ updateVal := 2*uint64(idx) - 1 // -1 for oldState and +1 for new.
+ switch state {
+ case connectivity.Ready:
+ cse.numReady += updateVal
+ case connectivity.Connecting:
+ cse.numConnecting += updateVal
+ case connectivity.TransientFailure:
+ cse.numTransientFailure += updateVal
+ }
+ }
+
+ // Evaluate.
+ if cse.numReady > 0 {
+ return connectivity.Ready
+ }
+ if cse.numConnecting > 0 {
+ return connectivity.Connecting
+ }
+ return connectivity.TransientFailure
+}
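
The connectivityStateEvaluator restored above aggregates per-SubConn transitions into a single ClientConn state, with Ready taking precedence over Connecting, and Connecting over TransientFailure. The following is a minimal standalone sketch of that aggregation rule; it is not part of the vendored code and only reuses the connectivity package that the wrapper already imports.

package main

import (
	"fmt"
	"sync"

	"google.golang.org/grpc/connectivity"
)

// evaluator mirrors the aggregation logic of connectivityStateEvaluator above.
type evaluator struct {
	mu                  sync.Mutex
	numReady            uint64
	numConnecting       uint64
	numTransientFailure uint64
}

func (e *evaluator) recordTransition(oldState, newState connectivity.State) connectivity.State {
	e.mu.Lock()
	defer e.mu.Unlock()
	for idx, state := range []connectivity.State{oldState, newState} {
		updateVal := 2*uint64(idx) - 1 // decrement the old state's counter, increment the new one
		switch state {
		case connectivity.Ready:
			e.numReady += updateVal
		case connectivity.Connecting:
			e.numConnecting += updateVal
		case connectivity.TransientFailure:
			e.numTransientFailure += updateVal
		}
	}
	switch {
	case e.numReady > 0:
		return connectivity.Ready
	case e.numConnecting > 0:
		return connectivity.Connecting
	default:
		return connectivity.TransientFailure
	}
}

func main() {
	e := &evaluator{}
	fmt.Println(e.recordTransition(connectivity.Idle, connectivity.Connecting))        // CONNECTING
	fmt.Println(e.recordTransition(connectivity.Idle, connectivity.Connecting))        // CONNECTING
	fmt.Println(e.recordTransition(connectivity.Connecting, connectivity.Ready))       // READY
	fmt.Println(e.recordTransition(connectivity.Ready, connectivity.TransientFailure)) // CONNECTING: one SubConn is still connecting
}

Ready always wins because a single healthy SubConn is enough to serve RPCs; TransientFailure is reported only when nothing is ready or connecting.
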
diff --git a/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go b/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go
deleted file mode 100644
index f393bb661..000000000
--- a/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go
+++ /dev/null
@@ -1,900 +0,0 @@
-// Code generated by protoc-gen-go. DO NOT EDIT.
-// source: grpc/binarylog/grpc_binarylog_v1/binarylog.proto
-
-package grpc_binarylog_v1 // import "google.golang.org/grpc/binarylog/grpc_binarylog_v1"
-
-import proto "github.com/golang/protobuf/proto"
-import fmt "fmt"
-import math "math"
-import duration "github.com/golang/protobuf/ptypes/duration"
-import timestamp "github.com/golang/protobuf/ptypes/timestamp"
-
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
-
-// Enumerates the type of event
-// Note the terminology is different from the RPC semantics
-// definition, but the same meaning is expressed here.
-type GrpcLogEntry_EventType int32
-
-const (
- GrpcLogEntry_EVENT_TYPE_UNKNOWN GrpcLogEntry_EventType = 0
- // Header sent from client to server
- GrpcLogEntry_EVENT_TYPE_CLIENT_HEADER GrpcLogEntry_EventType = 1
- // Header sent from server to client
- GrpcLogEntry_EVENT_TYPE_SERVER_HEADER GrpcLogEntry_EventType = 2
- // Message sent from client to server
- GrpcLogEntry_EVENT_TYPE_CLIENT_MESSAGE GrpcLogEntry_EventType = 3
- // Message sent from server to client
- GrpcLogEntry_EVENT_TYPE_SERVER_MESSAGE GrpcLogEntry_EventType = 4
- // A signal that client is done sending
- GrpcLogEntry_EVENT_TYPE_CLIENT_HALF_CLOSE GrpcLogEntry_EventType = 5
- // Trailer indicates the end of the RPC.
- // On client side, this event means a trailer was either received
- // from the network or the gRPC library locally generated a status
- // to inform the application about a failure.
- // On server side, this event means the server application requested
- // to send a trailer. Note: EVENT_TYPE_CANCEL may still arrive after
- // this due to races on server side.
- GrpcLogEntry_EVENT_TYPE_SERVER_TRAILER GrpcLogEntry_EventType = 6
- // A signal that the RPC is cancelled. On client side, this
- // indicates the client application requests a cancellation.
- // On server side, this indicates that cancellation was detected.
- // Note: This marks the end of the RPC. Events may arrive after
- // this due to races. For example, on client side a trailer
- // may arrive even though the application requested to cancel the RPC.
- GrpcLogEntry_EVENT_TYPE_CANCEL GrpcLogEntry_EventType = 7
-)
-
-var GrpcLogEntry_EventType_name = map[int32]string{
- 0: "EVENT_TYPE_UNKNOWN",
- 1: "EVENT_TYPE_CLIENT_HEADER",
- 2: "EVENT_TYPE_SERVER_HEADER",
- 3: "EVENT_TYPE_CLIENT_MESSAGE",
- 4: "EVENT_TYPE_SERVER_MESSAGE",
- 5: "EVENT_TYPE_CLIENT_HALF_CLOSE",
- 6: "EVENT_TYPE_SERVER_TRAILER",
- 7: "EVENT_TYPE_CANCEL",
-}
-var GrpcLogEntry_EventType_value = map[string]int32{
- "EVENT_TYPE_UNKNOWN": 0,
- "EVENT_TYPE_CLIENT_HEADER": 1,
- "EVENT_TYPE_SERVER_HEADER": 2,
- "EVENT_TYPE_CLIENT_MESSAGE": 3,
- "EVENT_TYPE_SERVER_MESSAGE": 4,
- "EVENT_TYPE_CLIENT_HALF_CLOSE": 5,
- "EVENT_TYPE_SERVER_TRAILER": 6,
- "EVENT_TYPE_CANCEL": 7,
-}
-
-func (x GrpcLogEntry_EventType) String() string {
- return proto.EnumName(GrpcLogEntry_EventType_name, int32(x))
-}
-func (GrpcLogEntry_EventType) EnumDescriptor() ([]byte, []int) {
- return fileDescriptor_binarylog_264c8c9c551ce911, []int{0, 0}
-}
-
-// Enumerates the entity that generates the log entry
-type GrpcLogEntry_Logger int32
-
-const (
- GrpcLogEntry_LOGGER_UNKNOWN GrpcLogEntry_Logger = 0
- GrpcLogEntry_LOGGER_CLIENT GrpcLogEntry_Logger = 1
- GrpcLogEntry_LOGGER_SERVER GrpcLogEntry_Logger = 2
-)
-
-var GrpcLogEntry_Logger_name = map[int32]string{
- 0: "LOGGER_UNKNOWN",
- 1: "LOGGER_CLIENT",
- 2: "LOGGER_SERVER",
-}
-var GrpcLogEntry_Logger_value = map[string]int32{
- "LOGGER_UNKNOWN": 0,
- "LOGGER_CLIENT": 1,
- "LOGGER_SERVER": 2,
-}
-
-func (x GrpcLogEntry_Logger) String() string {
- return proto.EnumName(GrpcLogEntry_Logger_name, int32(x))
-}
-func (GrpcLogEntry_Logger) EnumDescriptor() ([]byte, []int) {
- return fileDescriptor_binarylog_264c8c9c551ce911, []int{0, 1}
-}
-
-type Address_Type int32
-
-const (
- Address_TYPE_UNKNOWN Address_Type = 0
- // address is in 1.2.3.4 form
- Address_TYPE_IPV4 Address_Type = 1
- // address is in IPv6 canonical form (RFC5952 section 4)
- // The scope is NOT included in the address string.
- Address_TYPE_IPV6 Address_Type = 2
- // address is UDS string
- Address_TYPE_UNIX Address_Type = 3
-)
-
-var Address_Type_name = map[int32]string{
- 0: "TYPE_UNKNOWN",
- 1: "TYPE_IPV4",
- 2: "TYPE_IPV6",
- 3: "TYPE_UNIX",
-}
-var Address_Type_value = map[string]int32{
- "TYPE_UNKNOWN": 0,
- "TYPE_IPV4": 1,
- "TYPE_IPV6": 2,
- "TYPE_UNIX": 3,
-}
-
-func (x Address_Type) String() string {
- return proto.EnumName(Address_Type_name, int32(x))
-}
-func (Address_Type) EnumDescriptor() ([]byte, []int) {
- return fileDescriptor_binarylog_264c8c9c551ce911, []int{7, 0}
-}
-
-// Log entry we store in binary logs
-type GrpcLogEntry struct {
- // The timestamp of the binary log message
- Timestamp *timestamp.Timestamp `protobuf:"bytes,1,opt,name=timestamp,proto3" json:"timestamp,omitempty"`
- // Uniquely identifies a call. The value must not be 0 in order to disambiguate
- // from an unset value.
- // Each call may have several log entries, they will all have the same call_id.
- // Nothing is guaranteed about their value other than they are unique across
- // different RPCs in the same gRPC process.
- CallId uint64 `protobuf:"varint,2,opt,name=call_id,json=callId,proto3" json:"call_id,omitempty"`
- // The entry sequence id for this call. The first GrpcLogEntry has a
- // value of 1, to disambiguate from an unset value. The purpose of
- // this field is to detect missing entries in environments where
- // durability or ordering is not guaranteed.
- SequenceIdWithinCall uint64 `protobuf:"varint,3,opt,name=sequence_id_within_call,json=sequenceIdWithinCall,proto3" json:"sequence_id_within_call,omitempty"`
- Type GrpcLogEntry_EventType `protobuf:"varint,4,opt,name=type,proto3,enum=grpc.binarylog.v1.GrpcLogEntry_EventType" json:"type,omitempty"`
- Logger GrpcLogEntry_Logger `protobuf:"varint,5,opt,name=logger,proto3,enum=grpc.binarylog.v1.GrpcLogEntry_Logger" json:"logger,omitempty"`
- // The logger uses one of the following fields to record the payload,
- // according to the type of the log entry.
- //
- // Types that are valid to be assigned to Payload:
- // *GrpcLogEntry_ClientHeader
- // *GrpcLogEntry_ServerHeader
- // *GrpcLogEntry_Message
- // *GrpcLogEntry_Trailer
- Payload isGrpcLogEntry_Payload `protobuf_oneof:"payload"`
- // true if payload does not represent the full message or metadata.
- PayloadTruncated bool `protobuf:"varint,10,opt,name=payload_truncated,json=payloadTruncated,proto3" json:"payload_truncated,omitempty"`
- // Peer address information, will only be recorded on the first
- // incoming event. On client side, peer is logged on
- // EVENT_TYPE_SERVER_HEADER normally or EVENT_TYPE_SERVER_TRAILER in
- // the case of trailers-only. On server side, peer is always
- // logged on EVENT_TYPE_CLIENT_HEADER.
- Peer *Address `protobuf:"bytes,11,opt,name=peer,proto3" json:"peer,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *GrpcLogEntry) Reset() { *m = GrpcLogEntry{} }
-func (m *GrpcLogEntry) String() string { return proto.CompactTextString(m) }
-func (*GrpcLogEntry) ProtoMessage() {}
-func (*GrpcLogEntry) Descriptor() ([]byte, []int) {
- return fileDescriptor_binarylog_264c8c9c551ce911, []int{0}
-}
-func (m *GrpcLogEntry) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_GrpcLogEntry.Unmarshal(m, b)
-}
-func (m *GrpcLogEntry) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_GrpcLogEntry.Marshal(b, m, deterministic)
-}
-func (dst *GrpcLogEntry) XXX_Merge(src proto.Message) {
- xxx_messageInfo_GrpcLogEntry.Merge(dst, src)
-}
-func (m *GrpcLogEntry) XXX_Size() int {
- return xxx_messageInfo_GrpcLogEntry.Size(m)
-}
-func (m *GrpcLogEntry) XXX_DiscardUnknown() {
- xxx_messageInfo_GrpcLogEntry.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_GrpcLogEntry proto.InternalMessageInfo
-
-func (m *GrpcLogEntry) GetTimestamp() *timestamp.Timestamp {
- if m != nil {
- return m.Timestamp
- }
- return nil
-}
-
-func (m *GrpcLogEntry) GetCallId() uint64 {
- if m != nil {
- return m.CallId
- }
- return 0
-}
-
-func (m *GrpcLogEntry) GetSequenceIdWithinCall() uint64 {
- if m != nil {
- return m.SequenceIdWithinCall
- }
- return 0
-}
-
-func (m *GrpcLogEntry) GetType() GrpcLogEntry_EventType {
- if m != nil {
- return m.Type
- }
- return GrpcLogEntry_EVENT_TYPE_UNKNOWN
-}
-
-func (m *GrpcLogEntry) GetLogger() GrpcLogEntry_Logger {
- if m != nil {
- return m.Logger
- }
- return GrpcLogEntry_LOGGER_UNKNOWN
-}
-
-type isGrpcLogEntry_Payload interface {
- isGrpcLogEntry_Payload()
-}
-
-type GrpcLogEntry_ClientHeader struct {
- ClientHeader *ClientHeader `protobuf:"bytes,6,opt,name=client_header,json=clientHeader,proto3,oneof"`
-}
-
-type GrpcLogEntry_ServerHeader struct {
- ServerHeader *ServerHeader `protobuf:"bytes,7,opt,name=server_header,json=serverHeader,proto3,oneof"`
-}
-
-type GrpcLogEntry_Message struct {
- Message *Message `protobuf:"bytes,8,opt,name=message,proto3,oneof"`
-}
-
-type GrpcLogEntry_Trailer struct {
- Trailer *Trailer `protobuf:"bytes,9,opt,name=trailer,proto3,oneof"`
-}
-
-func (*GrpcLogEntry_ClientHeader) isGrpcLogEntry_Payload() {}
-
-func (*GrpcLogEntry_ServerHeader) isGrpcLogEntry_Payload() {}
-
-func (*GrpcLogEntry_Message) isGrpcLogEntry_Payload() {}
-
-func (*GrpcLogEntry_Trailer) isGrpcLogEntry_Payload() {}
-
-func (m *GrpcLogEntry) GetPayload() isGrpcLogEntry_Payload {
- if m != nil {
- return m.Payload
- }
- return nil
-}
-
-func (m *GrpcLogEntry) GetClientHeader() *ClientHeader {
- if x, ok := m.GetPayload().(*GrpcLogEntry_ClientHeader); ok {
- return x.ClientHeader
- }
- return nil
-}
-
-func (m *GrpcLogEntry) GetServerHeader() *ServerHeader {
- if x, ok := m.GetPayload().(*GrpcLogEntry_ServerHeader); ok {
- return x.ServerHeader
- }
- return nil
-}
-
-func (m *GrpcLogEntry) GetMessage() *Message {
- if x, ok := m.GetPayload().(*GrpcLogEntry_Message); ok {
- return x.Message
- }
- return nil
-}
-
-func (m *GrpcLogEntry) GetTrailer() *Trailer {
- if x, ok := m.GetPayload().(*GrpcLogEntry_Trailer); ok {
- return x.Trailer
- }
- return nil
-}
-
-func (m *GrpcLogEntry) GetPayloadTruncated() bool {
- if m != nil {
- return m.PayloadTruncated
- }
- return false
-}
-
-func (m *GrpcLogEntry) GetPeer() *Address {
- if m != nil {
- return m.Peer
- }
- return nil
-}
-
-// XXX_OneofFuncs is for the internal use of the proto package.
-func (*GrpcLogEntry) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) {
- return _GrpcLogEntry_OneofMarshaler, _GrpcLogEntry_OneofUnmarshaler, _GrpcLogEntry_OneofSizer, []interface{}{
- (*GrpcLogEntry_ClientHeader)(nil),
- (*GrpcLogEntry_ServerHeader)(nil),
- (*GrpcLogEntry_Message)(nil),
- (*GrpcLogEntry_Trailer)(nil),
- }
-}
-
-func _GrpcLogEntry_OneofMarshaler(msg proto.Message, b *proto.Buffer) error {
- m := msg.(*GrpcLogEntry)
- // payload
- switch x := m.Payload.(type) {
- case *GrpcLogEntry_ClientHeader:
- b.EncodeVarint(6<<3 | proto.WireBytes)
- if err := b.EncodeMessage(x.ClientHeader); err != nil {
- return err
- }
- case *GrpcLogEntry_ServerHeader:
- b.EncodeVarint(7<<3 | proto.WireBytes)
- if err := b.EncodeMessage(x.ServerHeader); err != nil {
- return err
- }
- case *GrpcLogEntry_Message:
- b.EncodeVarint(8<<3 | proto.WireBytes)
- if err := b.EncodeMessage(x.Message); err != nil {
- return err
- }
- case *GrpcLogEntry_Trailer:
- b.EncodeVarint(9<<3 | proto.WireBytes)
- if err := b.EncodeMessage(x.Trailer); err != nil {
- return err
- }
- case nil:
- default:
- return fmt.Errorf("GrpcLogEntry.Payload has unexpected type %T", x)
- }
- return nil
-}
-
-func _GrpcLogEntry_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) {
- m := msg.(*GrpcLogEntry)
- switch tag {
- case 6: // payload.client_header
- if wire != proto.WireBytes {
- return true, proto.ErrInternalBadWireType
- }
- msg := new(ClientHeader)
- err := b.DecodeMessage(msg)
- m.Payload = &GrpcLogEntry_ClientHeader{msg}
- return true, err
- case 7: // payload.server_header
- if wire != proto.WireBytes {
- return true, proto.ErrInternalBadWireType
- }
- msg := new(ServerHeader)
- err := b.DecodeMessage(msg)
- m.Payload = &GrpcLogEntry_ServerHeader{msg}
- return true, err
- case 8: // payload.message
- if wire != proto.WireBytes {
- return true, proto.ErrInternalBadWireType
- }
- msg := new(Message)
- err := b.DecodeMessage(msg)
- m.Payload = &GrpcLogEntry_Message{msg}
- return true, err
- case 9: // payload.trailer
- if wire != proto.WireBytes {
- return true, proto.ErrInternalBadWireType
- }
- msg := new(Trailer)
- err := b.DecodeMessage(msg)
- m.Payload = &GrpcLogEntry_Trailer{msg}
- return true, err
- default:
- return false, nil
- }
-}
-
-func _GrpcLogEntry_OneofSizer(msg proto.Message) (n int) {
- m := msg.(*GrpcLogEntry)
- // payload
- switch x := m.Payload.(type) {
- case *GrpcLogEntry_ClientHeader:
- s := proto.Size(x.ClientHeader)
- n += 1 // tag and wire
- n += proto.SizeVarint(uint64(s))
- n += s
- case *GrpcLogEntry_ServerHeader:
- s := proto.Size(x.ServerHeader)
- n += 1 // tag and wire
- n += proto.SizeVarint(uint64(s))
- n += s
- case *GrpcLogEntry_Message:
- s := proto.Size(x.Message)
- n += 1 // tag and wire
- n += proto.SizeVarint(uint64(s))
- n += s
- case *GrpcLogEntry_Trailer:
- s := proto.Size(x.Trailer)
- n += 1 // tag and wire
- n += proto.SizeVarint(uint64(s))
- n += s
- case nil:
- default:
- panic(fmt.Sprintf("proto: unexpected type %T in oneof", x))
- }
- return n
-}
-
-type ClientHeader struct {
- // This contains only the metadata from the application.
- Metadata *Metadata `protobuf:"bytes,1,opt,name=metadata,proto3" json:"metadata,omitempty"`
- // The name of the RPC method, which looks something like:
- // /<service>/<method>
- // Note the leading "/" character.
- MethodName string `protobuf:"bytes,2,opt,name=method_name,json=methodName,proto3" json:"method_name,omitempty"`
- // A single process may be used to run multiple virtual
- // servers with different identities.
- // The authority is the name of such a server identity.
- // It is typically a portion of the URI in the form of
- // <host> or <host>:<port> .
- Authority string `protobuf:"bytes,3,opt,name=authority,proto3" json:"authority,omitempty"`
- // the RPC timeout
- Timeout *duration.Duration `protobuf:"bytes,4,opt,name=timeout,proto3" json:"timeout,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *ClientHeader) Reset() { *m = ClientHeader{} }
-func (m *ClientHeader) String() string { return proto.CompactTextString(m) }
-func (*ClientHeader) ProtoMessage() {}
-func (*ClientHeader) Descriptor() ([]byte, []int) {
- return fileDescriptor_binarylog_264c8c9c551ce911, []int{1}
-}
-func (m *ClientHeader) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_ClientHeader.Unmarshal(m, b)
-}
-func (m *ClientHeader) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_ClientHeader.Marshal(b, m, deterministic)
-}
-func (dst *ClientHeader) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ClientHeader.Merge(dst, src)
-}
-func (m *ClientHeader) XXX_Size() int {
- return xxx_messageInfo_ClientHeader.Size(m)
-}
-func (m *ClientHeader) XXX_DiscardUnknown() {
- xxx_messageInfo_ClientHeader.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ClientHeader proto.InternalMessageInfo
-
-func (m *ClientHeader) GetMetadata() *Metadata {
- if m != nil {
- return m.Metadata
- }
- return nil
-}
-
-func (m *ClientHeader) GetMethodName() string {
- if m != nil {
- return m.MethodName
- }
- return ""
-}
-
-func (m *ClientHeader) GetAuthority() string {
- if m != nil {
- return m.Authority
- }
- return ""
-}
-
-func (m *ClientHeader) GetTimeout() *duration.Duration {
- if m != nil {
- return m.Timeout
- }
- return nil
-}
-
-type ServerHeader struct {
- // This contains only the metadata from the application.
- Metadata *Metadata `protobuf:"bytes,1,opt,name=metadata,proto3" json:"metadata,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *ServerHeader) Reset() { *m = ServerHeader{} }
-func (m *ServerHeader) String() string { return proto.CompactTextString(m) }
-func (*ServerHeader) ProtoMessage() {}
-func (*ServerHeader) Descriptor() ([]byte, []int) {
- return fileDescriptor_binarylog_264c8c9c551ce911, []int{2}
-}
-func (m *ServerHeader) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_ServerHeader.Unmarshal(m, b)
-}
-func (m *ServerHeader) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_ServerHeader.Marshal(b, m, deterministic)
-}
-func (dst *ServerHeader) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ServerHeader.Merge(dst, src)
-}
-func (m *ServerHeader) XXX_Size() int {
- return xxx_messageInfo_ServerHeader.Size(m)
-}
-func (m *ServerHeader) XXX_DiscardUnknown() {
- xxx_messageInfo_ServerHeader.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ServerHeader proto.InternalMessageInfo
-
-func (m *ServerHeader) GetMetadata() *Metadata {
- if m != nil {
- return m.Metadata
- }
- return nil
-}
-
-type Trailer struct {
- // This contains only the metadata from the application.
- Metadata *Metadata `protobuf:"bytes,1,opt,name=metadata,proto3" json:"metadata,omitempty"`
- // The gRPC status code.
- StatusCode uint32 `protobuf:"varint,2,opt,name=status_code,json=statusCode,proto3" json:"status_code,omitempty"`
- // An original status message before any transport specific
- // encoding.
- StatusMessage string `protobuf:"bytes,3,opt,name=status_message,json=statusMessage,proto3" json:"status_message,omitempty"`
- // The value of the 'grpc-status-details-bin' metadata key. If
- // present, this is always an encoded 'google.rpc.Status' message.
- StatusDetails []byte `protobuf:"bytes,4,opt,name=status_details,json=statusDetails,proto3" json:"status_details,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *Trailer) Reset() { *m = Trailer{} }
-func (m *Trailer) String() string { return proto.CompactTextString(m) }
-func (*Trailer) ProtoMessage() {}
-func (*Trailer) Descriptor() ([]byte, []int) {
- return fileDescriptor_binarylog_264c8c9c551ce911, []int{3}
-}
-func (m *Trailer) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_Trailer.Unmarshal(m, b)
-}
-func (m *Trailer) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_Trailer.Marshal(b, m, deterministic)
-}
-func (dst *Trailer) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Trailer.Merge(dst, src)
-}
-func (m *Trailer) XXX_Size() int {
- return xxx_messageInfo_Trailer.Size(m)
-}
-func (m *Trailer) XXX_DiscardUnknown() {
- xxx_messageInfo_Trailer.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Trailer proto.InternalMessageInfo
-
-func (m *Trailer) GetMetadata() *Metadata {
- if m != nil {
- return m.Metadata
- }
- return nil
-}
-
-func (m *Trailer) GetStatusCode() uint32 {
- if m != nil {
- return m.StatusCode
- }
- return 0
-}
-
-func (m *Trailer) GetStatusMessage() string {
- if m != nil {
- return m.StatusMessage
- }
- return ""
-}
-
-func (m *Trailer) GetStatusDetails() []byte {
- if m != nil {
- return m.StatusDetails
- }
- return nil
-}
-
-// Message payload, used by CLIENT_MESSAGE and SERVER_MESSAGE
-type Message struct {
- // Length of the message. It may not be the same as the length of the
- // data field, as the logging payload can be truncated or omitted.
- Length uint32 `protobuf:"varint,1,opt,name=length,proto3" json:"length,omitempty"`
- // May be truncated or omitted.
- Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *Message) Reset() { *m = Message{} }
-func (m *Message) String() string { return proto.CompactTextString(m) }
-func (*Message) ProtoMessage() {}
-func (*Message) Descriptor() ([]byte, []int) {
- return fileDescriptor_binarylog_264c8c9c551ce911, []int{4}
-}
-func (m *Message) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_Message.Unmarshal(m, b)
-}
-func (m *Message) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_Message.Marshal(b, m, deterministic)
-}
-func (dst *Message) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Message.Merge(dst, src)
-}
-func (m *Message) XXX_Size() int {
- return xxx_messageInfo_Message.Size(m)
-}
-func (m *Message) XXX_DiscardUnknown() {
- xxx_messageInfo_Message.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Message proto.InternalMessageInfo
-
-func (m *Message) GetLength() uint32 {
- if m != nil {
- return m.Length
- }
- return 0
-}
-
-func (m *Message) GetData() []byte {
- if m != nil {
- return m.Data
- }
- return nil
-}
-
-// A list of metadata pairs, used in the payload of client header,
-// server header, and server trailer.
-// Implementations may omit some entries to honor the header limits
-// of GRPC_BINARY_LOG_CONFIG.
-//
-// Header keys added by gRPC are omitted. To be more specific,
-// implementations will not log the following entries, and this is
-// not to be treated as a truncation:
-// - entries handled by grpc that are not user visible, such as those
-// that begin with 'grpc-' (with exception of grpc-trace-bin)
-// or keys like 'lb-token'
-// - transport specific entries, including but not limited to:
-// ':path', ':authority', 'content-encoding', 'user-agent', 'te', etc
-// - entries added for call credentials
-//
-// Implementations must always log grpc-trace-bin if it is present.
-// Practically speaking it will only be visible on server side because
-// grpc-trace-bin is managed by low level client side mechanisms
-// inaccessible from the application level. On server side, the
-// header is just a normal metadata key.
-// The pair will not count towards the size limit.
-type Metadata struct {
- Entry []*MetadataEntry `protobuf:"bytes,1,rep,name=entry,proto3" json:"entry,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *Metadata) Reset() { *m = Metadata{} }
-func (m *Metadata) String() string { return proto.CompactTextString(m) }
-func (*Metadata) ProtoMessage() {}
-func (*Metadata) Descriptor() ([]byte, []int) {
- return fileDescriptor_binarylog_264c8c9c551ce911, []int{5}
-}
-func (m *Metadata) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_Metadata.Unmarshal(m, b)
-}
-func (m *Metadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_Metadata.Marshal(b, m, deterministic)
-}
-func (dst *Metadata) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Metadata.Merge(dst, src)
-}
-func (m *Metadata) XXX_Size() int {
- return xxx_messageInfo_Metadata.Size(m)
-}
-func (m *Metadata) XXX_DiscardUnknown() {
- xxx_messageInfo_Metadata.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Metadata proto.InternalMessageInfo
-
-func (m *Metadata) GetEntry() []*MetadataEntry {
- if m != nil {
- return m.Entry
- }
- return nil
-}
-
-// A metadata key value pair
-type MetadataEntry struct {
- Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
- Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *MetadataEntry) Reset() { *m = MetadataEntry{} }
-func (m *MetadataEntry) String() string { return proto.CompactTextString(m) }
-func (*MetadataEntry) ProtoMessage() {}
-func (*MetadataEntry) Descriptor() ([]byte, []int) {
- return fileDescriptor_binarylog_264c8c9c551ce911, []int{6}
-}
-func (m *MetadataEntry) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_MetadataEntry.Unmarshal(m, b)
-}
-func (m *MetadataEntry) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_MetadataEntry.Marshal(b, m, deterministic)
-}
-func (dst *MetadataEntry) XXX_Merge(src proto.Message) {
- xxx_messageInfo_MetadataEntry.Merge(dst, src)
-}
-func (m *MetadataEntry) XXX_Size() int {
- return xxx_messageInfo_MetadataEntry.Size(m)
-}
-func (m *MetadataEntry) XXX_DiscardUnknown() {
- xxx_messageInfo_MetadataEntry.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_MetadataEntry proto.InternalMessageInfo
-
-func (m *MetadataEntry) GetKey() string {
- if m != nil {
- return m.Key
- }
- return ""
-}
-
-func (m *MetadataEntry) GetValue() []byte {
- if m != nil {
- return m.Value
- }
- return nil
-}
-
-// Address information
-type Address struct {
- Type Address_Type `protobuf:"varint,1,opt,name=type,proto3,enum=grpc.binarylog.v1.Address_Type" json:"type,omitempty"`
- Address string `protobuf:"bytes,2,opt,name=address,proto3" json:"address,omitempty"`
- // only for TYPE_IPV4 and TYPE_IPV6
- IpPort uint32 `protobuf:"varint,3,opt,name=ip_port,json=ipPort,proto3" json:"ip_port,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *Address) Reset() { *m = Address{} }
-func (m *Address) String() string { return proto.CompactTextString(m) }
-func (*Address) ProtoMessage() {}
-func (*Address) Descriptor() ([]byte, []int) {
- return fileDescriptor_binarylog_264c8c9c551ce911, []int{7}
-}
-func (m *Address) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_Address.Unmarshal(m, b)
-}
-func (m *Address) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_Address.Marshal(b, m, deterministic)
-}
-func (dst *Address) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Address.Merge(dst, src)
-}
-func (m *Address) XXX_Size() int {
- return xxx_messageInfo_Address.Size(m)
-}
-func (m *Address) XXX_DiscardUnknown() {
- xxx_messageInfo_Address.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Address proto.InternalMessageInfo
-
-func (m *Address) GetType() Address_Type {
- if m != nil {
- return m.Type
- }
- return Address_TYPE_UNKNOWN
-}
-
-func (m *Address) GetAddress() string {
- if m != nil {
- return m.Address
- }
- return ""
-}
-
-func (m *Address) GetIpPort() uint32 {
- if m != nil {
- return m.IpPort
- }
- return 0
-}
-
-func init() {
- proto.RegisterType((*GrpcLogEntry)(nil), "grpc.binarylog.v1.GrpcLogEntry")
- proto.RegisterType((*ClientHeader)(nil), "grpc.binarylog.v1.ClientHeader")
- proto.RegisterType((*ServerHeader)(nil), "grpc.binarylog.v1.ServerHeader")
- proto.RegisterType((*Trailer)(nil), "grpc.binarylog.v1.Trailer")
- proto.RegisterType((*Message)(nil), "grpc.binarylog.v1.Message")
- proto.RegisterType((*Metadata)(nil), "grpc.binarylog.v1.Metadata")
- proto.RegisterType((*MetadataEntry)(nil), "grpc.binarylog.v1.MetadataEntry")
- proto.RegisterType((*Address)(nil), "grpc.binarylog.v1.Address")
- proto.RegisterEnum("grpc.binarylog.v1.GrpcLogEntry_EventType", GrpcLogEntry_EventType_name, GrpcLogEntry_EventType_value)
- proto.RegisterEnum("grpc.binarylog.v1.GrpcLogEntry_Logger", GrpcLogEntry_Logger_name, GrpcLogEntry_Logger_value)
- proto.RegisterEnum("grpc.binarylog.v1.Address_Type", Address_Type_name, Address_Type_value)
-}
-
-func init() {
- proto.RegisterFile("grpc/binarylog/grpc_binarylog_v1/binarylog.proto", fileDescriptor_binarylog_264c8c9c551ce911)
-}
-
-var fileDescriptor_binarylog_264c8c9c551ce911 = []byte{
- // 900 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x55, 0x51, 0x6f, 0xe3, 0x44,
- 0x10, 0x3e, 0x37, 0x69, 0xdc, 0x4c, 0x92, 0xca, 0x5d, 0x95, 0x3b, 0x5f, 0x29, 0x34, 0xb2, 0x04,
- 0x0a, 0x42, 0x72, 0xb9, 0x94, 0xeb, 0xf1, 0x02, 0x52, 0x92, 0xfa, 0xd2, 0x88, 0x5c, 0x1a, 0x6d,
- 0x72, 0x3d, 0x40, 0x48, 0xd6, 0x36, 0x5e, 0x1c, 0x0b, 0xc7, 0x6b, 0xd6, 0x9b, 0xa0, 0xfc, 0x2c,
- 0xde, 0x90, 0xee, 0x77, 0xf1, 0x8e, 0xbc, 0x6b, 0x27, 0xa6, 0x69, 0x0f, 0x09, 0xde, 0x3c, 0xdf,
- 0x7c, 0xf3, 0xcd, 0xee, 0x78, 0x66, 0x16, 0xbe, 0xf2, 0x79, 0x3c, 0x3b, 0xbf, 0x0b, 0x22, 0xc2,
- 0xd7, 0x21, 0xf3, 0xcf, 0x53, 0xd3, 0xdd, 0x98, 0xee, 0xea, 0xc5, 0xd6, 0x67, 0xc7, 0x9c, 0x09,
- 0x86, 0x8e, 0x52, 0x8a, 0xbd, 0x45, 0x57, 0x2f, 0x4e, 0x3e, 0xf5, 0x19, 0xf3, 0x43, 0x7a, 0x2e,
- 0x09, 0x77, 0xcb, 0x5f, 0xce, 0xbd, 0x25, 0x27, 0x22, 0x60, 0x91, 0x0a, 0x39, 0x39, 0xbb, 0xef,
- 0x17, 0xc1, 0x82, 0x26, 0x82, 0x2c, 0x62, 0x45, 0xb0, 0xde, 0xeb, 0x50, 0xef, 0xf3, 0x78, 0x36,
- 0x64, 0xbe, 0x13, 0x09, 0xbe, 0x46, 0xdf, 0x40, 0x75, 0xc3, 0x31, 0xb5, 0xa6, 0xd6, 0xaa, 0xb5,
- 0x4f, 0x6c, 0xa5, 0x62, 0xe7, 0x2a, 0xf6, 0x34, 0x67, 0xe0, 0x2d, 0x19, 0x3d, 0x03, 0x7d, 0x46,
- 0xc2, 0xd0, 0x0d, 0x3c, 0x73, 0xaf, 0xa9, 0xb5, 0xca, 0xb8, 0x92, 0x9a, 0x03, 0x0f, 0xbd, 0x84,
- 0x67, 0x09, 0xfd, 0x6d, 0x49, 0xa3, 0x19, 0x75, 0x03, 0xcf, 0xfd, 0x3d, 0x10, 0xf3, 0x20, 0x72,
- 0x53, 0xa7, 0x59, 0x92, 0xc4, 0xe3, 0xdc, 0x3d, 0xf0, 0xde, 0x49, 0x67, 0x8f, 0x84, 0x21, 0xfa,
- 0x16, 0xca, 0x62, 0x1d, 0x53, 0xb3, 0xdc, 0xd4, 0x5a, 0x87, 0xed, 0x2f, 0xec, 0x9d, 0xdb, 0xdb,
- 0xc5, 0x83, 0xdb, 0xce, 0x8a, 0x46, 0x62, 0xba, 0x8e, 0x29, 0x96, 0x61, 0xe8, 0x3b, 0xa8, 0x84,
- 0xcc, 0xf7, 0x29, 0x37, 0xf7, 0xa5, 0xc0, 0xe7, 0xff, 0x26, 0x30, 0x94, 0x6c, 0x9c, 0x45, 0xa1,
- 0xd7, 0xd0, 0x98, 0x85, 0x01, 0x8d, 0x84, 0x3b, 0xa7, 0xc4, 0xa3, 0xdc, 0xac, 0xc8, 0x62, 0x9c,
- 0x3d, 0x20, 0xd3, 0x93, 0xbc, 0x6b, 0x49, 0xbb, 0x7e, 0x82, 0xeb, 0xb3, 0x82, 0x9d, 0xea, 0x24,
- 0x94, 0xaf, 0x28, 0xcf, 0x75, 0xf4, 0x47, 0x75, 0x26, 0x92, 0xb7, 0xd5, 0x49, 0x0a, 0x36, 0xba,
- 0x04, 0x7d, 0x41, 0x93, 0x84, 0xf8, 0xd4, 0x3c, 0xc8, 0x7f, 0xcb, 0x8e, 0xc2, 0x1b, 0xc5, 0xb8,
- 0x7e, 0x82, 0x73, 0x72, 0x1a, 0x27, 0x38, 0x09, 0x42, 0xca, 0xcd, 0xea, 0xa3, 0x71, 0x53, 0xc5,
- 0x48, 0xe3, 0x32, 0x32, 0xfa, 0x12, 0x8e, 0x62, 0xb2, 0x0e, 0x19, 0xf1, 0x5c, 0xc1, 0x97, 0xd1,
- 0x8c, 0x08, 0xea, 0x99, 0xd0, 0xd4, 0x5a, 0x07, 0xd8, 0xc8, 0x1c, 0xd3, 0x1c, 0x47, 0x36, 0x94,
- 0x63, 0x4a, 0xb9, 0x59, 0x7b, 0x34, 0x43, 0xc7, 0xf3, 0x38, 0x4d, 0x12, 0x2c, 0x79, 0xd6, 0x5f,
- 0x1a, 0x54, 0x37, 0x3f, 0x0c, 0x3d, 0x05, 0xe4, 0xdc, 0x3a, 0xa3, 0xa9, 0x3b, 0xfd, 0x71, 0xec,
- 0xb8, 0x6f, 0x47, 0xdf, 0x8f, 0x6e, 0xde, 0x8d, 0x8c, 0x27, 0xe8, 0x14, 0xcc, 0x02, 0xde, 0x1b,
- 0x0e, 0xd2, 0xef, 0x6b, 0xa7, 0x73, 0xe5, 0x60, 0x43, 0xbb, 0xe7, 0x9d, 0x38, 0xf8, 0xd6, 0xc1,
- 0xb9, 0x77, 0x0f, 0x7d, 0x02, 0xcf, 0x77, 0x63, 0xdf, 0x38, 0x93, 0x49, 0xa7, 0xef, 0x18, 0xa5,
- 0x7b, 0xee, 0x2c, 0x38, 0x77, 0x97, 0x51, 0x13, 0x4e, 0x1f, 0xc8, 0xdc, 0x19, 0xbe, 0x76, 0x7b,
- 0xc3, 0x9b, 0x89, 0x63, 0xec, 0x3f, 0x2c, 0x30, 0xc5, 0x9d, 0xc1, 0xd0, 0xc1, 0x46, 0x05, 0x7d,
- 0x04, 0x47, 0x45, 0x81, 0xce, 0xa8, 0xe7, 0x0c, 0x0d, 0xdd, 0xea, 0x42, 0x45, 0xb5, 0x19, 0x42,
- 0x70, 0x38, 0xbc, 0xe9, 0xf7, 0x1d, 0x5c, 0xb8, 0xef, 0x11, 0x34, 0x32, 0x4c, 0x65, 0x34, 0xb4,
- 0x02, 0xa4, 0x52, 0x18, 0x7b, 0xdd, 0x2a, 0xe8, 0x59, 0xfd, 0xad, 0xf7, 0x1a, 0xd4, 0x8b, 0xcd,
- 0x87, 0x5e, 0xc1, 0xc1, 0x82, 0x0a, 0xe2, 0x11, 0x41, 0xb2, 0xe1, 0xfd, 0xf8, 0xc1, 0x2e, 0x51,
- 0x14, 0xbc, 0x21, 0xa3, 0x33, 0xa8, 0x2d, 0xa8, 0x98, 0x33, 0xcf, 0x8d, 0xc8, 0x82, 0xca, 0x01,
- 0xae, 0x62, 0x50, 0xd0, 0x88, 0x2c, 0x28, 0x3a, 0x85, 0x2a, 0x59, 0x8a, 0x39, 0xe3, 0x81, 0x58,
- 0xcb, 0xb1, 0xad, 0xe2, 0x2d, 0x80, 0x2e, 0x40, 0x4f, 0x17, 0x01, 0x5b, 0x0a, 0x39, 0xae, 0xb5,
- 0xf6, 0xf3, 0x9d, 0x9d, 0x71, 0x95, 0x6d, 0x26, 0x9c, 0x33, 0xad, 0x3e, 0xd4, 0x8b, 0x1d, 0xff,
- 0x9f, 0x0f, 0x6f, 0xfd, 0xa1, 0x81, 0x9e, 0x75, 0xf0, 0xff, 0xaa, 0x40, 0x22, 0x88, 0x58, 0x26,
- 0xee, 0x8c, 0x79, 0xaa, 0x02, 0x0d, 0x0c, 0x0a, 0xea, 0x31, 0x8f, 0xa2, 0xcf, 0xe0, 0x30, 0x23,
- 0xe4, 0x73, 0xa8, 0xca, 0xd0, 0x50, 0x68, 0x36, 0x7a, 0x05, 0x9a, 0x47, 0x05, 0x09, 0xc2, 0x44,
- 0x56, 0xa4, 0x9e, 0xd3, 0xae, 0x14, 0x68, 0xbd, 0x04, 0x3d, 0x8f, 0x78, 0x0a, 0x95, 0x90, 0x46,
- 0xbe, 0x98, 0xcb, 0x03, 0x37, 0x70, 0x66, 0x21, 0x04, 0x65, 0x79, 0x8d, 0x3d, 0x19, 0x2f, 0xbf,
- 0xad, 0x2e, 0x1c, 0xe4, 0x67, 0x47, 0x97, 0xb0, 0x4f, 0xd3, 0xcd, 0x65, 0x6a, 0xcd, 0x52, 0xab,
- 0xd6, 0x6e, 0x7e, 0xe0, 0x9e, 0x72, 0xc3, 0x61, 0x45, 0xb7, 0x5e, 0x41, 0xe3, 0x1f, 0x38, 0x32,
- 0xa0, 0xf4, 0x2b, 0x5d, 0xcb, 0xec, 0x55, 0x9c, 0x7e, 0xa2, 0x63, 0xd8, 0x5f, 0x91, 0x70, 0x49,
- 0xb3, 0xdc, 0xca, 0xb0, 0xfe, 0xd4, 0x40, 0xcf, 0xe6, 0x18, 0x5d, 0x64, 0xdb, 0x59, 0x93, 0xcb,
- 0xf5, 0xec, 0xf1, 0x89, 0xb7, 0x0b, 0x3b, 0xd9, 0x04, 0x9d, 0x28, 0x34, 0xeb, 0xb0, 0xdc, 0x4c,
- 0x1f, 0x8f, 0x20, 0x76, 0x63, 0xc6, 0x85, 0xac, 0x6a, 0x03, 0x57, 0x82, 0x78, 0xcc, 0xb8, 0xb0,
- 0x1c, 0x28, 0xcb, 0x1d, 0x61, 0x40, 0xfd, 0xde, 0x76, 0x68, 0x40, 0x55, 0x22, 0x83, 0xf1, 0xed,
- 0xd7, 0x86, 0x56, 0x34, 0x2f, 0x8d, 0xbd, 0x8d, 0xf9, 0x76, 0x34, 0xf8, 0xc1, 0x28, 0x75, 0x7f,
- 0x86, 0xe3, 0x80, 0xed, 0x1e, 0xb2, 0x7b, 0xd8, 0x95, 0xd6, 0x90, 0xf9, 0xe3, 0xb4, 0x51, 0xc7,
- 0xda, 0x4f, 0xed, 0xac, 0x71, 0x7d, 0x16, 0x92, 0xc8, 0xb7, 0x19, 0x57, 0x4f, 0xf3, 0x87, 0x5e,
- 0xea, 0xbb, 0x8a, 0xec, 0xf2, 0x8b, 0xbf, 0x03, 0x00, 0x00, 0xff, 0xff, 0xe7, 0xf6, 0x4b, 0x50,
- 0xd4, 0x07, 0x00, 0x00,
-}
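
The deleted binarylog.pb.go carried the generated message types for gRPC binary logging (GrpcLogEntry, ClientHeader, Metadata, Address and the related enums). Below is a hedged sketch of how one client-header log entry would be assembled and serialized with those types, assuming the pre-revert vendor tree where the package still exists; the method name, authority and peer address are invented for illustration.

package main

import (
	"fmt"
	"log"

	"github.com/golang/protobuf/proto"
	"github.com/golang/protobuf/ptypes"
	binlogpb "google.golang.org/grpc/binarylog/grpc_binarylog_v1"
)

func main() {
	entry := &binlogpb.GrpcLogEntry{
		Timestamp:            ptypes.TimestampNow(),
		CallId:               1, // must be non-zero to disambiguate from an unset value
		SequenceIdWithinCall: 1, // first entry for this call
		Type:                 binlogpb.GrpcLogEntry_EVENT_TYPE_CLIENT_HEADER,
		Logger:               binlogpb.GrpcLogEntry_LOGGER_CLIENT,
		Payload: &binlogpb.GrpcLogEntry_ClientHeader{
			ClientHeader: &binlogpb.ClientHeader{
				MethodName: "/hypothetical.Service/Method", // invented method name
				Authority:  "example.internal:9999",        // invented authority
				Metadata: &binlogpb.Metadata{
					Entry: []*binlogpb.MetadataEntry{
						{Key: "user-agent", Value: []byte("grpc-go")},
					},
				},
			},
		},
		Peer: &binlogpb.Address{
			Type:    binlogpb.Address_TYPE_IPV4,
			Address: "10.0.0.1", // invented peer address
			IpPort:  9999,
		},
	}

	b, err := proto.Marshal(entry)
	if err != nil {
		log.Fatalf("marshal: %v", err)
	}
	fmt.Printf("encoded GrpcLogEntry: %d bytes\n", len(b))
}
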
diff --git a/vendor/google.golang.org/grpc/call.go b/vendor/google.golang.org/grpc/call.go
index 180d79d06..13cf8b13b 100644
--- a/vendor/google.golang.org/grpc/call.go
+++ b/vendor/google.golang.org/grpc/call.go
@@ -19,39 +19,138 @@
package grpc
import (
+ "io"
+ "time"
+
"golang.org/x/net/context"
+ "golang.org/x/net/trace"
+ "google.golang.org/grpc/balancer"
+ "google.golang.org/grpc/codes"
+ "google.golang.org/grpc/encoding"
+ "google.golang.org/grpc/peer"
+ "google.golang.org/grpc/stats"
+ "google.golang.org/grpc/status"
+ "google.golang.org/grpc/transport"
)
+// recvResponse receives and parses an RPC response.
+// On error, it returns the error and indicates whether the call should be retried.
+//
+// TODO(zhaoq): Check whether the received message sequence is valid.
+// TODO ctx is used for stats collection and processing. It is the context passed from the application.
+func recvResponse(ctx context.Context, dopts dialOptions, t transport.ClientTransport, c *callInfo, stream *transport.Stream, reply interface{}) (err error) {
+ // Try to acquire header metadata from the server if there is any.
+ defer func() {
+ if err != nil {
+ if _, ok := err.(transport.ConnectionError); !ok {
+ t.CloseStream(stream, err)
+ }
+ }
+ }()
+ c.headerMD, err = stream.Header()
+ if err != nil {
+ return
+ }
+ p := &parser{r: stream}
+ var inPayload *stats.InPayload
+ if dopts.copts.StatsHandler != nil {
+ inPayload = &stats.InPayload{
+ Client: true,
+ }
+ }
+ for {
+ if c.maxReceiveMessageSize == nil {
+ return status.Errorf(codes.Internal, "callInfo maxReceiveMessageSize field uninitialized(nil)")
+ }
+
+ // Set dc if it exists and matches the message compression type used,
+ // otherwise set comp if a registered compressor exists for it.
+ var comp encoding.Compressor
+ var dc Decompressor
+ if rc := stream.RecvCompress(); dopts.dc != nil && dopts.dc.Type() == rc {
+ dc = dopts.dc
+ } else if rc != "" && rc != encoding.Identity {
+ comp = encoding.GetCompressor(rc)
+ }
+ if err = recv(p, dopts.codec, stream, dc, reply, *c.maxReceiveMessageSize, inPayload, comp); err != nil {
+ if err == io.EOF {
+ break
+ }
+ return
+ }
+ }
+ if inPayload != nil && err == io.EOF && stream.Status().Code() == codes.OK {
+ // TODO in the current implementation, inTrailer may be handled before inPayload in some cases.
+ // Fix the order if necessary.
+ dopts.copts.StatsHandler.HandleRPC(ctx, inPayload)
+ }
+ c.trailerMD = stream.Trailer()
+ return nil
+}
+
+// sendRequest writes out various information of an RPC such as Context and Message.
+func sendRequest(ctx context.Context, dopts dialOptions, compressor Compressor, c *callInfo, callHdr *transport.CallHdr, stream *transport.Stream, t transport.ClientTransport, args interface{}, opts *transport.Options) (err error) {
+ defer func() {
+ if err != nil {
+ // If err is connection error, t will be closed, no need to close stream here.
+ if _, ok := err.(transport.ConnectionError); !ok {
+ t.CloseStream(stream, err)
+ }
+ }
+ }()
+ var (
+ outPayload *stats.OutPayload
+ )
+ if dopts.copts.StatsHandler != nil {
+ outPayload = &stats.OutPayload{
+ Client: true,
+ }
+ }
+ // Set comp and clear compressor if a registered compressor matches the type
+ // specified via UseCompressor. (And error if a matching compressor is not
+ // registered.)
+ var comp encoding.Compressor
+ if ct := c.compressorType; ct != "" && ct != encoding.Identity {
+ compressor = nil // Disable the legacy compressor.
+ comp = encoding.GetCompressor(ct)
+ if comp == nil {
+ return status.Errorf(codes.Internal, "grpc: Compressor is not installed for grpc-encoding %q", ct)
+ }
+ }
+ hdr, data, err := encode(dopts.codec, args, compressor, outPayload, comp)
+ if err != nil {
+ return err
+ }
+ if c.maxSendMessageSize == nil {
+ return status.Errorf(codes.Internal, "callInfo maxSendMessageSize field uninitialized(nil)")
+ }
+ if len(data) > *c.maxSendMessageSize {
+ return status.Errorf(codes.ResourceExhausted, "grpc: trying to send message larger than max (%d vs. %d)", len(data), *c.maxSendMessageSize)
+ }
+ err = t.Write(stream, hdr, data, opts)
+ if err == nil && outPayload != nil {
+ outPayload.SentTime = time.Now()
+ dopts.copts.StatsHandler.HandleRPC(ctx, outPayload)
+ }
+ // t.NewStream(...) could lead to an early rejection of the RPC (e.g., the service/method
+ // does not exist), so t.Write could get io.EOF from wait(...). Leave the following
+ // recvResponse to get the final status.
+ if err != nil && err != io.EOF {
+ return err
+ }
+ // Sent successfully.
+ return nil
+}
+
// Invoke sends the RPC request on the wire and returns after response is
// received. This is typically called by generated code.
-//
-// All errors returned by Invoke are compatible with the status package.
func (cc *ClientConn) Invoke(ctx context.Context, method string, args, reply interface{}, opts ...CallOption) error {
- // allow interceptor to see all applicable call options, which means those
- // configured as defaults from dial option as well as per-call options
- opts = combine(cc.dopts.callOptions, opts)
-
if cc.dopts.unaryInt != nil {
return cc.dopts.unaryInt(ctx, method, args, reply, cc, invoke, opts...)
}
return invoke(ctx, method, args, reply, cc, opts...)
}
-func combine(o1 []CallOption, o2 []CallOption) []CallOption {
- // we don't use append because o1 could have extra capacity whose
- // elements would be overwritten, which could cause inadvertent
- // sharing (and race conditions) between concurrent calls
- if len(o1) == 0 {
- return o2
- } else if len(o2) == 0 {
- return o1
- }
- ret := make([]CallOption, len(o1)+len(o2))
- copy(ret, o1)
- copy(ret[len(o1):], o2)
- return ret
-}
-
// Invoke sends the RPC request on the wire and returns after response is
// received. This is typically called by generated code.
//
@@ -60,15 +159,188 @@ func Invoke(ctx context.Context, method string, args, reply interface{}, cc *Cli
return cc.Invoke(ctx, method, args, reply, opts...)
}
-var unaryStreamDesc = &StreamDesc{ServerStreams: false, ClientStreams: false}
+func invoke(ctx context.Context, method string, args, reply interface{}, cc *ClientConn, opts ...CallOption) (e error) {
+ c := defaultCallInfo()
+ mc := cc.GetMethodConfig(method)
+ if mc.WaitForReady != nil {
+ c.failFast = !*mc.WaitForReady
+ }
-func invoke(ctx context.Context, method string, req, reply interface{}, cc *ClientConn, opts ...CallOption) error {
- cs, err := newClientStream(ctx, unaryStreamDesc, cc, method, opts...)
- if err != nil {
- return err
+ if mc.Timeout != nil && *mc.Timeout >= 0 {
+ var cancel context.CancelFunc
+ ctx, cancel = context.WithTimeout(ctx, *mc.Timeout)
+ defer cancel()
+ }
+
+ opts = append(cc.dopts.callOptions, opts...)
+ for _, o := range opts {
+ if err := o.before(c); err != nil {
+ return toRPCErr(err)
+ }
}
- if err := cs.SendMsg(req); err != nil {
+ defer func() {
+ for _, o := range opts {
+ o.after(c)
+ }
+ }()
+
+ c.maxSendMessageSize = getMaxSize(mc.MaxReqSize, c.maxSendMessageSize, defaultClientMaxSendMessageSize)
+ c.maxReceiveMessageSize = getMaxSize(mc.MaxRespSize, c.maxReceiveMessageSize, defaultClientMaxReceiveMessageSize)
+
+ if EnableTracing {
+ c.traceInfo.tr = trace.New("grpc.Sent."+methodFamily(method), method)
+ defer c.traceInfo.tr.Finish()
+ c.traceInfo.firstLine.client = true
+ if deadline, ok := ctx.Deadline(); ok {
+ c.traceInfo.firstLine.deadline = deadline.Sub(time.Now())
+ }
+ c.traceInfo.tr.LazyLog(&c.traceInfo.firstLine, false)
+ // TODO(dsymonds): Arrange for c.traceInfo.firstLine.remoteAddr to be set.
+ defer func() {
+ if e != nil {
+ c.traceInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{e}}, true)
+ c.traceInfo.tr.SetError()
+ }
+ }()
+ }
+ ctx = newContextWithRPCInfo(ctx, c.failFast)
+ sh := cc.dopts.copts.StatsHandler
+ if sh != nil {
+ ctx = sh.TagRPC(ctx, &stats.RPCTagInfo{FullMethodName: method, FailFast: c.failFast})
+ begin := &stats.Begin{
+ Client: true,
+ BeginTime: time.Now(),
+ FailFast: c.failFast,
+ }
+ sh.HandleRPC(ctx, begin)
+ defer func() {
+ end := &stats.End{
+ Client: true,
+ EndTime: time.Now(),
+ Error: e,
+ }
+ sh.HandleRPC(ctx, end)
+ }()
+ }
+ topts := &transport.Options{
+ Last: true,
+ Delay: false,
+ }
+ callHdr := &transport.CallHdr{
+ Host: cc.authority,
+ Method: method,
+ }
+ if c.creds != nil {
+ callHdr.Creds = c.creds
+ }
+ if c.compressorType != "" {
+ callHdr.SendCompress = c.compressorType
+ } else if cc.dopts.cp != nil {
+ callHdr.SendCompress = cc.dopts.cp.Type()
+ }
+ firstAttempt := true
+
+ for {
+ // Check that the context has not expired. This will prevent us from
+ // looping forever if an error occurs for wait-for-ready RPCs where no data
+ // is sent on the wire.
+ select {
+ case <-ctx.Done():
+ return toRPCErr(ctx.Err())
+ default:
+ }
+
+ // Record the done handler from Balancer.Get(...). It is called once the
+ // RPC has completed or failed.
+ t, done, err := cc.getTransport(ctx, c.failFast)
+ if err != nil {
+ return err
+ }
+ stream, err := t.NewStream(ctx, callHdr)
+ if err != nil {
+ if done != nil {
+ done(balancer.DoneInfo{Err: err})
+ }
+ // In the event of any error from NewStream, we never attempted to write
+ // anything to the wire, so we can retry indefinitely for non-fail-fast
+ // RPCs.
+ if !c.failFast {
+ continue
+ }
+ return toRPCErr(err)
+ }
+ if peer, ok := peer.FromContext(stream.Context()); ok {
+ c.peer = peer
+ }
+ if c.traceInfo.tr != nil {
+ c.traceInfo.tr.LazyLog(&payload{sent: true, msg: args}, true)
+ }
+ err = sendRequest(ctx, cc.dopts, cc.dopts.cp, c, callHdr, stream, t, args, topts)
+ if err != nil {
+ if done != nil {
+ done(balancer.DoneInfo{
+ Err: err,
+ BytesSent: true,
+ BytesReceived: stream.BytesReceived(),
+ })
+ }
+ // Retry a non-failfast RPC when:
+ // i) the server started to drain before this RPC was initiated, or
+ // ii) the server refused the stream.
+ if !c.failFast && stream.Unprocessed() {
+ // In this case, the server did not receive the data, but we still
+ // created wire traffic, so we should not retry indefinitely.
+ if firstAttempt {
+ // TODO: Add a field to header for grpc-transparent-retry-attempts
+ firstAttempt = false
+ continue
+ }
+ // Otherwise, give up and return an error anyway.
+ }
+ return toRPCErr(err)
+ }
+ err = recvResponse(ctx, cc.dopts, t, c, stream, reply)
+ if err != nil {
+ if done != nil {
+ done(balancer.DoneInfo{
+ Err: err,
+ BytesSent: true,
+ BytesReceived: stream.BytesReceived(),
+ })
+ }
+ if !c.failFast && stream.Unprocessed() {
+ // In these cases, the server did not receive the data, but we still
+ // created wire traffic, so we should not retry indefinitely.
+ if firstAttempt {
+ // TODO: Add a field to header for grpc-transparent-retry-attempts
+ firstAttempt = false
+ continue
+ }
+ // Otherwise, give up and return an error anyway.
+ }
+ return toRPCErr(err)
+ }
+ if c.traceInfo.tr != nil {
+ c.traceInfo.tr.LazyLog(&payload{sent: false, msg: reply}, true)
+ }
+ t.CloseStream(stream, nil)
+ err = stream.Status().Err()
+ if done != nil {
+ done(balancer.DoneInfo{
+ Err: err,
+ BytesSent: true,
+ BytesReceived: stream.BytesReceived(),
+ })
+ }
+ if !c.failFast && stream.Unprocessed() {
+ // In these cases, the server did not receive the data, but we still
+ // created wire traffic, so we should not retry indefinitely.
+ if firstAttempt {
+ // TODO: Add a field to header for grpc-transparent-retry-attempts
+ firstAttempt = false
+ continue
+ }
+ }
return err
}
- return cs.RecvMsg(reply)
}
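
The restored call.go sends unary RPCs through ClientConn.Invoke into the invoke() loop above. Here is a minimal sketch of the calling side, using a hypothetical address and method; FailFast(false) and MaxCallRecvMsgSize are the per-call options whose before()/after() hooks populate the callInfo fields used by sendRequest and recvResponse.

package main

import (
	"context"
	"log"
	"time"

	"github.com/golang/protobuf/ptypes/empty"
	"google.golang.org/grpc"
)

func main() {
	// Non-blocking dial; connecting to the (hypothetical) server happens in the background.
	conn, err := grpc.Dial("localhost:9999", grpc.WithInsecure())
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	defer conn.Close()

	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	// empty.Empty stands in for the generated request/response messages of a real service.
	in, out := &empty.Empty{}, &empty.Empty{}
	err = conn.Invoke(ctx, "/hypothetical.Service/Method", in, out,
		grpc.FailFast(false),                 // wait-for-ready: invoke() may retry until ctx expires
		grpc.MaxCallRecvMsgSize(4*1024*1024), // bounds c.maxReceiveMessageSize in recvResponse
	)
	if err != nil {
		log.Printf("RPC failed: %v", err)
	}
}

With FailFast(false) the retry loop keeps trying transports until the context deadline fires, which is why invoke() checks ctx.Done() at the top of every iteration.
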
diff --git a/vendor/google.golang.org/grpc/clientconn.go b/vendor/google.golang.org/grpc/clientconn.go
index f49ac3f9b..bfbef3621 100644
--- a/vendor/google.golang.org/grpc/clientconn.go
+++ b/vendor/google.golang.org/grpc/clientconn.go
@@ -26,52 +26,41 @@ import (
"reflect"
"strings"
"sync"
- "sync/atomic"
"time"
"golang.org/x/net/context"
+ "golang.org/x/net/trace"
"google.golang.org/grpc/balancer"
_ "google.golang.org/grpc/balancer/roundrobin" // To register roundrobin.
- "google.golang.org/grpc/codes"
"google.golang.org/grpc/connectivity"
"google.golang.org/grpc/credentials"
"google.golang.org/grpc/grpclog"
- "google.golang.org/grpc/internal/backoff"
- "google.golang.org/grpc/internal/channelz"
- "google.golang.org/grpc/internal/transport"
"google.golang.org/grpc/keepalive"
- "google.golang.org/grpc/metadata"
"google.golang.org/grpc/resolver"
_ "google.golang.org/grpc/resolver/dns" // To register dns resolver.
_ "google.golang.org/grpc/resolver/passthrough" // To register passthrough resolver.
- "google.golang.org/grpc/status"
-)
-
-const (
- // minimum time to give a connection to complete
- minConnectTimeout = 20 * time.Second
- // must match grpclbName in grpclb/grpclb.go
- grpclbName = "grpclb"
+ "google.golang.org/grpc/stats"
+ "google.golang.org/grpc/transport"
)
var (
// ErrClientConnClosing indicates that the operation is illegal because
// the ClientConn is closing.
- //
- // Deprecated: this error should not be relied upon by users; use the status
- // code of Canceled instead.
- ErrClientConnClosing = status.Error(codes.Canceled, "grpc: the client connection is closing")
+ ErrClientConnClosing = errors.New("grpc: the client connection is closing")
+ // ErrClientConnTimeout indicates that the ClientConn cannot establish the
+ // underlying connections within the specified timeout.
+ // DEPRECATED: Please use context.DeadlineExceeded instead.
+ ErrClientConnTimeout = errors.New("grpc: timed out when dialing")
// errConnDrain indicates that the connection starts to be drained and does not accept any new RPCs.
errConnDrain = errors.New("grpc: the connection is drained")
// errConnClosing indicates that the connection is closing.
errConnClosing = errors.New("grpc: the connection is closing")
+ // errConnUnavailable indicates that the connection is unavailable.
+ errConnUnavailable = errors.New("grpc: the connection is unavailable")
// errBalancerClosed indicates that the balancer is closed.
errBalancerClosed = errors.New("grpc: balancer is closed")
- // We use an accessor so that minConnectTimeout can be
- // atomically read and updated while testing.
- getMinConnectTimeout = func() time.Duration {
- return minConnectTimeout
- }
+ // minimum time to give a connection to complete
+ minConnectTimeout = 20 * time.Second
)
// The following errors are returned from Dial and DialContext
@@ -80,9 +69,6 @@ var (
// being set for ClientConn. Users should either set one or explicitly
// call WithInsecure DialOption to disable security.
errNoTransportSecurity = errors.New("grpc: no transport security set (use grpc.WithInsecure() explicitly or set credentials)")
- // errTransportCredsAndBundle indicates that creds bundle is used together
- // with other individual Transport Credentials.
- errTransportCredsAndBundle = errors.New("grpc: credentials.Bundle may not be used with individual TransportCredentials")
// errTransportCredentialsMissing indicates that users want to transmit security
// information (e.g., oauth2 token) which requires secure connection on an insecure
// connection.
@@ -90,83 +76,357 @@ var (
// errCredentialsConflict indicates that grpc.WithTransportCredentials()
// and grpc.WithInsecure() are both called for a connection.
errCredentialsConflict = errors.New("grpc: transport credentials are set for an insecure connection (grpc.WithTransportCredentials() and grpc.WithInsecure() are both called)")
+ // errNetworkIO indicates that the connection is down due to some network I/O error.
+ errNetworkIO = errors.New("grpc: failed with network I/O error")
)
+// dialOptions configure a Dial call. dialOptions are set by the DialOption
+// values passed to Dial.
+type dialOptions struct {
+ unaryInt UnaryClientInterceptor
+ streamInt StreamClientInterceptor
+ codec Codec
+ cp Compressor
+ dc Decompressor
+ bs backoffStrategy
+ block bool
+ insecure bool
+ timeout time.Duration
+ scChan <-chan ServiceConfig
+ copts transport.ConnectOptions
+ callOptions []CallOption
+ // This is used by v1 balancer dial option WithBalancer to support v1
+ // balancer, and also by WithBalancerName dial option.
+ balancerBuilder balancer.Builder
+ // This is to support grpclb.
+ resolverBuilder resolver.Builder
+ // Custom user options for resolver.Build.
+ resolverBuildUserOptions interface{}
+ waitForHandshake bool
+}
+
const (
defaultClientMaxReceiveMessageSize = 1024 * 1024 * 4
defaultClientMaxSendMessageSize = math.MaxInt32
- // http2IOBufSize specifies the buffer size for sending frames.
- defaultWriteBufSize = 32 * 1024
- defaultReadBufSize = 32 * 1024
)
-// Dial creates a client connection to the given target.
-func Dial(target string, opts ...DialOption) (*ClientConn, error) {
- return DialContext(context.Background(), target, opts...)
+// DialOption configures how we set up the connection.
+type DialOption func(*dialOptions)
+
+// WithWaitForHandshake makes the client wait until the initial settings frame is received from the
+// server before assigning RPCs to the connection.
+// Experimental API.
+func WithWaitForHandshake() DialOption {
+ return func(o *dialOptions) {
+ o.waitForHandshake = true
+ }
+}
+
+// WithWriteBufferSize lets you set the size of the write buffer; this determines how much data can be batched
+// before doing a write on the wire.
+func WithWriteBufferSize(s int) DialOption {
+ return func(o *dialOptions) {
+ o.copts.WriteBufferSize = s
+ }
+}
+
+// WithReadBufferSize lets you set the size of the read buffer; this determines the maximum amount of data
+// that can be read in each read syscall.
+func WithReadBufferSize(s int) DialOption {
+ return func(o *dialOptions) {
+ o.copts.ReadBufferSize = s
+ }
+}
+
+// WithInitialWindowSize returns a DialOption which sets the value for initial window size on a stream.
+// The lower bound for window size is 64K and any value smaller than that will be ignored.
+func WithInitialWindowSize(s int32) DialOption {
+ return func(o *dialOptions) {
+ o.copts.InitialWindowSize = s
+ }
+}
+
+// WithInitialConnWindowSize returns a DialOption which sets the value for initial window size on a connection.
+// The lower bound for window size is 64K and any value smaller than that will be ignored.
+func WithInitialConnWindowSize(s int32) DialOption {
+ return func(o *dialOptions) {
+ o.copts.InitialConnWindowSize = s
+ }
+}
+
+// WithMaxMsgSize returns a DialOption which sets the maximum message size the client can receive. Deprecated: use WithDefaultCallOptions(MaxCallRecvMsgSize(s)) instead.
+func WithMaxMsgSize(s int) DialOption {
+ return WithDefaultCallOptions(MaxCallRecvMsgSize(s))
+}
+
+// WithDefaultCallOptions returns a DialOption which sets the default CallOptions for calls over the connection.
+func WithDefaultCallOptions(cos ...CallOption) DialOption {
+ return func(o *dialOptions) {
+ o.callOptions = append(o.callOptions, cos...)
+ }
+}
+
+// WithCodec returns a DialOption which sets a codec for message marshaling and unmarshaling.
+func WithCodec(c Codec) DialOption {
+ return func(o *dialOptions) {
+ o.codec = c
+ }
}
-// DialContext creates a client connection to the given target. By default, it's
-// a non-blocking dial (the function won't wait for connections to be
-// established, and connecting happens in the background). To make it a blocking
-// dial, use WithBlock() dial option.
+// WithCompressor returns a DialOption which sets a Compressor to use for
+// message compression. It has lower priority than the compressor set by
+// the UseCompressor CallOption.
//
-// In the non-blocking case, the ctx does not act against the connection. It
-// only controls the setup steps.
+// Deprecated: use UseCompressor instead.
+func WithCompressor(cp Compressor) DialOption {
+ return func(o *dialOptions) {
+ o.cp = cp
+ }
+}
+
+// WithDecompressor returns a DialOption which sets a Decompressor to use for
+// incoming message decompression. If incoming response messages are encoded
+// using the decompressor's Type(), it will be used. Otherwise, the message
+// encoding will be used to look up the compressor registered via
+// encoding.RegisterCompressor, which will then be used to decompress the
+// message. If no compressor is registered for the encoding, an Unimplemented
+// status error will be returned.
+//
+// Deprecated: use encoding.RegisterCompressor instead.
+func WithDecompressor(dc Decompressor) DialOption {
+ return func(o *dialOptions) {
+ o.dc = dc
+ }
+}
+
+// WithBalancer returns a DialOption which sets a load balancer with the v1 API.
+// Name resolver will be ignored if this DialOption is specified.
+//
+// Deprecated: use the new balancer APIs in balancer package and WithBalancerName.
+func WithBalancer(b Balancer) DialOption {
+ return func(o *dialOptions) {
+ o.balancerBuilder = &balancerWrapperBuilder{
+ b: b,
+ }
+ }
+}
+
+// WithBalancerName sets the balancer that the ClientConn will be initialized
+// with. Balancer registered with balancerName will be used. This function
+// panics if no balancer was registered by balancerName.
+//
+// The balancer cannot be overridden by balancer option specified by service
+// config.
+//
+// This is an EXPERIMENTAL API.
+func WithBalancerName(balancerName string) DialOption {
+ builder := balancer.Get(balancerName)
+ if builder == nil {
+ panic(fmt.Sprintf("grpc.WithBalancerName: no balancer is registered for name %v", balancerName))
+ }
+ return func(o *dialOptions) {
+ o.balancerBuilder = builder
+ }
+}
+
+// withResolverBuilder is only for grpclb.
+func withResolverBuilder(b resolver.Builder) DialOption {
+ return func(o *dialOptions) {
+ o.resolverBuilder = b
+ }
+}
+
+// WithResolverUserOptions returns a DialOption which sets the UserOptions
+// field of resolver's BuildOption.
+func WithResolverUserOptions(userOpt interface{}) DialOption {
+ return func(o *dialOptions) {
+ o.resolverBuildUserOptions = userOpt
+ }
+}
+
+// WithServiceConfig returns a DialOption which has a channel to read the service configuration.
+// DEPRECATED: service config should be received through name resolver, as specified here.
+// https://github.com/grpc/grpc/blob/master/doc/service_config.md
+func WithServiceConfig(c <-chan ServiceConfig) DialOption {
+ return func(o *dialOptions) {
+ o.scChan = c
+ }
+}
+
+// WithBackoffMaxDelay configures the dialer to use the provided maximum delay
+// when backing off after failed connection attempts.
+func WithBackoffMaxDelay(md time.Duration) DialOption {
+ return WithBackoffConfig(BackoffConfig{MaxDelay: md})
+}
+
+// WithBackoffConfig configures the dialer to use the provided backoff
+// parameters after connection failures.
//
-// In the blocking case, ctx can be used to cancel or expire the pending
-// connection. Once this function returns, the cancellation and expiration of
-// ctx will be noop. Users should call ClientConn.Close to terminate all the
-// pending operations after this function returns.
+// Use WithBackoffMaxDelay until more parameters on BackoffConfig are opened up
+// for use.
+func WithBackoffConfig(b BackoffConfig) DialOption {
+ // Set defaults to ensure that provided BackoffConfig is valid and
+ // unexported fields get default values.
+ setDefaults(&b)
+ return withBackoff(b)
+}
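
As a rough sketch of how the backoff options compose with other DialOptions (the address and delay are illustrative assumptions, not values taken from this patch):

package main

import (
	"log"
	"time"

	"google.golang.org/grpc"
)

func main() {
	// Cap the reconnect backoff at 10s; WithBackoffMaxDelay is shorthand
	// for WithBackoffConfig(BackoffConfig{MaxDelay: ...}).
	conn, err := grpc.Dial("localhost:50051",
		grpc.WithInsecure(),
		grpc.WithBackoffMaxDelay(10*time.Second),
	)
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	defer conn.Close()
}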
+
+// withBackoff sets the backoff strategy used for connectRetryNum after a
+// failed connection attempt.
//
-// The target name syntax is defined in
-// https://github.com/grpc/grpc/blob/master/doc/naming.md.
-// e.g. to use dns resolver, a "dns:///" prefix should be applied to the target.
+// This can be exported if arbitrary backoff strategies are allowed by gRPC.
+func withBackoff(bs backoffStrategy) DialOption {
+ return func(o *dialOptions) {
+ o.bs = bs
+ }
+}
+
+// WithBlock returns a DialOption which makes the caller of Dial block until the
+// underlying connection is up. Without this, Dial returns immediately and
+// connecting to the server happens in the background.
+func WithBlock() DialOption {
+ return func(o *dialOptions) {
+ o.block = true
+ }
+}
+
+// WithInsecure returns a DialOption which disables transport security for this ClientConn.
+// Note that transport security is required unless WithInsecure is set.
+func WithInsecure() DialOption {
+ return func(o *dialOptions) {
+ o.insecure = true
+ }
+}
+
+// WithTransportCredentials returns a DialOption which configures
+// connection-level security credentials (e.g., TLS/SSL).
+func WithTransportCredentials(creds credentials.TransportCredentials) DialOption {
+ return func(o *dialOptions) {
+ o.copts.TransportCredentials = creds
+ }
+}
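
A minimal sketch of supplying transport credentials to Dial; the CA bundle path and server address are placeholders, not values from this repository:

package main

import (
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials"
)

func main() {
	// Load the CA certificate that signed the server's certificate
	// (path is a placeholder for illustration).
	creds, err := credentials.NewClientTLSFromFile("/etc/ssl/ca.pem", "")
	if err != nil {
		log.Fatalf("load credentials: %v", err)
	}
	conn, err := grpc.Dial("example.com:443", grpc.WithTransportCredentials(creds))
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	defer conn.Close()
}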
+
+// WithPerRPCCredentials returns a DialOption which sets
+// credentials and places auth state on each outbound RPC.
+func WithPerRPCCredentials(creds credentials.PerRPCCredentials) DialOption {
+ return func(o *dialOptions) {
+ o.copts.PerRPCCredentials = append(o.copts.PerRPCCredentials, creds)
+ }
+}
+
+// WithTimeout returns a DialOption that configures a timeout for dialing a ClientConn
+// initially. This is valid if and only if WithBlock() is present.
+//
+// Deprecated: use DialContext and context.WithTimeout instead.
+func WithTimeout(d time.Duration) DialOption {
+ return func(o *dialOptions) {
+ o.timeout = d
+ }
+}
+
+func withContextDialer(f func(context.Context, string) (net.Conn, error)) DialOption {
+ return func(o *dialOptions) {
+ o.copts.Dialer = f
+ }
+}
+
+// WithDialer returns a DialOption that specifies a function to use for dialing network addresses.
+// If FailOnNonTempDialError() is set to true, and an error is returned by f, gRPC checks the error's
+// Temporary() method to decide if it should try to reconnect to the network address.
+func WithDialer(f func(string, time.Duration) (net.Conn, error)) DialOption {
+ return withContextDialer(
+ func(ctx context.Context, addr string) (net.Conn, error) {
+ if deadline, ok := ctx.Deadline(); ok {
+ return f(addr, deadline.Sub(time.Now()))
+ }
+ return f(addr, 0)
+ })
+}
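
For example, a custom dialer is the usual way to reach a gRPC server listening on a Unix socket with this API; the socket path below is a placeholder, and in this setup the target string is simply handed through to the dialer:

package main

import (
	"log"
	"net"
	"time"

	"google.golang.org/grpc"
)

func main() {
	// Dial a Unix domain socket instead of a TCP address.
	dialer := func(addr string, timeout time.Duration) (net.Conn, error) {
		return net.DialTimeout("unix", addr, timeout)
	}
	conn, err := grpc.Dial("/tmp/gitaly.socket",
		grpc.WithInsecure(),
		grpc.WithDialer(dialer),
	)
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	defer conn.Close()
}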
+
+// WithStatsHandler returns a DialOption that specifies the stats handler
+// for all the RPCs and underlying network connections in this ClientConn.
+func WithStatsHandler(h stats.Handler) DialOption {
+ return func(o *dialOptions) {
+ o.copts.StatsHandler = h
+ }
+}
+
+// FailOnNonTempDialError returns a DialOption that specifies whether gRPC fails on non-temporary dial errors.
+// If f is true and the dialer returns a non-temporary error, gRPC will fail the connection to the network
+// address and won't try to reconnect.
+// The default value of FailOnNonTempDialError is false.
+// This is an EXPERIMENTAL API.
+func FailOnNonTempDialError(f bool) DialOption {
+ return func(o *dialOptions) {
+ o.copts.FailOnNonTempDialError = f
+ }
+}
+
+// WithUserAgent returns a DialOption that specifies a user agent string for all the RPCs.
+func WithUserAgent(s string) DialOption {
+ return func(o *dialOptions) {
+ o.copts.UserAgent = s
+ }
+}
+
+// WithKeepaliveParams returns a DialOption that specifies keepalive parameters for the client transport.
+func WithKeepaliveParams(kp keepalive.ClientParameters) DialOption {
+ return func(o *dialOptions) {
+ o.copts.KeepaliveParams = kp
+ }
+}
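
A short sketch of client keepalive configuration (the intervals are illustrative, not recommendations from this patch):

package main

import (
	"log"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/keepalive"
)

func main() {
	// Ping the server after 20s of inactivity and give it 5s to answer.
	kp := keepalive.ClientParameters{
		Time:                20 * time.Second,
		Timeout:             5 * time.Second,
		PermitWithoutStream: true,
	}
	conn, err := grpc.Dial("localhost:50051",
		grpc.WithInsecure(),
		grpc.WithKeepaliveParams(kp),
	)
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	defer conn.Close()
}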
+
+// WithUnaryInterceptor returns a DialOption that specifies the interceptor for unary RPCs.
+func WithUnaryInterceptor(f UnaryClientInterceptor) DialOption {
+ return func(o *dialOptions) {
+ o.unaryInt = f
+ }
+}
+
+// WithStreamInterceptor returns a DialOption that specifies the interceptor for streaming RPCs.
+func WithStreamInterceptor(f StreamClientInterceptor) DialOption {
+ return func(o *dialOptions) {
+ o.streamInt = f
+ }
+}
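
As a sketch, a unary client interceptor that times each RPC could be attached like this; the server address is a placeholder:

package main

import (
	"context"
	"log"
	"time"

	"google.golang.org/grpc"
)

// timingInterceptor logs how long each unary RPC took.
func timingInterceptor(ctx context.Context, method string, req, reply interface{},
	cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error {
	start := time.Now()
	err := invoker(ctx, method, req, reply, cc, opts...)
	log.Printf("%s took %v (err: %v)", method, time.Since(start), err)
	return err
}

func main() {
	conn, err := grpc.Dial("localhost:50051",
		grpc.WithInsecure(),
		grpc.WithUnaryInterceptor(timingInterceptor),
	)
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	defer conn.Close()
}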
+
+// WithAuthority returns a DialOption that specifies the value to be used as
+// the :authority pseudo-header. This value only works with WithInsecure and
+// has no effect if TransportCredentials are present.
+func WithAuthority(a string) DialOption {
+ return func(o *dialOptions) {
+ o.copts.Authority = a
+ }
+}
+
+// Dial creates a client connection to the given target.
+func Dial(target string, opts ...DialOption) (*ClientConn, error) {
+ return DialContext(context.Background(), target, opts...)
+}
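
A hedged sketch of the blocking pattern described below: bound the whole connection attempt with a context deadline instead of the deprecated WithTimeout (the address is illustrative):

package main

import (
	"context"
	"log"
	"time"

	"google.golang.org/grpc"
)

func main() {
	// Fail the dial if the connection is not READY within 5 seconds.
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	conn, err := grpc.DialContext(ctx, "localhost:50051",
		grpc.WithInsecure(),
		grpc.WithBlock(), // without this, the ctx only bounds the setup steps
	)
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	defer conn.Close()
}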
+
+// DialContext creates a client connection to the given target. ctx can be used to
+// cancel or expire the pending connection. Once this function returns, the
+// cancellation and expiration of ctx will be a no-op. Users should call ClientConn.Close
+// to terminate all the pending operations after this function returns.
func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *ClientConn, err error) {
cc := &ClientConn{
- target: target,
- csMgr: &connectivityStateManager{},
- conns: make(map[*addrConn]struct{}),
- dopts: defaultDialOptions(),
+ target: target,
+ csMgr: &connectivityStateManager{},
+ conns: make(map[*addrConn]struct{}),
+
blockingpicker: newPickerWrapper(),
- czData: new(channelzData),
}
- cc.retryThrottler.Store((*retryThrottler)(nil))
cc.ctx, cc.cancel = context.WithCancel(context.Background())
for _, opt := range opts {
- opt.apply(&cc.dopts)
- }
-
- if channelz.IsOn() {
- if cc.dopts.channelzParentID != 0 {
- cc.channelzID = channelz.RegisterChannel(&channelzChannel{cc}, cc.dopts.channelzParentID, target)
- channelz.AddTraceEvent(cc.channelzID, &channelz.TraceEventDesc{
- Desc: "Channel Created",
- Severity: channelz.CtINFO,
- Parent: &channelz.TraceEventDesc{
- Desc: fmt.Sprintf("Nested Channel(id:%d) created", cc.channelzID),
- Severity: channelz.CtINFO,
- },
- })
- } else {
- cc.channelzID = channelz.RegisterChannel(&channelzChannel{cc}, 0, target)
- channelz.AddTraceEvent(cc.channelzID, &channelz.TraceEventDesc{
- Desc: "Channel Created",
- Severity: channelz.CtINFO,
- })
- }
- cc.csMgr.channelzID = cc.channelzID
+ opt(&cc.dopts)
}
if !cc.dopts.insecure {
- if cc.dopts.copts.TransportCredentials == nil && cc.dopts.copts.CredsBundle == nil {
+ if cc.dopts.copts.TransportCredentials == nil {
return nil, errNoTransportSecurity
}
- if cc.dopts.copts.TransportCredentials != nil && cc.dopts.copts.CredsBundle != nil {
- return nil, errTransportCredsAndBundle
- }
} else {
- if cc.dopts.copts.TransportCredentials != nil || cc.dopts.copts.CredsBundle != nil {
+ if cc.dopts.copts.TransportCredentials != nil {
return nil, errCredentialsConflict
}
for _, cd := range cc.dopts.copts.PerRPCCredentials {
@@ -181,8 +441,7 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *
if cc.dopts.copts.Dialer == nil {
cc.dopts.copts.Dialer = newProxyDialer(
func(ctx context.Context, addr string) (net.Conn, error) {
- network, addr := parseDialTarget(addr)
- return dialContext(ctx, network, addr)
+ return dialContext(ctx, "tcp", addr)
},
)
}
@@ -223,35 +482,19 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *
default:
}
}
- if cc.dopts.bs == nil {
- cc.dopts.bs = backoff.Exponential{
- MaxDelay: DefaultBackoffConfig.MaxDelay,
- }
+ // Set defaults.
+ if cc.dopts.codec == nil {
+ cc.dopts.codec = protoCodec{}
}
- if cc.dopts.resolverBuilder == nil {
- // Only try to parse target when resolver builder is not already set.
- cc.parsedTarget = parseTarget(cc.target)
- grpclog.Infof("parsed scheme: %q", cc.parsedTarget.Scheme)
- cc.dopts.resolverBuilder = resolver.Get(cc.parsedTarget.Scheme)
- if cc.dopts.resolverBuilder == nil {
- // If resolver builder is still nil, the parse target's scheme is
- // not registered. Fallback to default resolver and set Endpoint to
- // the original unparsed target.
- grpclog.Infof("scheme %q not registered, fallback to default scheme", cc.parsedTarget.Scheme)
- cc.parsedTarget = resolver.Target{
- Scheme: resolver.GetDefaultScheme(),
- Endpoint: target,
- }
- cc.dopts.resolverBuilder = resolver.Get(cc.parsedTarget.Scheme)
- }
- } else {
- cc.parsedTarget = resolver.Target{Endpoint: target}
+ if cc.dopts.bs == nil {
+ cc.dopts.bs = DefaultBackoffConfig
}
+ cc.parsedTarget = parseTarget(cc.target)
creds := cc.dopts.copts.TransportCredentials
if creds != nil && creds.Info().ServerName != "" {
cc.authority = creds.Info().ServerName
- } else if cc.dopts.insecure && cc.dopts.authority != "" {
- cc.authority = cc.dopts.authority
+ } else if cc.dopts.insecure && cc.dopts.copts.Authority != "" {
+ cc.authority = cc.dopts.copts.Authority
} else {
// Use endpoint from "scheme://authority/endpoint" as the default
// authority for ClientConn.
@@ -278,10 +521,8 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *
credsClone = creds.Clone()
}
cc.balancerBuildOpts = balancer.BuildOptions{
- DialCreds: credsClone,
- CredsBundle: cc.dopts.copts.CredsBundle,
- Dialer: cc.dopts.copts.Dialer,
- ChannelzParentID: cc.channelzID,
+ DialCreds: credsClone,
+ Dialer: cc.dopts.copts.Dialer,
}
// Build the resolver.
@@ -304,13 +545,6 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *
s := cc.GetState()
if s == connectivity.Ready {
break
- } else if cc.dopts.copts.FailOnNonTempDialError && s == connectivity.TransientFailure {
- if err = cc.blockingpicker.connectionError(); err != nil {
- terr, ok := err.(interface{ Temporary() bool })
- if ok && !terr.Temporary() {
- return nil, err
- }
- }
}
if !cc.WaitForStateChange(ctx, s) {
// ctx got timeout or canceled.
@@ -328,7 +562,6 @@ type connectivityStateManager struct {
mu sync.Mutex
state connectivity.State
notifyChan chan struct{}
- channelzID int64
}
// updateState updates the connectivity.State of ClientConn.
@@ -344,12 +577,6 @@ func (csm *connectivityStateManager) updateState(state connectivity.State) {
return
}
csm.state = state
- if channelz.IsOn() {
- channelz.AddTraceEvent(csm.channelzID, &channelz.TraceEventDesc{
- Desc: fmt.Sprintf("Channel Connectivity change to %v", state),
- Severity: channelz.CtINFO,
- })
- }
if csm.notifyChan != nil {
// There are other goroutines waiting on this channel.
close(csm.notifyChan)
@@ -397,10 +624,6 @@ type ClientConn struct {
preBalancerName string // previous balancer name.
curAddresses []resolver.Address
balancerWrapper *ccBalancerWrapper
- retryThrottler atomic.Value
-
- channelzID int64 // channelz unique identification number
- czData *channelzData
}
// WaitForStateChange waits until the connectivity.State of ClientConn changes from sourceState or
@@ -527,26 +750,10 @@ func (cc *ClientConn) switchBalancer(name string) {
}
builder := balancer.Get(name)
- // TODO(yuxuanli): If user send a service config that does not contain a valid balancer name, should
- // we reuse previous one?
- if channelz.IsOn() {
- if builder == nil {
- channelz.AddTraceEvent(cc.channelzID, &channelz.TraceEventDesc{
- Desc: fmt.Sprintf("Channel switches to new LB policy %q due to fallback from invalid balancer name", PickFirstBalancerName),
- Severity: channelz.CtWarning,
- })
- } else {
- channelz.AddTraceEvent(cc.channelzID, &channelz.TraceEventDesc{
- Desc: fmt.Sprintf("Channel switches to new LB policy %q", name),
- Severity: channelz.CtINFO,
- })
- }
- }
if builder == nil {
grpclog.Infof("failed to get balancer builder for: %v, using pick_first instead", name)
builder = newPickfirstBuilder()
}
-
cc.preBalancerName = cc.curBalancerName
cc.curBalancerName = builder.Name()
cc.balancerWrapper = newCCBalancerWrapper(cc, builder, cc.balancerBuildOpts)
@@ -567,15 +774,11 @@ func (cc *ClientConn) handleSubConnStateChange(sc balancer.SubConn, s connectivi
// newAddrConn creates an addrConn for addrs and adds it to cc.conns.
//
// Caller needs to make sure len(addrs) > 0.
-func (cc *ClientConn) newAddrConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (*addrConn, error) {
+func (cc *ClientConn) newAddrConn(addrs []resolver.Address) (*addrConn, error) {
ac := &addrConn{
- cc: cc,
- addrs: addrs,
- scopts: opts,
- dopts: cc.dopts,
- czData: new(channelzData),
- successfulHandshake: true, // make the first nextAddr() call _not_ move addrIdx up by 1
- resetBackoff: make(chan struct{}),
+ cc: cc,
+ addrs: addrs,
+ dopts: cc.dopts,
}
ac.ctx, ac.cancel = context.WithCancel(cc.ctx)
// Track ac in cc. This needs to be done before any getTransport(...) is called.
@@ -584,17 +787,6 @@ func (cc *ClientConn) newAddrConn(addrs []resolver.Address, opts balancer.NewSub
cc.mu.Unlock()
return nil, ErrClientConnClosing
}
- if channelz.IsOn() {
- ac.channelzID = channelz.RegisterSubChannel(ac, cc.channelzID, "")
- channelz.AddTraceEvent(ac.channelzID, &channelz.TraceEventDesc{
- Desc: "Subchannel Created",
- Severity: channelz.CtINFO,
- Parent: &channelz.TraceEventDesc{
- Desc: fmt.Sprintf("Subchannel(id:%d) created", ac.channelzID),
- Severity: channelz.CtINFO,
- },
- })
- }
cc.conns[ac] = struct{}{}
cc.mu.Unlock()
return ac, nil
@@ -613,39 +805,11 @@ func (cc *ClientConn) removeAddrConn(ac *addrConn, err error) {
ac.tearDown(err)
}
-func (cc *ClientConn) channelzMetric() *channelz.ChannelInternalMetric {
- return &channelz.ChannelInternalMetric{
- State: cc.GetState(),
- Target: cc.target,
- CallsStarted: atomic.LoadInt64(&cc.czData.callsStarted),
- CallsSucceeded: atomic.LoadInt64(&cc.czData.callsSucceeded),
- CallsFailed: atomic.LoadInt64(&cc.czData.callsFailed),
- LastCallStartedTimestamp: time.Unix(0, atomic.LoadInt64(&cc.czData.lastCallStartedTime)),
- }
-}
-
-// Target returns the target string of the ClientConn.
-// This is an EXPERIMENTAL API.
-func (cc *ClientConn) Target() string {
- return cc.target
-}
-
-func (cc *ClientConn) incrCallsStarted() {
- atomic.AddInt64(&cc.czData.callsStarted, 1)
- atomic.StoreInt64(&cc.czData.lastCallStartedTime, time.Now().UnixNano())
-}
-
-func (cc *ClientConn) incrCallsSucceeded() {
- atomic.AddInt64(&cc.czData.callsSucceeded, 1)
-}
-
-func (cc *ClientConn) incrCallsFailed() {
- atomic.AddInt64(&cc.czData.callsFailed, 1)
-}
-
-// connect starts creating a transport.
+// connect starts creating a transport and also starts the transport monitor
+// goroutine for this ac.
// It does nothing if the ac is not IDLE.
// TODO(bar) Move this to the addrConn section.
+// This was part of resetAddrConn, keep it here to make the diff look clean.
func (ac *addrConn) connect() error {
ac.mu.Lock()
if ac.state == connectivity.Shutdown {
@@ -656,12 +820,22 @@ func (ac *addrConn) connect() error {
ac.mu.Unlock()
return nil
}
- ac.updateConnectivityState(connectivity.Connecting)
+ ac.state = connectivity.Connecting
ac.cc.handleSubConnStateChange(ac.acbw, ac.state)
ac.mu.Unlock()
// Start a goroutine connecting to the server asynchronously.
- go ac.resetTransport(false)
+ go func() {
+ if err := ac.resetTransport(); err != nil {
+ grpclog.Warningf("Failed to dial %s: %v; please retry.", ac.addrs[0].Addr, err)
+ if err != errConnClosing {
+ // Keep this ac in cc.conns, to get the reason it's torn down.
+ ac.tearDown(err)
+ }
+ return
+ }
+ ac.transportMonitor()
+ }()
return nil
}
@@ -690,7 +864,7 @@ func (ac *addrConn) tryUpdateAddrs(addrs []resolver.Address) bool {
grpclog.Infof("addrConn: tryUpdateAddrs curAddrFound: %v", curAddrFound)
if curAddrFound {
ac.addrs = addrs
- ac.addrIdx = 0 // Start reconnecting from beginning in the new list.
+ ac.reconnectIdx = 0 // Start reconnecting from beginning in the new list.
}
return curAddrFound
@@ -701,7 +875,7 @@ func (ac *addrConn) tryUpdateAddrs(addrs []resolver.Address) bool {
// the corresponding MethodConfig.
// If there isn't an exact match for the input method, we look for the default config
// under the service (i.e /service/). If there is a default MethodConfig for
-// the service, we return it.
+// the service, we return it.
// Otherwise, we return an empty MethodConfig.
func (cc *ClientConn) GetMethodConfig(method string) MethodConfig {
// TODO: Avoid the locking here.
@@ -710,17 +884,13 @@ func (cc *ClientConn) GetMethodConfig(method string) MethodConfig {
m, ok := cc.sc.Methods[method]
if !ok {
i := strings.LastIndex(method, "/")
- m = cc.sc.Methods[method[:i+1]]
+ m, _ = cc.sc.Methods[method[:i+1]]
}
return m
}
-func (cc *ClientConn) getTransport(ctx context.Context, failfast bool, method string) (transport.ClientTransport, func(balancer.DoneInfo), error) {
- hdr, _ := metadata.FromOutgoingContext(ctx)
- t, done, err := cc.blockingpicker.pick(ctx, failfast, balancer.PickOptions{
- FullMethodName: method,
- Header: hdr,
- })
+func (cc *ClientConn) getTransport(ctx context.Context, failfast bool) (transport.ClientTransport, func(balancer.DoneInfo), error) {
+ t, done, err := cc.blockingpicker.pick(ctx, failfast, balancer.PickOptions{})
if err != nil {
return nil, nil, toRPCErr(err)
}
@@ -730,47 +900,13 @@ func (cc *ClientConn) getTransport(ctx context.Context, failfast bool, method st
// handleServiceConfig parses the service config string in JSON format to Go native
// struct ServiceConfig, and stores both the struct and the JSON string in ClientConn.
func (cc *ClientConn) handleServiceConfig(js string) error {
- if cc.dopts.disableServiceConfig {
- return nil
- }
- if cc.scRaw == js {
- return nil
- }
- if channelz.IsOn() {
- channelz.AddTraceEvent(cc.channelzID, &channelz.TraceEventDesc{
- // The special formatting of \"%s\" instead of %q is to provide nice printing of service config
- // for human consumption.
- Desc: fmt.Sprintf("Channel has a new service config \"%s\"", js),
- Severity: channelz.CtINFO,
- })
- }
sc, err := parseServiceConfig(js)
if err != nil {
return err
}
cc.mu.Lock()
- // Check if the ClientConn is already closed. Some fields (e.g.
- // balancerWrapper) are set to nil when closing the ClientConn, and could
- // cause nil pointer panic if we don't have this check.
- if cc.conns == nil {
- cc.mu.Unlock()
- return nil
- }
cc.scRaw = js
cc.sc = sc
-
- if sc.retryThrottling != nil {
- newThrottler := &retryThrottler{
- tokens: sc.retryThrottling.MaxTokens,
- max: sc.retryThrottling.MaxTokens,
- thresh: sc.retryThrottling.MaxTokens / 2,
- ratio: sc.retryThrottling.TokenRatio,
- }
- cc.retryThrottler.Store(newThrottler)
- } else {
- cc.retryThrottler.Store((*retryThrottler)(nil))
- }
-
if sc.LB != nil && *sc.LB != grpclbName { // "grpclb" is not a valid balancer option in service config.
if cc.curBalancerName == grpclbName {
// If current balancer is grpclb, there's at least one grpclb
@@ -784,42 +920,23 @@ func (cc *ClientConn) handleServiceConfig(js string) error {
cc.balancerWrapper.handleResolvedAddrs(cc.curAddresses, nil)
}
}
-
cc.mu.Unlock()
return nil
}
func (cc *ClientConn) resolveNow(o resolver.ResolveNowOption) {
- cc.mu.RLock()
+ cc.mu.Lock()
r := cc.resolverWrapper
- cc.mu.RUnlock()
+ cc.mu.Unlock()
if r == nil {
return
}
go r.resolveNow(o)
}
-// ResetConnectBackoff wakes up all subchannels in transient failure and causes
-// them to attempt another connection immediately. It also resets the backoff
-// times used for subsequent attempts regardless of the current state.
-//
-// In general, this function should not be used. Typical service or network
-// outages result in a reasonable client reconnection strategy by default.
-// However, if a previously unavailable network becomes available, this may be
-// used to trigger an immediate reconnect.
-//
-// This API is EXPERIMENTAL.
-func (cc *ClientConn) ResetConnectBackoff() {
- cc.mu.Lock()
- defer cc.mu.Unlock()
- for ac := range cc.conns {
- ac.resetConnectBackoff()
- }
-}
-
// Close tears down the ClientConn and all underlying connections.
func (cc *ClientConn) Close() error {
- defer cc.cancel()
+ cc.cancel()
cc.mu.Lock()
if cc.conns == nil {
@@ -835,35 +952,16 @@ func (cc *ClientConn) Close() error {
bWrapper := cc.balancerWrapper
cc.balancerWrapper = nil
cc.mu.Unlock()
-
cc.blockingpicker.close()
-
if rWrapper != nil {
rWrapper.close()
}
if bWrapper != nil {
bWrapper.close()
}
-
for ac := range conns {
ac.tearDown(ErrClientConnClosing)
}
- if channelz.IsOn() {
- ted := &channelz.TraceEventDesc{
- Desc: "Channel Deleted",
- Severity: channelz.CtINFO,
- }
- if cc.dopts.channelzParentID != 0 {
- ted.Parent = &channelz.TraceEventDesc{
- Desc: fmt.Sprintf("Nested channel(id:%d) deleted", cc.channelzID),
- Severity: channelz.CtINFO,
- }
- }
- channelz.AddTraceEvent(cc.channelzID, ted)
- // TraceEvent needs to be called before RemoveEntry, as TraceEvent may add trace reference to
- // the entity beng deleted, and thus prevent it from being deleted right away.
- channelz.RemoveEntry(cc.channelzID)
- }
return nil
}
@@ -873,47 +971,30 @@ type addrConn struct {
cancel context.CancelFunc
cc *ClientConn
+ addrs []resolver.Address
dopts dialOptions
+ events trace.EventLog
acbw balancer.SubConn
- scopts balancer.NewSubConnOptions
-
- transport transport.ClientTransport // The current transport.
-
- mu sync.Mutex
- addrIdx int // The index in addrs list to start reconnecting from.
- curAddr resolver.Address // The current address.
- addrs []resolver.Address // All addresses that the resolver resolved to.
- // Use updateConnectivityState for updating addrConn's connectivity state.
- state connectivity.State
+ mu sync.Mutex
+ curAddr resolver.Address
+ reconnectIdx int // The index in addrs list to start reconnecting from.
+ state connectivity.State
+ // ready is closed and becomes nil when a new transport is up or failed
+ // due to timeout.
+ ready chan struct{}
+ transport transport.ClientTransport
- tearDownErr error // The reason this addrConn is torn down.
+ // The reason this addrConn is torn down.
+ tearDownErr error
- backoffIdx int
+ connectRetryNum int
// backoffDeadline is the time until which resetTransport needs to
- // wait before increasing backoffIdx count.
+ // wait before increasing connectRetryNum count.
backoffDeadline time.Time
// connectDeadline is the time by which all connection
// negotiations must complete.
connectDeadline time.Time
-
- resetBackoff chan struct{}
-
- channelzID int64 // channelz unique identification number
- czData *channelzData
-
- successfulHandshake bool
-}
-
-// Note: this requires a lock on ac.mu.
-func (ac *addrConn) updateConnectivityState(s connectivity.State) {
- ac.state = s
- if channelz.IsOn() {
- channelz.AddTraceEvent(ac.channelzID, &channelz.TraceEventDesc{
- Desc: fmt.Sprintf("Subchannel Connectivity change to %v", s),
- Severity: channelz.CtINFO,
- })
- }
}
// adjustParams updates parameters used to create transports upon
@@ -930,320 +1011,317 @@ func (ac *addrConn) adjustParams(r transport.GoAwayReason) {
}
}
-// resetTransport makes sure that a healthy ac.transport exists.
-//
-// The transport will close itself when it encounters an error, or on GOAWAY, or on deadline waiting for handshake, or
-// when the clientconn is closed. Each iteration creating a new transport will try a different address that the balancer
-// assigned to the addrConn, until it has tried all addresses. Once it has tried all addresses, it will re-resolve to
-// get a new address list. If an error is received, the list is re-resolved and the next reset attempt will try from the
-// beginning. This method has backoff built in. The backoff amount starts at 0 and increases each time resolution occurs
-// (addresses are exhausted). The backoff amount is reset to 0 each time a handshake is received.
-//
-// If the DialOption WithWaitForHandshake was set, resetTransport returns successfully only after handshake is received.
-func (ac *addrConn) resetTransport(resolveNow bool) {
- for {
- // If this is the first in a line of resets, we want to resolve immediately. The only other time we
- // want to reset is if we have tried all the addresses handed to us.
- if resolveNow {
- ac.mu.Lock()
- ac.cc.resolveNow(resolver.ResolveNowOption{})
- ac.mu.Unlock()
- }
+// printf records an event in ac's event log, unless ac has been closed.
+// REQUIRES ac.mu is held.
+func (ac *addrConn) printf(format string, a ...interface{}) {
+ if ac.events != nil {
+ ac.events.Printf(format, a...)
+ }
+}
- ac.mu.Lock()
- if ac.state == connectivity.Shutdown {
- ac.mu.Unlock()
- return
- }
+// errorf records an error in ac's event log, unless ac has been closed.
+// REQUIRES ac.mu is held.
+func (ac *addrConn) errorf(format string, a ...interface{}) {
+ if ac.events != nil {
+ ac.events.Errorf(format, a...)
+ }
+}
- // If the connection is READY, a failure must have occurred.
- // Otherwise, we'll consider this is a transient failure when:
- // We've exhausted all addresses
- // We're in CONNECTING
- // And it's not the very first addr to try TODO(deklerk) find a better way to do this than checking ac.successfulHandshake
- if ac.state == connectivity.Ready || (ac.addrIdx == len(ac.addrs)-1 && ac.state == connectivity.Connecting && !ac.successfulHandshake) {
- ac.updateConnectivityState(connectivity.TransientFailure)
- ac.cc.handleSubConnStateChange(ac.acbw, ac.state)
- }
- ac.transport = nil
+// resetTransport recreates a transport to the address for ac. The old
+// transport will close itself on error or when the clientconn is closed.
+// The created transport must receive initial settings frame from the server.
+// In case that doesn't happen, transportMonitor will kill the newly created
+// transport after connectDeadline has expired.
+// In case there was an error on the transport before the settings frame was
+// received, resetTransport resumes connecting to backends after the one that
+// was previously connected to. If the end of the list is reached, resetTransport
+// backs off until the original deadline.
+// If the DialOption WithWaitForHandshake was set, resetTransport returns
+// successfully only after server settings are received.
+//
+// TODO(bar) make sure all state transitions are valid.
+func (ac *addrConn) resetTransport() error {
+ ac.mu.Lock()
+ if ac.state == connectivity.Shutdown {
ac.mu.Unlock()
-
- if err := ac.nextAddr(); err != nil {
- return
- }
-
+ return errConnClosing
+ }
+ if ac.ready != nil {
+ close(ac.ready)
+ ac.ready = nil
+ }
+ ac.transport = nil
+ ridx := ac.reconnectIdx
+ ac.mu.Unlock()
+ ac.cc.mu.RLock()
+ ac.dopts.copts.KeepaliveParams = ac.cc.mkp
+ ac.cc.mu.RUnlock()
+ var backoffDeadline, connectDeadline time.Time
+ for connectRetryNum := 0; ; connectRetryNum++ {
ac.mu.Lock()
- if ac.state == connectivity.Shutdown {
- ac.mu.Unlock()
- return
- }
-
- backoffIdx := ac.backoffIdx
- backoffFor := ac.dopts.bs.Backoff(backoffIdx)
-
- // This will be the duration that dial gets to finish.
- dialDuration := getMinConnectTimeout()
- if backoffFor > dialDuration {
- // Give dial more time as we keep failing to connect.
- dialDuration = backoffFor
+ if ac.backoffDeadline.IsZero() {
+ // This means either a successful HTTP2 connection was established
+ // or this is the first time this addrConn is trying to establish a
+ // connection.
+ backoffFor := ac.dopts.bs.backoff(connectRetryNum) // time.Duration.
+ // This will be the duration that dial gets to finish.
+ dialDuration := minConnectTimeout
+ if backoffFor > dialDuration {
+ // Give dial more time as we keep failing to connect.
+ dialDuration = backoffFor
+ }
+ start := time.Now()
+ backoffDeadline = start.Add(backoffFor)
+ connectDeadline = start.Add(dialDuration)
+ ridx = 0 // Start connecting from the beginning.
+ } else {
+ // Continue trying to connect with the same deadlines.
+ connectRetryNum = ac.connectRetryNum
+ backoffDeadline = ac.backoffDeadline
+ connectDeadline = ac.connectDeadline
+ ac.backoffDeadline = time.Time{}
+ ac.connectDeadline = time.Time{}
+ ac.connectRetryNum = 0
}
- start := time.Now()
- connectDeadline := start.Add(dialDuration)
- ac.backoffDeadline = start.Add(backoffFor)
- ac.connectDeadline = connectDeadline
-
- ac.mu.Unlock()
-
- ac.cc.mu.RLock()
- ac.dopts.copts.KeepaliveParams = ac.cc.mkp
- ac.cc.mu.RUnlock()
-
- ac.mu.Lock()
-
if ac.state == connectivity.Shutdown {
ac.mu.Unlock()
- return
+ return errConnClosing
}
-
+ ac.printf("connecting")
if ac.state != connectivity.Connecting {
- ac.updateConnectivityState(connectivity.Connecting)
+ ac.state = connectivity.Connecting
ac.cc.handleSubConnStateChange(ac.acbw, ac.state)
}
-
- addr := ac.addrs[ac.addrIdx]
+ // copy ac.addrs in case of race
+ addrsIter := make([]resolver.Address, len(ac.addrs))
+ copy(addrsIter, ac.addrs)
copts := ac.dopts.copts
- if ac.scopts.CredsBundle != nil {
- copts.CredsBundle = ac.scopts.CredsBundle
- }
ac.mu.Unlock()
-
- if channelz.IsOn() {
- channelz.AddTraceEvent(ac.channelzID, &channelz.TraceEventDesc{
- Desc: fmt.Sprintf("Subchannel picks a new address %q to connect", addr.Addr),
- Severity: channelz.CtINFO,
- })
+ connected, err := ac.createTransport(connectRetryNum, ridx, backoffDeadline, connectDeadline, addrsIter, copts)
+ if err != nil {
+ return err
}
-
- if err := ac.createTransport(backoffIdx, addr, copts, connectDeadline); err != nil {
- continue
+ if connected {
+ return nil
}
-
- return
}
}
// createTransport creates a connection to one of the backends in addrs.
-func (ac *addrConn) createTransport(backoffNum int, addr resolver.Address, copts transport.ConnectOptions, connectDeadline time.Time) error {
- oneReset := sync.Once{}
- skipReset := make(chan struct{})
- allowedToReset := make(chan struct{})
- prefaceReceived := make(chan struct{})
- onCloseCalled := make(chan struct{})
-
- var prefaceMu sync.Mutex
- var serverPrefaceReceived bool
- var clientPrefaceWrote bool
-
- onGoAway := func(r transport.GoAwayReason) {
- ac.mu.Lock()
- ac.adjustParams(r)
- ac.mu.Unlock()
- select {
- case <-skipReset: // The outer resetTransport loop will handle reconnection.
- return
- case <-allowedToReset: // We're in the clear to reset.
- go oneReset.Do(func() { ac.resetTransport(false) })
+// It returns true if a connection was established.
+func (ac *addrConn) createTransport(connectRetryNum, ridx int, backoffDeadline, connectDeadline time.Time, addrs []resolver.Address, copts transport.ConnectOptions) (bool, error) {
+ for i := ridx; i < len(addrs); i++ {
+ addr := addrs[i]
+ target := transport.TargetInfo{
+ Addr: addr.Addr,
+ Metadata: addr.Metadata,
+ Authority: ac.cc.authority,
}
- }
-
- prefaceTimer := time.NewTimer(connectDeadline.Sub(time.Now()))
-
- onClose := func() {
- close(onCloseCalled)
- prefaceTimer.Stop()
-
- select {
- case <-skipReset: // The outer resetTransport loop will handle reconnection.
- return
- case <-allowedToReset: // We're in the clear to reset.
- oneReset.Do(func() { ac.resetTransport(false) })
- }
- }
-
- target := transport.TargetInfo{
- Addr: addr.Addr,
- Metadata: addr.Metadata,
- Authority: ac.cc.authority,
- }
-
- onPrefaceReceipt := func() {
- close(prefaceReceived)
- prefaceTimer.Stop()
-
- // TODO(deklerk): optimization; does anyone else actually use this lock? maybe we can just remove it for this scope
- ac.mu.Lock()
-
- prefaceMu.Lock()
- serverPrefaceReceived = true
- if clientPrefaceWrote {
- ac.successfulHandshake = true
- ac.backoffDeadline = time.Time{}
- ac.connectDeadline = time.Time{}
- ac.addrIdx = 0
- ac.backoffIdx = 0
+ done := make(chan struct{})
+ onPrefaceReceipt := func() {
+ ac.mu.Lock()
+ close(done)
+ if !ac.backoffDeadline.IsZero() {
+ // If we haven't already started reconnecting to
+ // other backends.
+ // Note: this can happen when the writer notices an error
+ // and triggers resetTransport while, at the same time, the
+ // reader receives the preface and invokes this closure.
+ ac.backoffDeadline = time.Time{}
+ ac.connectDeadline = time.Time{}
+ ac.connectRetryNum = 0
+ }
+ ac.mu.Unlock()
}
- prefaceMu.Unlock()
-
- ac.mu.Unlock()
- }
-
- // Do not cancel in the success path because of this issue in Go1.6: https://github.com/golang/go/issues/15078.
- connectCtx, cancel := context.WithDeadline(ac.ctx, connectDeadline)
- if channelz.IsOn() {
- copts.ChannelzParentID = ac.channelzID
- }
-
- newTr, err := transport.NewClientTransport(connectCtx, ac.cc.ctx, target, copts, onPrefaceReceipt, onGoAway, onClose)
-
- if err == nil {
- prefaceMu.Lock()
- clientPrefaceWrote = true
- if serverPrefaceReceived {
- ac.successfulHandshake = true
+ // Do not cancel in the success path because of
+ // this issue in Go1.6: https://github.com/golang/go/issues/15078.
+ connectCtx, cancel := context.WithDeadline(ac.ctx, connectDeadline)
+ newTr, err := transport.NewClientTransport(connectCtx, ac.cc.ctx, target, copts, onPrefaceReceipt)
+ if err != nil {
+ cancel()
+ if e, ok := err.(transport.ConnectionError); ok && !e.Temporary() {
+ ac.mu.Lock()
+ if ac.state != connectivity.Shutdown {
+ ac.state = connectivity.TransientFailure
+ ac.cc.handleSubConnStateChange(ac.acbw, ac.state)
+ }
+ ac.mu.Unlock()
+ return false, err
+ }
+ ac.mu.Lock()
+ if ac.state == connectivity.Shutdown {
+ // ac.tearDown(...) has been invoked.
+ ac.mu.Unlock()
+ return false, errConnClosing
+ }
+ ac.mu.Unlock()
+ grpclog.Warningf("grpc: addrConn.createTransport failed to connect to %v. Err :%v. Reconnecting...", addr, err)
+ continue
}
- prefaceMu.Unlock()
-
if ac.dopts.waitForHandshake {
select {
- case <-prefaceTimer.C:
- // We didn't get the preface in time.
+ case <-done:
+ case <-connectCtx.Done():
+ // Didn't receive server preface, must kill this new transport now.
+ grpclog.Warningf("grpc: addrConn.createTransport failed to receive server preface before deadline.")
newTr.Close()
- err = errors.New("timed out waiting for server handshake")
- case <-prefaceReceived:
- // We got the preface - huzzah! things are good.
- case <-onCloseCalled:
- // The transport has already closed - noop.
- close(allowedToReset)
- return nil
+ break
+ case <-ac.ctx.Done():
}
- } else {
- go func() {
- select {
- case <-prefaceTimer.C:
- // We didn't get the preface in time.
- newTr.Close()
- case <-prefaceReceived:
- // We got the preface just in the nick of time - huzzah!
- case <-onCloseCalled:
- // The transport has already closed - noop.
- }
- }()
}
- }
-
- if err != nil {
- // newTr is either nil, or closed.
- cancel()
- ac.cc.blockingpicker.updateConnectionError(err)
ac.mu.Lock()
if ac.state == connectivity.Shutdown {
- // ac.tearDown(...) has been invoked.
ac.mu.Unlock()
-
- // We don't want to reset during this close because we prefer to kick out of this function and let the loop
- // in resetTransport take care of reconnecting.
- close(skipReset)
-
- return errConnClosing
+ // ac.tearDown(...) has been invoked.
+ newTr.Close()
+ return false, errConnClosing
+ }
+ ac.printf("ready")
+ ac.state = connectivity.Ready
+ ac.cc.handleSubConnStateChange(ac.acbw, ac.state)
+ ac.transport = newTr
+ ac.curAddr = addr
+ if ac.ready != nil {
+ close(ac.ready)
+ ac.ready = nil
+ }
+ select {
+ case <-done:
+ // If the server has already responded with the preface,
+ // don't set the reconnect parameters.
+ default:
+ ac.connectRetryNum = connectRetryNum
+ ac.backoffDeadline = backoffDeadline
+ ac.connectDeadline = connectDeadline
+ ac.reconnectIdx = i + 1 // Start reconnecting from the next backend in the list.
}
ac.mu.Unlock()
- grpclog.Warningf("grpc: addrConn.createTransport failed to connect to %v. Err :%v. Reconnecting...", addr, err)
-
- // We don't want to reset during this close because we prefer to kick out of this function and let the loop
- // in resetTransport take care of reconnecting.
- close(skipReset)
-
- return err
+ return true, nil
}
-
ac.mu.Lock()
-
- if ac.state == connectivity.Shutdown {
- ac.mu.Unlock()
-
- // We don't want to reset during this close because we prefer to kick out of this function and let the loop
- // in resetTransport take care of reconnecting.
- close(skipReset)
-
- newTr.Close()
- return errConnClosing
- }
-
- ac.updateConnectivityState(connectivity.Ready)
+ ac.state = connectivity.TransientFailure
ac.cc.handleSubConnStateChange(ac.acbw, ac.state)
- ac.transport = newTr
- ac.curAddr = addr
-
- ac.mu.Unlock()
-
- // Ok, _now_ we will finally let the transport reset if it encounters a closable error. Without this, the reader
- // goroutine failing races with all the code in this method that sets the connection to "ready".
- close(allowedToReset)
- return nil
-}
-
-// nextAddr increments the addrIdx if there are more addresses to try. If
-// there are no more addrs to try it will re-resolve, set addrIdx to 0, and
-// increment the backoffIdx.
-//
-// nextAddr must be called without ac.mu being held.
-func (ac *addrConn) nextAddr() error {
- ac.mu.Lock()
-
- // If a handshake has been observed, we expect the counters to have manually
- // been reset so we'll just return, since we want the next usage to start
- // at index 0.
- if ac.successfulHandshake {
- ac.successfulHandshake = false
- ac.mu.Unlock()
- return nil
- }
-
- if ac.addrIdx < len(ac.addrs)-1 {
- ac.addrIdx++
- ac.mu.Unlock()
- return nil
- }
-
- ac.addrIdx = 0
- ac.backoffIdx++
-
- if ac.state == connectivity.Shutdown {
- ac.mu.Unlock()
- return errConnClosing
- }
ac.cc.resolveNow(resolver.ResolveNowOption{})
- backoffDeadline := ac.backoffDeadline
- b := ac.resetBackoff
+ if ac.ready != nil {
+ close(ac.ready)
+ ac.ready = nil
+ }
ac.mu.Unlock()
timer := time.NewTimer(backoffDeadline.Sub(time.Now()))
select {
case <-timer.C:
- case <-b:
- timer.Stop()
case <-ac.ctx.Done():
timer.Stop()
- return ac.ctx.Err()
+ return false, ac.ctx.Err()
}
- return nil
+ return false, nil
}
-func (ac *addrConn) resetConnectBackoff() {
- ac.mu.Lock()
- close(ac.resetBackoff)
- ac.backoffIdx = 0
- ac.resetBackoff = make(chan struct{})
- ac.mu.Unlock()
+// transportMonitor runs in a goroutine to track errors on the transport and creates a
+// new transport if an error happens. It returns when the channel is closing.
+func (ac *addrConn) transportMonitor() {
+ for {
+ var timer *time.Timer
+ var cdeadline <-chan time.Time
+ ac.mu.Lock()
+ t := ac.transport
+ if !ac.connectDeadline.IsZero() {
+ timer = time.NewTimer(ac.connectDeadline.Sub(time.Now()))
+ cdeadline = timer.C
+ }
+ ac.mu.Unlock()
+ // Block until we receive a goaway or an error occurs.
+ select {
+ case <-t.GoAway():
+ case <-t.Error():
+ case <-cdeadline:
+ ac.mu.Lock()
+ // This implies that client received server preface.
+ if ac.backoffDeadline.IsZero() {
+ ac.mu.Unlock()
+ continue
+ }
+ ac.mu.Unlock()
+ timer = nil
+ // No server preface received until deadline.
+ // Kill the connection.
+ grpclog.Warningf("grpc: addrConn.transportMonitor didn't get server preface after waiting. Closing the new transport now.")
+ t.Close()
+ }
+ if timer != nil {
+ timer.Stop()
+ }
+ // If a GoAway happened, regardless of error, adjust our keepalive
+ // parameters as appropriate.
+ select {
+ case <-t.GoAway():
+ ac.adjustParams(t.GetGoAwayReason())
+ default:
+ }
+ ac.mu.Lock()
+ if ac.state == connectivity.Shutdown {
+ ac.mu.Unlock()
+ return
+ }
+ // Set connectivity state to TransientFailure before calling
+ // resetTransport. Transition READY->CONNECTING is not valid.
+ ac.state = connectivity.TransientFailure
+ ac.cc.handleSubConnStateChange(ac.acbw, ac.state)
+ ac.cc.resolveNow(resolver.ResolveNowOption{})
+ ac.curAddr = resolver.Address{}
+ ac.mu.Unlock()
+ if err := ac.resetTransport(); err != nil {
+ ac.mu.Lock()
+ ac.printf("transport exiting: %v", err)
+ ac.mu.Unlock()
+ grpclog.Warningf("grpc: addrConn.transportMonitor exits due to: %v", err)
+ if err != errConnClosing {
+ // Keep this ac in cc.conns, to get the reason it's torn down.
+ ac.tearDown(err)
+ }
+ return
+ }
+ }
+}
+
+// wait blocks until i) the new transport is up, ii) ctx is done, iii) ac is closed, or
+// iv) the transport is in connectivity.TransientFailure and there is a balancer or failfast is true.
+func (ac *addrConn) wait(ctx context.Context, hasBalancer, failfast bool) (transport.ClientTransport, error) {
+ for {
+ ac.mu.Lock()
+ switch {
+ case ac.state == connectivity.Shutdown:
+ if failfast || !hasBalancer {
+ // RPC is failfast or balancer is nil. This RPC should fail with ac.tearDownErr.
+ err := ac.tearDownErr
+ ac.mu.Unlock()
+ return nil, err
+ }
+ ac.mu.Unlock()
+ return nil, errConnClosing
+ case ac.state == connectivity.Ready:
+ ct := ac.transport
+ ac.mu.Unlock()
+ return ct, nil
+ case ac.state == connectivity.TransientFailure:
+ if failfast || hasBalancer {
+ ac.mu.Unlock()
+ return nil, errConnUnavailable
+ }
+ }
+ ready := ac.ready
+ if ready == nil {
+ ready = make(chan struct{})
+ ac.ready = ready
+ }
+ ac.mu.Unlock()
+ select {
+ case <-ctx.Done():
+ return nil, toRPCErr(ctx.Err())
+ // Wait until the new transport is ready or failed.
+ case <-ready:
+ }
+ }
}
// getReadyTransport returns the transport if ac's state is READY.
@@ -1251,7 +1329,7 @@ func (ac *addrConn) resetConnectBackoff() {
// If ac's state is IDLE, it will trigger ac to connect.
func (ac *addrConn) getReadyTransport() (transport.ClientTransport, bool) {
ac.mu.Lock()
- if ac.state == connectivity.Ready && ac.transport != nil {
+ if ac.state == connectivity.Ready {
t := ac.transport
ac.mu.Unlock()
return t, true
@@ -1274,42 +1352,32 @@ func (ac *addrConn) getReadyTransport() (transport.ClientTransport, bool) {
// tight loop.
// tearDown doesn't remove ac from ac.cc.conns.
func (ac *addrConn) tearDown(err error) {
+ ac.cancel()
ac.mu.Lock()
+ defer ac.mu.Unlock()
if ac.state == connectivity.Shutdown {
- ac.mu.Unlock()
return
}
- // We have to set the state to Shutdown before anything else to prevent races
- // between setting the state and logic that waits on context cancelation / etc.
- ac.updateConnectivityState(connectivity.Shutdown)
- ac.cancel()
- ac.tearDownErr = err
- ac.cc.handleSubConnStateChange(ac.acbw, ac.state)
ac.curAddr = resolver.Address{}
if err == errConnDrain && ac.transport != nil {
// GracefulClose(...) may be executed multiple times when
// i) receiving multiple GoAway frames from the server; or
// ii) there are concurrent name resolver/Balancer triggered
// address removal and GoAway.
- // We have to unlock and re-lock here because GracefulClose => Close => onClose, which requires locking ac.mu.
- ac.mu.Unlock()
ac.transport.GracefulClose()
- ac.mu.Lock()
}
- if channelz.IsOn() {
- channelz.AddTraceEvent(ac.channelzID, &channelz.TraceEventDesc{
- Desc: "Subchannel Deleted",
- Severity: channelz.CtINFO,
- Parent: &channelz.TraceEventDesc{
- Desc: fmt.Sprintf("Subchanel(id:%d) deleted", ac.channelzID),
- Severity: channelz.CtINFO,
- },
- })
- // TraceEvent needs to be called before RemoveEntry, as TraceEvent may add trace reference to
- // the entity beng deleted, and thus prevent it from being deleted right away.
- channelz.RemoveEntry(ac.channelzID)
+ ac.state = connectivity.Shutdown
+ ac.tearDownErr = err
+ ac.cc.handleSubConnStateChange(ac.acbw, ac.state)
+ if ac.events != nil {
+ ac.events.Finish()
+ ac.events = nil
}
- ac.mu.Unlock()
+ if ac.ready != nil {
+ close(ac.ready)
+ ac.ready = nil
+ }
+ return
}
func (ac *addrConn) getState() connectivity.State {
@@ -1317,82 +1385,3 @@ func (ac *addrConn) getState() connectivity.State {
defer ac.mu.Unlock()
return ac.state
}
-
-func (ac *addrConn) ChannelzMetric() *channelz.ChannelInternalMetric {
- ac.mu.Lock()
- addr := ac.curAddr.Addr
- ac.mu.Unlock()
- return &channelz.ChannelInternalMetric{
- State: ac.getState(),
- Target: addr,
- CallsStarted: atomic.LoadInt64(&ac.czData.callsStarted),
- CallsSucceeded: atomic.LoadInt64(&ac.czData.callsSucceeded),
- CallsFailed: atomic.LoadInt64(&ac.czData.callsFailed),
- LastCallStartedTimestamp: time.Unix(0, atomic.LoadInt64(&ac.czData.lastCallStartedTime)),
- }
-}
-
-func (ac *addrConn) incrCallsStarted() {
- atomic.AddInt64(&ac.czData.callsStarted, 1)
- atomic.StoreInt64(&ac.czData.lastCallStartedTime, time.Now().UnixNano())
-}
-
-func (ac *addrConn) incrCallsSucceeded() {
- atomic.AddInt64(&ac.czData.callsSucceeded, 1)
-}
-
-func (ac *addrConn) incrCallsFailed() {
- atomic.AddInt64(&ac.czData.callsFailed, 1)
-}
-
-type retryThrottler struct {
- max float64
- thresh float64
- ratio float64
-
- mu sync.Mutex
- tokens float64 // TODO(dfawley): replace with atomic and remove lock.
-}
-
-// throttle subtracts a retry token from the pool and returns whether a retry
-// should be throttled (disallowed) based upon the retry throttling policy in
-// the service config.
-func (rt *retryThrottler) throttle() bool {
- if rt == nil {
- return false
- }
- rt.mu.Lock()
- defer rt.mu.Unlock()
- rt.tokens--
- if rt.tokens < 0 {
- rt.tokens = 0
- }
- return rt.tokens <= rt.thresh
-}
-
-func (rt *retryThrottler) successfulRPC() {
- if rt == nil {
- return
- }
- rt.mu.Lock()
- defer rt.mu.Unlock()
- rt.tokens += rt.ratio
- if rt.tokens > rt.max {
- rt.tokens = rt.max
- }
-}
-
-type channelzChannel struct {
- cc *ClientConn
-}
-
-func (c *channelzChannel) ChannelzMetric() *channelz.ChannelInternalMetric {
- return c.cc.channelzMetric()
-}
-
-// ErrClientConnTimeout indicates that the ClientConn cannot establish the
-// underlying connections within the specified timeout.
-//
-// Deprecated: This error is never returned by grpc and should not be
-// referenced by users.
-var ErrClientConnTimeout = errors.New("grpc: timed out when dialing")
diff --git a/vendor/google.golang.org/grpc/codec.go b/vendor/google.golang.org/grpc/codec.go
index 129776547..43d81ed2a 100644
--- a/vendor/google.golang.org/grpc/codec.go
+++ b/vendor/google.golang.org/grpc/codec.go
@@ -19,32 +19,96 @@
package grpc
import (
- "google.golang.org/grpc/encoding"
- _ "google.golang.org/grpc/encoding/proto" // to register the Codec for "proto"
-)
+ "math"
+ "sync"
-// baseCodec contains the functionality of both Codec and encoding.Codec, but
-// omits the name/string, which vary between the two and are not needed for
-// anything besides the registry in the encoding package.
-type baseCodec interface {
- Marshal(v interface{}) ([]byte, error)
- Unmarshal(data []byte, v interface{}) error
-}
-
-var _ baseCodec = Codec(nil)
-var _ baseCodec = encoding.Codec(nil)
+ "github.com/golang/protobuf/proto"
+)
// Codec defines the interface gRPC uses to encode and decode messages.
// Note that implementations of this interface must be thread safe;
// a Codec's methods can be called from concurrent goroutines.
-//
-// Deprecated: use encoding.Codec instead.
type Codec interface {
// Marshal returns the wire format of v.
Marshal(v interface{}) ([]byte, error)
// Unmarshal parses the wire format into v.
Unmarshal(data []byte, v interface{}) error
- // String returns the name of the Codec implementation. This is unused by
- // gRPC.
+ // String returns the name of the Codec implementation. The returned
+ // string will be used as part of the content type in transmission.
String() string
}
+
+// protoCodec is a Codec implementation with protobuf. It is the default codec for gRPC.
+type protoCodec struct {
+}
+
+type cachedProtoBuffer struct {
+ lastMarshaledSize uint32
+ proto.Buffer
+}
+
+func capToMaxInt32(val int) uint32 {
+ if val > math.MaxInt32 {
+ return uint32(math.MaxInt32)
+ }
+ return uint32(val)
+}
+
+func (p protoCodec) marshal(v interface{}, cb *cachedProtoBuffer) ([]byte, error) {
+ protoMsg := v.(proto.Message)
+ newSlice := make([]byte, 0, cb.lastMarshaledSize)
+
+ cb.SetBuf(newSlice)
+ cb.Reset()
+ if err := cb.Marshal(protoMsg); err != nil {
+ return nil, err
+ }
+ out := cb.Bytes()
+ cb.lastMarshaledSize = capToMaxInt32(len(out))
+ return out, nil
+}
+
+func (p protoCodec) Marshal(v interface{}) ([]byte, error) {
+ if pm, ok := v.(proto.Marshaler); ok {
+ // object can marshal itself, no need for buffer
+ return pm.Marshal()
+ }
+
+ cb := protoBufferPool.Get().(*cachedProtoBuffer)
+ out, err := p.marshal(v, cb)
+
+ // put back buffer and lose the ref to the slice
+ cb.SetBuf(nil)
+ protoBufferPool.Put(cb)
+ return out, err
+}
+
+func (p protoCodec) Unmarshal(data []byte, v interface{}) error {
+ protoMsg := v.(proto.Message)
+ protoMsg.Reset()
+
+ if pu, ok := protoMsg.(proto.Unmarshaler); ok {
+ // object can unmarshal itself, no need for buffer
+ return pu.Unmarshal(data)
+ }
+
+ cb := protoBufferPool.Get().(*cachedProtoBuffer)
+ cb.SetBuf(data)
+ err := cb.Unmarshal(protoMsg)
+ cb.SetBuf(nil)
+ protoBufferPool.Put(cb)
+ return err
+}
+
+func (protoCodec) String() string {
+ return "proto"
+}
+
+var protoBufferPool = &sync.Pool{
+ New: func() interface{} {
+ return &cachedProtoBuffer{
+ Buffer: proto.Buffer{},
+ lastMarshaledSize: 16,
+ }
+ },
+}
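
For illustration, a minimal custom Codec that passes raw byte slices through unchanged; rawCodec is a hypothetical name shown only to demonstrate the interface contract, not a type defined in this package:

package main

import (
	"fmt"

	"google.golang.org/grpc"
)

// rawCodec passes byte slices through untouched, which can be useful when
// proxying frames without decoding them.
type rawCodec struct{}

func (rawCodec) Marshal(v interface{}) ([]byte, error) {
	b, ok := v.([]byte)
	if !ok {
		return nil, fmt.Errorf("rawCodec: expected []byte, got %T", v)
	}
	return b, nil
}

func (rawCodec) Unmarshal(data []byte, v interface{}) error {
	p, ok := v.(*[]byte)
	if !ok {
		return fmt.Errorf("rawCodec: expected *[]byte, got %T", v)
	}
	*p = data
	return nil
}

func (rawCodec) String() string { return "raw" }

// Compile-time check that rawCodec satisfies grpc.Codec.
var _ grpc.Codec = rawCodec{}

func main() {
	fmt.Println("codec in use:", rawCodec{}.String())
}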
diff --git a/vendor/google.golang.org/grpc/codes/codes.go b/vendor/google.golang.org/grpc/codes/codes.go
index d9b9d5782..f3719d562 100644
--- a/vendor/google.golang.org/grpc/codes/codes.go
+++ b/vendor/google.golang.org/grpc/codes/codes.go
@@ -19,10 +19,8 @@
// Package codes defines the canonical error codes used by gRPC. It is
// consistent across various languages.
package codes // import "google.golang.org/grpc/codes"
-
import (
"fmt"
- "strconv"
)
// A Code is an unsigned 32-bit error code as defined in the gRPC spec.
@@ -35,9 +33,9 @@ const (
// Canceled indicates the operation was canceled (typically by the caller).
Canceled Code = 1
- // Unknown error. An example of where this error may be returned is
+ // Unknown error. An example of where this error may be returned is
// if a Status value received from another address space belongs to
- // an error-space that is not known in this address space. Also
+ // an error-space that is not known in this address space. Also
// errors raised by APIs that do not return enough error information
// may be converted to this error.
Unknown Code = 2
@@ -66,11 +64,15 @@ const (
// PermissionDenied indicates the caller does not have permission to
// execute the specified operation. It must not be used for rejections
// caused by exhausting some resource (use ResourceExhausted
- // instead for those errors). It must not be
+ // instead for those errors). It must not be
// used if the caller cannot be identified (use Unauthenticated
// instead for those errors).
PermissionDenied Code = 7
+ // Unauthenticated indicates the request does not have valid
+ // authentication credentials for the operation.
+ Unauthenticated Code = 16
+
// ResourceExhausted indicates some resource has been exhausted, perhaps
// a per-user quota, or perhaps the entire file system is out of space.
ResourceExhausted Code = 8
@@ -86,7 +88,7 @@ const (
// (b) Use Aborted if the client should retry at a higher-level
// (e.g., restarting a read-modify-write sequence).
// (c) Use FailedPrecondition if the client should not retry until
- // the system state has been explicitly fixed. E.g., if an "rmdir"
+ // the system state has been explicitly fixed. E.g., if an "rmdir"
// fails because the directory is non-empty, FailedPrecondition
// should be returned since the client should not retry unless
// they have first fixed up the directory by deleting files from it.
@@ -115,7 +117,7 @@ const (
// file size.
//
// There is a fair bit of overlap between FailedPrecondition and
- // OutOfRange. We recommend using OutOfRange (the more specific
+ // OutOfRange. We recommend using OutOfRange (the more specific
// error) when it applies so that callers who are iterating through
// a space can easily look for an OutOfRange error to detect when
// they are done.
@@ -125,8 +127,8 @@ const (
// supported/enabled in this service.
Unimplemented Code = 12
- // Internal errors. Means some invariants expected by underlying
- // system has been broken. If you see one of these errors,
+ // Internal errors. Means some invariants expected by underlying
+ // system has been broken. If you see one of these errors,
// something is very broken.
Internal Code = 13
@@ -140,12 +142,6 @@ const (
// DataLoss indicates unrecoverable data loss or corruption.
DataLoss Code = 15
-
- // Unauthenticated indicates the request does not have valid
- // authentication credentials for the operation.
- Unauthenticated Code = 16
-
- _maxCode = 17
)
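
For context, these codes are usually produced and inspected through the status package; a small illustrative sketch (authorize is a hypothetical helper):

package main

import (
	"fmt"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

// authorize returns an Unauthenticated status when no token is supplied.
func authorize(token string) error {
	if token == "" {
		return status.Error(codes.Unauthenticated, "missing credentials")
	}
	return nil
}

func main() {
	err := authorize("")
	// status.Code extracts the canonical code from a gRPC error
	// (codes.OK for nil, codes.Unknown for non-status errors).
	fmt.Println(status.Code(err) == codes.Unauthenticated) // true
}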
var strToCode = map[string]Code{
@@ -179,16 +175,6 @@ func (c *Code) UnmarshalJSON(b []byte) error {
if c == nil {
return fmt.Errorf("nil receiver passed to UnmarshalJSON")
}
-
- if ci, err := strconv.ParseUint(string(b), 10, 32); err == nil {
- if ci >= _maxCode {
- return fmt.Errorf("invalid code: %q", ci)
- }
-
- *c = Code(ci)
- return nil
- }
-
if jc, ok := strToCode[string(b)]; ok {
*c = jc
return nil
diff --git a/vendor/google.golang.org/grpc/credentials/credentials.go b/vendor/google.golang.org/grpc/credentials/credentials.go
index 6c2b811fd..1d2e864f8 100644
--- a/vendor/google.golang.org/grpc/credentials/credentials.go
+++ b/vendor/google.golang.org/grpc/credentials/credentials.go
@@ -31,7 +31,6 @@ import (
"net"
"strings"
- "github.com/golang/protobuf/proto"
"golang.org/x/net/context"
)
@@ -44,9 +43,8 @@ type PerRPCCredentials interface {
// GetRequestMetadata gets the current request metadata, refreshing
// tokens if required. This should be called by the transport layer on
// each request, and the data should be populated in headers or other
- // context. If a status code is returned, it will be used as the status
- // for the RPC. uri is the URI of the entry point for the request.
- // When supported by the underlying implementation, ctx can be used for
+ // context. uri is the URI of the entry point for the request. When
+ // supported by the underlying implementation, ctx can be used for
// timeout and cancellation.
// TODO(zhaoq): Define the set of the qualified keys instead of leaving
// it as an arbitrary string.
@@ -108,25 +106,6 @@ type TransportCredentials interface {
OverrideServerName(string) error
}
-// Bundle is a combination of TransportCredentials and PerRPCCredentials.
-//
-// It also contains a mode switching method, so it can be used as a combination
-// of different credential policies.
-//
-// Bundle cannot be used together with individual TransportCredentials.
-// PerRPCCredentials from Bundle will be appended to other PerRPCCredentials.
-//
-// This API is experimental.
-type Bundle interface {
- TransportCredentials() TransportCredentials
- PerRPCCredentials() PerRPCCredentials
- // NewWithMode should make a copy of Bundle, and switch mode. Modifying the
- // existing Bundle may cause races.
- //
- // NewWithMode returns nil if the requested mode is not supported.
- NewWithMode(mode string) (Bundle, error)
-}
-
// TLSInfo contains the auth information for a TLS authenticated connection.
// It implements the AuthInfo interface.
type TLSInfo struct {
@@ -138,18 +117,6 @@ func (t TLSInfo) AuthType() string {
return "tls"
}
-// GetChannelzSecurityValue returns security info requested by channelz.
-func (t TLSInfo) GetChannelzSecurityValue() ChannelzSecurityValue {
- v := &TLSChannelzSecurityValue{
- StandardName: cipherSuiteLookup[t.State.CipherSuite],
- }
- // Currently there's no way to get LocalCertificate info from tls package.
- if len(t.State.PeerCertificates) > 0 {
- v.RemoteCertificate = t.State.PeerCertificates[0].Raw
- }
- return v
-}
-
// tlsCreds is the credentials required for authenticating a connection using TLS.
type tlsCreds struct {
// TLS configuration
@@ -187,7 +154,7 @@ func (c *tlsCreds) ClientHandshake(ctx context.Context, authority string, rawCon
case <-ctx.Done():
return nil, nil, ctx.Err()
}
- return tlsConn{Conn: conn, rawConn: rawConn}, TLSInfo{conn.ConnectionState()}, nil
+ return conn, TLSInfo{conn.ConnectionState()}, nil
}
func (c *tlsCreds) ServerHandshake(rawConn net.Conn) (net.Conn, AuthInfo, error) {
@@ -195,7 +162,7 @@ func (c *tlsCreds) ServerHandshake(rawConn net.Conn) (net.Conn, AuthInfo, error)
if err := conn.Handshake(); err != nil {
return nil, nil, err
}
- return tlsConn{Conn: conn, rawConn: rawConn}, TLSInfo{conn.ConnectionState()}, nil
+ return conn, TLSInfo{conn.ConnectionState()}, nil
}
func (c *tlsCreds) Clone() TransportCredentials {
@@ -250,63 +217,3 @@ func NewServerTLSFromFile(certFile, keyFile string) (TransportCredentials, error
}
return NewTLS(&tls.Config{Certificates: []tls.Certificate{cert}}), nil
}
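
And on the server side, a sketch of using NewServerTLSFromFile with grpc.NewServer; the certificate, key, and listen address are placeholders:

package main

import (
	"log"
	"net"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials"
)

func main() {
	// Certificate and key paths are placeholders for illustration.
	creds, err := credentials.NewServerTLSFromFile("/etc/ssl/server.crt", "/etc/ssl/server.key")
	if err != nil {
		log.Fatalf("load credentials: %v", err)
	}
	lis, err := net.Listen("tcp", ":50051")
	if err != nil {
		log.Fatalf("listen: %v", err)
	}
	srv := grpc.NewServer(grpc.Creds(creds))
	// Register services here before serving.
	if err := srv.Serve(lis); err != nil {
		log.Fatalf("serve: %v", err)
	}
}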
-
-// ChannelzSecurityInfo defines the interface that security protocols should implement
-// in order to provide security info to channelz.
-type ChannelzSecurityInfo interface {
- GetSecurityValue() ChannelzSecurityValue
-}
-
-// ChannelzSecurityValue defines the interface that GetSecurityValue() return value
-// should satisfy. This interface should only be satisfied by *TLSChannelzSecurityValue
-// and *OtherChannelzSecurityValue.
-type ChannelzSecurityValue interface {
- isChannelzSecurityValue()
-}
-
-// TLSChannelzSecurityValue defines the struct that TLS protocol should return
-// from GetSecurityValue(), containing security info like cipher and certificate used.
-type TLSChannelzSecurityValue struct {
- StandardName string
- LocalCertificate []byte
- RemoteCertificate []byte
-}
-
-func (*TLSChannelzSecurityValue) isChannelzSecurityValue() {}
-
-// OtherChannelzSecurityValue defines the struct that non-TLS protocol should return
-// from GetSecurityValue(), which contains protocol specific security info. Note
-// the Value field will be sent to users of channelz requesting channel info, and
-// thus sensitive info should better be avoided.
-type OtherChannelzSecurityValue struct {
- Name string
- Value proto.Message
-}
-
-func (*OtherChannelzSecurityValue) isChannelzSecurityValue() {}
-
-type tlsConn struct {
- *tls.Conn
- rawConn net.Conn
-}
-
-var cipherSuiteLookup = map[uint16]string{
- tls.TLS_RSA_WITH_RC4_128_SHA: "TLS_RSA_WITH_RC4_128_SHA",
- tls.TLS_RSA_WITH_3DES_EDE_CBC_SHA: "TLS_RSA_WITH_3DES_EDE_CBC_SHA",
- tls.TLS_RSA_WITH_AES_128_CBC_SHA: "TLS_RSA_WITH_AES_128_CBC_SHA",
- tls.TLS_RSA_WITH_AES_256_CBC_SHA: "TLS_RSA_WITH_AES_256_CBC_SHA",
- tls.TLS_RSA_WITH_AES_128_GCM_SHA256: "TLS_RSA_WITH_AES_128_GCM_SHA256",
- tls.TLS_RSA_WITH_AES_256_GCM_SHA384: "TLS_RSA_WITH_AES_256_GCM_SHA384",
- tls.TLS_ECDHE_ECDSA_WITH_RC4_128_SHA: "TLS_ECDHE_ECDSA_WITH_RC4_128_SHA",
- tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA: "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA",
- tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA: "TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA",
- tls.TLS_ECDHE_RSA_WITH_RC4_128_SHA: "TLS_ECDHE_RSA_WITH_RC4_128_SHA",
- tls.TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA: "TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA",
- tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA: "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA",
- tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA: "TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA",
- tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256: "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256",
- tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256: "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256",
- tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384: "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384",
- tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384: "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384",
- tls.TLS_FALLBACK_SCSV: "TLS_FALLBACK_SCSV",
-}
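For reference, a minimal sketch of how the TransportCredentials constructors touched in this hunk are typically wired into a client and a server. The file names, target address, and server name are illustrative assumptions, not part of this patch:

package main

import (
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials"
)

func main() {
	// Server side: build TransportCredentials from a certificate/key pair
	// (see NewServerTLSFromFile above).
	serverCreds, err := credentials.NewServerTLSFromFile("server.crt", "server.key")
	if err != nil {
		log.Fatalf("loading server TLS credentials: %v", err)
	}
	_ = grpc.NewServer(grpc.Creds(serverCreds))

	// Client side: trust the server certificate and pin the expected server name.
	clientCreds, err := credentials.NewClientTLSFromFile("server.crt", "example.com")
	if err != nil {
		log.Fatalf("loading client TLS credentials: %v", err)
	}
	conn, err := grpc.Dial("example.com:443", grpc.WithTransportCredentials(clientCreds))
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	defer conn.Close()
}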
diff --git a/vendor/google.golang.org/grpc/credentials/go17.go b/vendor/google.golang.org/grpc/credentials/credentials_util_go17.go
index fbd500002..60409aac0 100644
--- a/vendor/google.golang.org/grpc/credentials/go17.go
+++ b/vendor/google.golang.org/grpc/credentials/credentials_util_go17.go
@@ -1,4 +1,5 @@
-// +build go1.7,!go1.8
+// +build go1.7
+// +build !go1.8
/*
*
diff --git a/vendor/google.golang.org/grpc/credentials/go18.go b/vendor/google.golang.org/grpc/credentials/credentials_util_go18.go
index db30d46cc..93f0e1d8d 100644
--- a/vendor/google.golang.org/grpc/credentials/go18.go
+++ b/vendor/google.golang.org/grpc/credentials/credentials_util_go18.go
@@ -24,14 +24,6 @@ import (
"crypto/tls"
)
-func init() {
- cipherSuiteLookup[tls.TLS_RSA_WITH_AES_128_CBC_SHA256] = "TLS_RSA_WITH_AES_128_CBC_SHA256"
- cipherSuiteLookup[tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256] = "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256"
- cipherSuiteLookup[tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256] = "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256"
- cipherSuiteLookup[tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305] = "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305"
- cipherSuiteLookup[tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305] = "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305"
-}
-
// cloneTLSConfig returns a shallow clone of the exported
// fields of cfg, ignoring the unexported sync.Once, which
// contains a mutex and must not be copied.
diff --git a/vendor/google.golang.org/grpc/credentials/go16.go b/vendor/google.golang.org/grpc/credentials/credentials_util_pre_go17.go
index d6bbcc9fd..d6bbcc9fd 100644
--- a/vendor/google.golang.org/grpc/credentials/go16.go
+++ b/vendor/google.golang.org/grpc/credentials/credentials_util_pre_go17.go
diff --git a/vendor/google.golang.org/grpc/credentials/go19.go b/vendor/google.golang.org/grpc/credentials/go19.go
deleted file mode 100644
index 2a4ca1a57..000000000
--- a/vendor/google.golang.org/grpc/credentials/go19.go
+++ /dev/null
@@ -1,35 +0,0 @@
-// +build go1.9,!appengine
-
-/*
- *
- * Copyright 2018 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package credentials
-
-import (
- "errors"
- "syscall"
-)
-
-// implements the syscall.Conn interface
-func (c tlsConn) SyscallConn() (syscall.RawConn, error) {
- conn, ok := c.rawConn.(syscall.Conn)
- if !ok {
- return nil, errors.New("RawConn does not implement syscall.Conn")
- }
- return conn.SyscallConn()
-}
diff --git a/vendor/google.golang.org/grpc/dialoptions.go b/vendor/google.golang.org/grpc/dialoptions.go
deleted file mode 100644
index 99b495272..000000000
--- a/vendor/google.golang.org/grpc/dialoptions.go
+++ /dev/null
@@ -1,465 +0,0 @@
-/*
- *
- * Copyright 2018 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package grpc
-
-import (
- "fmt"
- "net"
- "time"
-
- "golang.org/x/net/context"
- "google.golang.org/grpc/balancer"
- "google.golang.org/grpc/credentials"
- "google.golang.org/grpc/internal"
- "google.golang.org/grpc/internal/backoff"
- "google.golang.org/grpc/internal/envconfig"
- "google.golang.org/grpc/internal/transport"
- "google.golang.org/grpc/keepalive"
- "google.golang.org/grpc/resolver"
- "google.golang.org/grpc/stats"
-)
-
-// dialOptions configure a Dial call. dialOptions are set by the DialOption
-// values passed to Dial.
-type dialOptions struct {
- unaryInt UnaryClientInterceptor
- streamInt StreamClientInterceptor
- cp Compressor
- dc Decompressor
- bs backoff.Strategy
- block bool
- insecure bool
- timeout time.Duration
- scChan <-chan ServiceConfig
- authority string
- copts transport.ConnectOptions
- callOptions []CallOption
- // This is used by v1 balancer dial option WithBalancer to support v1
- // balancer, and also by WithBalancerName dial option.
- balancerBuilder balancer.Builder
- // This is to support grpclb.
- resolverBuilder resolver.Builder
- waitForHandshake bool
- channelzParentID int64
- disableServiceConfig bool
- disableRetry bool
-}
-
-// DialOption configures how we set up the connection.
-type DialOption interface {
- apply(*dialOptions)
-}
-
-// EmptyDialOption does not alter the dial configuration. It can be embedded in
-// another structure to build custom dial options.
-//
-// This API is EXPERIMENTAL.
-type EmptyDialOption struct{}
-
-func (EmptyDialOption) apply(*dialOptions) {}
-
-// funcDialOption wraps a function that modifies dialOptions into an
-// implementation of the DialOption interface.
-type funcDialOption struct {
- f func(*dialOptions)
-}
-
-func (fdo *funcDialOption) apply(do *dialOptions) {
- fdo.f(do)
-}
-
-func newFuncDialOption(f func(*dialOptions)) *funcDialOption {
- return &funcDialOption{
- f: f,
- }
-}
-
-// WithWaitForHandshake blocks until the initial settings frame is received from
-// the server before assigning RPCs to the connection. Experimental API.
-func WithWaitForHandshake() DialOption {
- return newFuncDialOption(func(o *dialOptions) {
- o.waitForHandshake = true
- })
-}
-
-// WithWriteBufferSize determines how much data can be batched before doing a
-// write on the wire. The corresponding memory allocation for this buffer will
-// be twice the size to keep syscalls low. The default value for this buffer is
-// 32KB.
-//
-// Zero will disable the write buffer such that each write will be on underlying
-// connection. Note: A Send call may not directly translate to a write.
-func WithWriteBufferSize(s int) DialOption {
- return newFuncDialOption(func(o *dialOptions) {
- o.copts.WriteBufferSize = s
- })
-}
-
-// WithReadBufferSize lets you set the size of read buffer, this determines how
-// much data can be read at most for each read syscall.
-//
-// The default value for this buffer is 32KB. Zero will disable read buffer for
-// a connection so data framer can access the underlying conn directly.
-func WithReadBufferSize(s int) DialOption {
- return newFuncDialOption(func(o *dialOptions) {
- o.copts.ReadBufferSize = s
- })
-}
-
-// WithInitialWindowSize returns a DialOption which sets the value for initial
-// window size on a stream. The lower bound for window size is 64K and any value
-// smaller than that will be ignored.
-func WithInitialWindowSize(s int32) DialOption {
- return newFuncDialOption(func(o *dialOptions) {
- o.copts.InitialWindowSize = s
- })
-}
-
-// WithInitialConnWindowSize returns a DialOption which sets the value for
-// initial window size on a connection. The lower bound for window size is 64K
-// and any value smaller than that will be ignored.
-func WithInitialConnWindowSize(s int32) DialOption {
- return newFuncDialOption(func(o *dialOptions) {
- o.copts.InitialConnWindowSize = s
- })
-}
-
-// WithMaxMsgSize returns a DialOption which sets the maximum message size the
-// client can receive.
-//
-// Deprecated: use WithDefaultCallOptions(MaxCallRecvMsgSize(s)) instead.
-func WithMaxMsgSize(s int) DialOption {
- return WithDefaultCallOptions(MaxCallRecvMsgSize(s))
-}
-
-// WithDefaultCallOptions returns a DialOption which sets the default
-// CallOptions for calls over the connection.
-func WithDefaultCallOptions(cos ...CallOption) DialOption {
- return newFuncDialOption(func(o *dialOptions) {
- o.callOptions = append(o.callOptions, cos...)
- })
-}
-
-// WithCodec returns a DialOption which sets a codec for message marshaling and
-// unmarshaling.
-//
-// Deprecated: use WithDefaultCallOptions(CallCustomCodec(c)) instead.
-func WithCodec(c Codec) DialOption {
- return WithDefaultCallOptions(CallCustomCodec(c))
-}
-
-// WithCompressor returns a DialOption which sets a Compressor to use for
-// message compression. It has lower priority than the compressor set by the
-// UseCompressor CallOption.
-//
-// Deprecated: use UseCompressor instead.
-func WithCompressor(cp Compressor) DialOption {
- return newFuncDialOption(func(o *dialOptions) {
- o.cp = cp
- })
-}
-
-// WithDecompressor returns a DialOption which sets a Decompressor to use for
-// incoming message decompression. If incoming response messages are encoded
-// using the decompressor's Type(), it will be used. Otherwise, the message
-// encoding will be used to look up the compressor registered via
-// encoding.RegisterCompressor, which will then be used to decompress the
-// message. If no compressor is registered for the encoding, an Unimplemented
-// status error will be returned.
-//
-// Deprecated: use encoding.RegisterCompressor instead.
-func WithDecompressor(dc Decompressor) DialOption {
- return newFuncDialOption(func(o *dialOptions) {
- o.dc = dc
- })
-}
-
-// WithBalancer returns a DialOption which sets a load balancer with the v1 API.
-// Name resolver will be ignored if this DialOption is specified.
-//
-// Deprecated: use the new balancer APIs in balancer package and
-// WithBalancerName.
-func WithBalancer(b Balancer) DialOption {
- return newFuncDialOption(func(o *dialOptions) {
- o.balancerBuilder = &balancerWrapperBuilder{
- b: b,
- }
- })
-}
-
-// WithBalancerName sets the balancer that the ClientConn will be initialized
-// with. Balancer registered with balancerName will be used. This function
-// panics if no balancer was registered by balancerName.
-//
-// The balancer cannot be overridden by balancer option specified by service
-// config.
-//
-// This is an EXPERIMENTAL API.
-func WithBalancerName(balancerName string) DialOption {
- builder := balancer.Get(balancerName)
- if builder == nil {
- panic(fmt.Sprintf("grpc.WithBalancerName: no balancer is registered for name %v", balancerName))
- }
- return newFuncDialOption(func(o *dialOptions) {
- o.balancerBuilder = builder
- })
-}
-
-// withResolverBuilder is only for grpclb.
-func withResolverBuilder(b resolver.Builder) DialOption {
- return newFuncDialOption(func(o *dialOptions) {
- o.resolverBuilder = b
- })
-}
-
-// WithServiceConfig returns a DialOption which has a channel to read the
-// service configuration.
-//
-// Deprecated: service config should be received through name resolver, as
-// specified here.
-// https://github.com/grpc/grpc/blob/master/doc/service_config.md
-func WithServiceConfig(c <-chan ServiceConfig) DialOption {
- return newFuncDialOption(func(o *dialOptions) {
- o.scChan = c
- })
-}
-
-// WithBackoffMaxDelay configures the dialer to use the provided maximum delay
-// when backing off after failed connection attempts.
-func WithBackoffMaxDelay(md time.Duration) DialOption {
- return WithBackoffConfig(BackoffConfig{MaxDelay: md})
-}
-
-// WithBackoffConfig configures the dialer to use the provided backoff
-// parameters after connection failures.
-//
-// Use WithBackoffMaxDelay until more parameters on BackoffConfig are opened up
-// for use.
-func WithBackoffConfig(b BackoffConfig) DialOption {
- return withBackoff(backoff.Exponential{
- MaxDelay: b.MaxDelay,
- })
-}
-
-// withBackoff sets the backoff strategy used for connectRetryNum after a failed
-// connection attempt.
-//
-// This can be exported if arbitrary backoff strategies are allowed by gRPC.
-func withBackoff(bs backoff.Strategy) DialOption {
- return newFuncDialOption(func(o *dialOptions) {
- o.bs = bs
- })
-}
-
-// WithBlock returns a DialOption which makes caller of Dial blocks until the
-// underlying connection is up. Without this, Dial returns immediately and
-// connecting the server happens in background.
-func WithBlock() DialOption {
- return newFuncDialOption(func(o *dialOptions) {
- o.block = true
- })
-}
-
-// WithInsecure returns a DialOption which disables transport security for this
-// ClientConn. Note that transport security is required unless WithInsecure is
-// set.
-func WithInsecure() DialOption {
- return newFuncDialOption(func(o *dialOptions) {
- o.insecure = true
- })
-}
-
-// WithTransportCredentials returns a DialOption which configures a connection
-// level security credentials (e.g., TLS/SSL). This should not be used together
-// with WithCredentialsBundle.
-func WithTransportCredentials(creds credentials.TransportCredentials) DialOption {
- return newFuncDialOption(func(o *dialOptions) {
- o.copts.TransportCredentials = creds
- })
-}
-
-// WithPerRPCCredentials returns a DialOption which sets credentials and places
-// auth state on each outbound RPC.
-func WithPerRPCCredentials(creds credentials.PerRPCCredentials) DialOption {
- return newFuncDialOption(func(o *dialOptions) {
- o.copts.PerRPCCredentials = append(o.copts.PerRPCCredentials, creds)
- })
-}
-
-// WithCredentialsBundle returns a DialOption to set a credentials bundle for
-// the ClientConn.WithCreds. This should not be used together with
-// WithTransportCredentials.
-//
-// This API is experimental.
-func WithCredentialsBundle(b credentials.Bundle) DialOption {
- return newFuncDialOption(func(o *dialOptions) {
- o.copts.CredsBundle = b
- })
-}
-
-// WithTimeout returns a DialOption that configures a timeout for dialing a
-// ClientConn initially. This is valid if and only if WithBlock() is present.
-//
-// Deprecated: use DialContext and context.WithTimeout instead.
-func WithTimeout(d time.Duration) DialOption {
- return newFuncDialOption(func(o *dialOptions) {
- o.timeout = d
- })
-}
-
-func withContextDialer(f func(context.Context, string) (net.Conn, error)) DialOption {
- return newFuncDialOption(func(o *dialOptions) {
- o.copts.Dialer = f
- })
-}
-
-func init() {
- internal.WithContextDialer = withContextDialer
- internal.WithResolverBuilder = withResolverBuilder
-}
-
-// WithDialer returns a DialOption that specifies a function to use for dialing
-// network addresses. If FailOnNonTempDialError() is set to true, and an error
-// is returned by f, gRPC checks the error's Temporary() method to decide if it
-// should try to reconnect to the network address.
-func WithDialer(f func(string, time.Duration) (net.Conn, error)) DialOption {
- return withContextDialer(
- func(ctx context.Context, addr string) (net.Conn, error) {
- if deadline, ok := ctx.Deadline(); ok {
- return f(addr, deadline.Sub(time.Now()))
- }
- return f(addr, 0)
- })
-}
-
-// WithStatsHandler returns a DialOption that specifies the stats handler for
-// all the RPCs and underlying network connections in this ClientConn.
-func WithStatsHandler(h stats.Handler) DialOption {
- return newFuncDialOption(func(o *dialOptions) {
- o.copts.StatsHandler = h
- })
-}
-
-// FailOnNonTempDialError returns a DialOption that specifies if gRPC fails on
-// non-temporary dial errors. If f is true, and dialer returns a non-temporary
-// error, gRPC will fail the connection to the network address and won't try to
-// reconnect. The default value of FailOnNonTempDialError is false.
-//
-// FailOnNonTempDialError only affects the initial dial, and does not do
-// anything useful unless you are also using WithBlock().
-//
-// This is an EXPERIMENTAL API.
-func FailOnNonTempDialError(f bool) DialOption {
- return newFuncDialOption(func(o *dialOptions) {
- o.copts.FailOnNonTempDialError = f
- })
-}
-
-// WithUserAgent returns a DialOption that specifies a user agent string for all
-// the RPCs.
-func WithUserAgent(s string) DialOption {
- return newFuncDialOption(func(o *dialOptions) {
- o.copts.UserAgent = s
- })
-}
-
-// WithKeepaliveParams returns a DialOption that specifies keepalive parameters
-// for the client transport.
-func WithKeepaliveParams(kp keepalive.ClientParameters) DialOption {
- return newFuncDialOption(func(o *dialOptions) {
- o.copts.KeepaliveParams = kp
- })
-}
-
-// WithUnaryInterceptor returns a DialOption that specifies the interceptor for
-// unary RPCs.
-func WithUnaryInterceptor(f UnaryClientInterceptor) DialOption {
- return newFuncDialOption(func(o *dialOptions) {
- o.unaryInt = f
- })
-}
-
-// WithStreamInterceptor returns a DialOption that specifies the interceptor for
-// streaming RPCs.
-func WithStreamInterceptor(f StreamClientInterceptor) DialOption {
- return newFuncDialOption(func(o *dialOptions) {
- o.streamInt = f
- })
-}
-
-// WithAuthority returns a DialOption that specifies the value to be used as the
-// :authority pseudo-header. This value only works with WithInsecure and has no
-// effect if TransportCredentials are present.
-func WithAuthority(a string) DialOption {
- return newFuncDialOption(func(o *dialOptions) {
- o.authority = a
- })
-}
-
-// WithChannelzParentID returns a DialOption that specifies the channelz ID of
-// current ClientConn's parent. This function is used in nested channel creation
-// (e.g. grpclb dial).
-func WithChannelzParentID(id int64) DialOption {
- return newFuncDialOption(func(o *dialOptions) {
- o.channelzParentID = id
- })
-}
-
-// WithDisableServiceConfig returns a DialOption that causes grpc to ignore any
-// service config provided by the resolver and provides a hint to the resolver
-// to not fetch service configs.
-func WithDisableServiceConfig() DialOption {
- return newFuncDialOption(func(o *dialOptions) {
- o.disableServiceConfig = true
- })
-}
-
-// WithDisableRetry returns a DialOption that disables retries, even if the
-// service config enables them. This does not impact transparent retries, which
-// will happen automatically if no data is written to the wire or if the RPC is
-// unprocessed by the remote server.
-//
-// Retry support is currently disabled by default, but will be enabled by
-// default in the future. Until then, it may be enabled by setting the
-// environment variable "GRPC_GO_RETRY" to "on".
-//
-// This API is EXPERIMENTAL.
-func WithDisableRetry() DialOption {
- return newFuncDialOption(func(o *dialOptions) {
- o.disableRetry = true
- })
-}
-
-// WithMaxHeaderListSize returns a DialOption that specifies the maximum
-// (uncompressed) size of header list that the client is prepared to accept.
-func WithMaxHeaderListSize(s uint32) DialOption {
- return newFuncDialOption(func(o *dialOptions) {
- o.copts.MaxHeaderListSize = &s
- })
-}
-
-func defaultDialOptions() dialOptions {
- return dialOptions{
- disableRetry: !envconfig.Retry,
- copts: transport.ConnectOptions{
- WriteBufferSize: defaultWriteBufSize,
- ReadBufferSize: defaultReadBufSize,
- },
- }
-}
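For reference, a minimal sketch of how the DialOption constructors defined in the deleted file above are typically composed in a grpc.Dial call. The target address and option values are illustrative assumptions:

package main

import (
	"log"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/keepalive"
)

func main() {
	conn, err := grpc.Dial(
		"localhost:50051",
		grpc.WithInsecure(), // plaintext; transport security is otherwise required
		grpc.WithBlock(),    // block until the underlying connection is up
		grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(16*1024*1024)),
		grpc.WithKeepaliveParams(keepalive.ClientParameters{Time: 30 * time.Second}),
		grpc.WithUserAgent("example-client/1.0"),
	)
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	defer conn.Close()
}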
diff --git a/vendor/google.golang.org/grpc/encoding/encoding.go b/vendor/google.golang.org/grpc/encoding/encoding.go
index ade8b7cec..47d10b076 100644
--- a/vendor/google.golang.org/grpc/encoding/encoding.go
+++ b/vendor/google.golang.org/grpc/encoding/encoding.go
@@ -16,103 +16,46 @@
*
*/
-// Package encoding defines the interface for the compressor and codec, and
-// functions to register and retrieve compressors and codecs.
-//
+// Package encoding defines the interface for the compressor and the functions
+// to register and get the compressor.
// This package is EXPERIMENTAL.
package encoding
import (
"io"
- "strings"
)
-// Identity specifies the optional encoding for uncompressed streams.
-// It is intended for grpc internal use only.
-const Identity = "identity"
+var registerCompressor = make(map[string]Compressor)
-// Compressor is used for compressing and decompressing when sending or
-// receiving messages.
+// Compressor is used for compressing and decompressing when sending or receiving messages.
type Compressor interface {
- // Compress writes the data written to wc to w after compressing it. If an
- // error occurs while initializing the compressor, that error is returned
- // instead.
+ // Compress writes the data written to wc to w after compressing it. If an error
+ // occurs while initializing the compressor, that error is returned instead.
Compress(w io.Writer) (io.WriteCloser, error)
- // Decompress reads data from r, decompresses it, and provides the
- // uncompressed data via the returned io.Reader. If an error occurs while
- // initializing the decompressor, that error is returned instead.
+ // Decompress reads data from r, decompresses it, and provides the uncompressed data
+ // via the returned io.Reader. If an error occurs while initializing the decompressor, that error
+ // is returned instead.
Decompress(r io.Reader) (io.Reader, error)
- // Name is the name of the compression codec and is used to set the content
- // coding header. The result must be static; the result cannot change
- // between calls.
+ // Name is the name of the compression codec and is used to set the content coding header.
Name() string
}
-var registeredCompressor = make(map[string]Compressor)
-
-// RegisterCompressor registers the compressor with gRPC by its name. It can
-// be activated when sending an RPC via grpc.UseCompressor(). It will be
-// automatically accessed when receiving a message based on the content coding
-// header. Servers also use it to send a response with the same encoding as
-// the request.
+// RegisterCompressor registers the compressor with gRPC by its name. It can be activated when
+// sending an RPC via grpc.UseCompressor(). It will be automatically accessed when receiving a
+// message based on the content coding header. Servers also use it to send a response with the
+// same encoding as the request.
//
-// NOTE: this function must only be called during initialization time (i.e. in
-// an init() function), and is not thread-safe. If multiple Compressors are
-// registered with the same name, the one registered last will take effect.
+// NOTE: this function must only be called during initialization time (i.e. in an init() function). If
+// multiple Compressors are registered with the same name, the one registered last will take effect.
func RegisterCompressor(c Compressor) {
- registeredCompressor[c.Name()] = c
+ registerCompressor[c.Name()] = c
}
// GetCompressor returns Compressor for the given compressor name.
func GetCompressor(name string) Compressor {
- return registeredCompressor[name]
-}
-
-// Codec defines the interface gRPC uses to encode and decode messages. Note
-// that implementations of this interface must be thread safe; a Codec's
-// methods can be called from concurrent goroutines.
-type Codec interface {
- // Marshal returns the wire format of v.
- Marshal(v interface{}) ([]byte, error)
- // Unmarshal parses the wire format into v.
- Unmarshal(data []byte, v interface{}) error
- // Name returns the name of the Codec implementation. The returned string
- // will be used as part of content type in transmission. The result must be
- // static; the result cannot change between calls.
- Name() string
-}
-
-var registeredCodecs = make(map[string]Codec)
-
-// RegisterCodec registers the provided Codec for use with all gRPC clients and
-// servers.
-//
-// The Codec will be stored and looked up by result of its Name() method, which
-// should match the content-subtype of the encoding handled by the Codec. This
-// is case-insensitive, and is stored and looked up as lowercase. If the
-// result of calling Name() is an empty string, RegisterCodec will panic. See
-// Content-Type on
-// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for
-// more details.
-//
-// NOTE: this function must only be called during initialization time (i.e. in
-// an init() function), and is not thread-safe. If multiple Compressors are
-// registered with the same name, the one registered last will take effect.
-func RegisterCodec(codec Codec) {
- if codec == nil {
- panic("cannot register a nil Codec")
- }
- contentSubtype := strings.ToLower(codec.Name())
- if contentSubtype == "" {
- panic("cannot register Codec with empty string result for String()")
- }
- registeredCodecs[contentSubtype] = codec
+ return registerCompressor[name]
}
-// GetCodec gets a registered Codec by content-subtype, or nil if no Codec is
-// registered for the content-subtype.
-//
-// The content-subtype is expected to be lowercase.
-func GetCodec(contentSubtype string) Codec {
- return registeredCodecs[contentSubtype]
-}
+// Identity specifies the optional encoding for uncompressed streams.
+// It is intended for grpc internal use only.
+const Identity = "identity"
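For reference, a minimal sketch of a Compressor that satisfies the interface shown above and registers itself at init time, so it can be selected per call with grpc.UseCompressor("gzip") as described in the RegisterCompressor comment. The gzip-backed implementation and package name are illustrative assumptions:

package gzipcompressor

import (
	"compress/gzip"
	"io"

	"google.golang.org/grpc/encoding"
)

// compressor implements encoding.Compressor using the standard library gzip codec.
type compressor struct{}

func (compressor) Compress(w io.Writer) (io.WriteCloser, error) {
	// gzip.Writer compresses everything written to it into w.
	return gzip.NewWriter(w), nil
}

func (compressor) Decompress(r io.Reader) (io.Reader, error) {
	// gzip.NewReader returns an error if r does not start with a valid gzip header.
	return gzip.NewReader(r)
}

// Name is used as the content coding header value; it must stay constant.
func (compressor) Name() string { return "gzip" }

func init() {
	encoding.RegisterCompressor(compressor{})
}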
diff --git a/vendor/google.golang.org/grpc/encoding/proto/proto.go b/vendor/google.golang.org/grpc/encoding/proto/proto.go
deleted file mode 100644
index 66b97a6f6..000000000
--- a/vendor/google.golang.org/grpc/encoding/proto/proto.go
+++ /dev/null
@@ -1,110 +0,0 @@
-/*
- *
- * Copyright 2018 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-// Package proto defines the protobuf codec. Importing this package will
-// register the codec.
-package proto
-
-import (
- "math"
- "sync"
-
- "github.com/golang/protobuf/proto"
- "google.golang.org/grpc/encoding"
-)
-
-// Name is the name registered for the proto compressor.
-const Name = "proto"
-
-func init() {
- encoding.RegisterCodec(codec{})
-}
-
-// codec is a Codec implementation with protobuf. It is the default codec for gRPC.
-type codec struct{}
-
-type cachedProtoBuffer struct {
- lastMarshaledSize uint32
- proto.Buffer
-}
-
-func capToMaxInt32(val int) uint32 {
- if val > math.MaxInt32 {
- return uint32(math.MaxInt32)
- }
- return uint32(val)
-}
-
-func marshal(v interface{}, cb *cachedProtoBuffer) ([]byte, error) {
- protoMsg := v.(proto.Message)
- newSlice := make([]byte, 0, cb.lastMarshaledSize)
-
- cb.SetBuf(newSlice)
- cb.Reset()
- if err := cb.Marshal(protoMsg); err != nil {
- return nil, err
- }
- out := cb.Bytes()
- cb.lastMarshaledSize = capToMaxInt32(len(out))
- return out, nil
-}
-
-func (codec) Marshal(v interface{}) ([]byte, error) {
- if pm, ok := v.(proto.Marshaler); ok {
- // object can marshal itself, no need for buffer
- return pm.Marshal()
- }
-
- cb := protoBufferPool.Get().(*cachedProtoBuffer)
- out, err := marshal(v, cb)
-
- // put back buffer and lose the ref to the slice
- cb.SetBuf(nil)
- protoBufferPool.Put(cb)
- return out, err
-}
-
-func (codec) Unmarshal(data []byte, v interface{}) error {
- protoMsg := v.(proto.Message)
- protoMsg.Reset()
-
- if pu, ok := protoMsg.(proto.Unmarshaler); ok {
- // object can unmarshal itself, no need for buffer
- return pu.Unmarshal(data)
- }
-
- cb := protoBufferPool.Get().(*cachedProtoBuffer)
- cb.SetBuf(data)
- err := cb.Unmarshal(protoMsg)
- cb.SetBuf(nil)
- protoBufferPool.Put(cb)
- return err
-}
-
-func (codec) Name() string {
- return Name
-}
-
-var protoBufferPool = &sync.Pool{
- New: func() interface{} {
- return &cachedProtoBuffer{
- Buffer: proto.Buffer{},
- lastMarshaledSize: 16,
- }
- },
-}
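For reference, a minimal sketch of an alternative Codec written against the encoding.Codec interface that this revert deletes (see the removed Codec/RegisterCodec lines in the encoding.go hunk above); it therefore only compiles against the newer gRPC version being reverted away. The JSON-based implementation and the "json" name are illustrative assumptions:

package jsoncodec

import (
	"encoding/json"

	"google.golang.org/grpc/encoding"
)

// codec marshals and unmarshals request/response messages as JSON instead of protobuf.
type codec struct{}

func (codec) Marshal(v interface{}) ([]byte, error) {
	return json.Marshal(v)
}

func (codec) Unmarshal(data []byte, v interface{}) error {
	return json.Unmarshal(data, v)
}

// Name becomes the content-subtype; it must be static and is treated as lowercase.
func (codec) Name() string { return "json" }

func init() {
	encoding.RegisterCodec(codec{})
}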
diff --git a/vendor/google.golang.org/grpc/go.mod b/vendor/google.golang.org/grpc/go.mod
deleted file mode 100644
index 1d16f5d4d..000000000
--- a/vendor/google.golang.org/grpc/go.mod
+++ /dev/null
@@ -1,21 +0,0 @@
-module google.golang.org/grpc
-
-require (
- cloud.google.com/go v0.26.0 // indirect
- github.com/client9/misspell v0.3.4
- github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b
- github.com/golang/lint v0.0.0-20180702182130-06c8688daad7
- github.com/golang/mock v1.1.1
- github.com/golang/protobuf v1.2.0
- github.com/kisielk/gotool v1.0.0 // indirect
- golang.org/x/lint v0.0.0-20180702182130-06c8688daad7 // indirect
- golang.org/x/net v0.0.0-20180826012351-8a410e7b638d
- golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be
- golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f // indirect
- golang.org/x/sys v0.0.0-20180830151530-49385e6e1522
- golang.org/x/text v0.3.0 // indirect
- golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52
- google.golang.org/appengine v1.1.0 // indirect
- google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8
- honnef.co/go/tools v0.0.0-20180728063816-88497007e858
-)
diff --git a/vendor/google.golang.org/grpc/go.sum b/vendor/google.golang.org/grpc/go.sum
deleted file mode 100644
index 6b70e58e5..000000000
--- a/vendor/google.golang.org/grpc/go.sum
+++ /dev/null
@@ -1,34 +0,0 @@
-cloud.google.com/go v0.26.0 h1:e0WKqKTd5BnrG8aKH3J3h+QvEIQtSUcf2n5UZ5ZgLtQ=
-cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
-github.com/client9/misspell v0.3.4 h1:ta993UF76GwbvJcIo3Y68y/M3WxlpEHPWIGDkJYwzJI=
-github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
-github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58=
-github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
-github.com/golang/lint v0.0.0-20180702182130-06c8688daad7 h1:2hRPrmiwPrp3fQX967rNJIhQPtiGXdlQWAxKbKw3VHA=
-github.com/golang/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:tluoj9z5200jBnyusfRPU2LqT6J+DAorxEvtC7LHB+E=
-github.com/golang/mock v1.1.1 h1:G5FRp8JnTd7RQH5kemVNlMeyXQAztQ3mOWV95KxsXH8=
-github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
-github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM=
-github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
-github.com/kisielk/gotool v1.0.0 h1:AV2c/EiW3KqPNT9ZKl07ehoAGi4C5/01Cfbblndcapg=
-github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
-golang.org/x/lint v0.0.0-20180702182130-06c8688daad7 h1:00BeQWmeaGazuOrq8Q5K5d3/cHaGuFrZzpaHBXfrsUA=
-golang.org/x/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
-golang.org/x/net v0.0.0-20180826012351-8a410e7b638d h1:g9qWBGx4puODJTMVyoPrpoxPFgVGd+z1DZwjfRu4d0I=
-golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be h1:vEDujvNQGv4jgYKudGeI/+DAX4Jffq6hpD55MmoEvKs=
-golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
-golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f h1:wMNYb4v58l5UBM7MYRLPG6ZhfOqbKu7X5eyFl8ZhKvA=
-golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sys v0.0.0-20180830151530-49385e6e1522 h1:Ve1ORMCxvRmSXBwJK+t3Oy+V2vRW2OetUQBq4rJIkZE=
-golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg=
-golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
-golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52 h1:JG/0uqcGdTNgq7FdU+61l5Pdmb8putNZlXb65bJBROs=
-golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
-google.golang.org/appengine v1.1.0 h1:igQkv0AAhEIvTEpD5LIpAfav2eeVO9HBTjvKHVJPRSs=
-google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
-google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8 h1:Nw54tB0rB7hY/N0NQvRW8DG4Yk3Q6T9cu9RcFQDu1tc=
-google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
-honnef.co/go/tools v0.0.0-20180728063816-88497007e858 h1:wN+eVZ7U+gqdqkec6C6VXR1OFf9a5Ul9ETzeYsYv20g=
-honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
diff --git a/vendor/google.golang.org/grpc/go16.go b/vendor/google.golang.org/grpc/go16.go
index b1db21af6..f3dbf2170 100644
--- a/vendor/google.golang.org/grpc/go16.go
+++ b/vendor/google.golang.org/grpc/go16.go
@@ -25,11 +25,12 @@ import (
"io"
"net"
"net/http"
+ "os"
"golang.org/x/net/context"
"google.golang.org/grpc/codes"
- "google.golang.org/grpc/internal/transport"
"google.golang.org/grpc/status"
+ "google.golang.org/grpc/transport"
)
// dialContext connects to the address on the named network.
@@ -47,16 +48,12 @@ func sendHTTPRequest(ctx context.Context, req *http.Request, conn net.Conn) erro
// toRPCErr converts an error into an error from the status package.
func toRPCErr(err error) error {
- if err == nil || err == io.EOF {
- return err
- }
- if err == io.ErrUnexpectedEOF {
- return status.Error(codes.Internal, err.Error())
- }
if _, ok := status.FromError(err); ok {
return err
}
switch e := err.(type) {
+ case transport.StreamError:
+ return status.Error(e.Code, e.Desc)
case transport.ConnectionError:
return status.Error(codes.Unavailable, e.Desc)
default:
@@ -65,7 +62,37 @@ func toRPCErr(err error) error {
return status.Error(codes.DeadlineExceeded, err.Error())
case context.Canceled:
return status.Error(codes.Canceled, err.Error())
+ case ErrClientConnClosing:
+ return status.Error(codes.FailedPrecondition, err.Error())
}
}
return status.Error(codes.Unknown, err.Error())
}
+
+// convertCode converts a standard Go error into its canonical code. Note that
+// this is only used to translate the error returned by the server applications.
+func convertCode(err error) codes.Code {
+ switch err {
+ case nil:
+ return codes.OK
+ case io.EOF:
+ return codes.OutOfRange
+ case io.ErrClosedPipe, io.ErrNoProgress, io.ErrShortBuffer, io.ErrShortWrite, io.ErrUnexpectedEOF:
+ return codes.FailedPrecondition
+ case os.ErrInvalid:
+ return codes.InvalidArgument
+ case context.Canceled:
+ return codes.Canceled
+ case context.DeadlineExceeded:
+ return codes.DeadlineExceeded
+ }
+ switch {
+ case os.IsExist(err):
+ return codes.AlreadyExists
+ case os.IsNotExist(err):
+ return codes.NotFound
+ case os.IsPermission(err):
+ return codes.PermissionDenied
+ }
+ return codes.Unknown
+}
diff --git a/vendor/google.golang.org/grpc/go17.go b/vendor/google.golang.org/grpc/go17.go
index 71a72e8fe..de23098eb 100644
--- a/vendor/google.golang.org/grpc/go17.go
+++ b/vendor/google.golang.org/grpc/go17.go
@@ -26,11 +26,12 @@ import (
"io"
"net"
"net/http"
+ "os"
netctx "golang.org/x/net/context"
"google.golang.org/grpc/codes"
- "google.golang.org/grpc/internal/transport"
"google.golang.org/grpc/status"
+ "google.golang.org/grpc/transport"
)
// dialContext connects to the address on the named network.
@@ -48,16 +49,12 @@ func sendHTTPRequest(ctx context.Context, req *http.Request, conn net.Conn) erro
// toRPCErr converts an error into an error from the status package.
func toRPCErr(err error) error {
- if err == nil || err == io.EOF {
- return err
- }
- if err == io.ErrUnexpectedEOF {
- return status.Error(codes.Internal, err.Error())
- }
if _, ok := status.FromError(err); ok {
return err
}
switch e := err.(type) {
+ case transport.StreamError:
+ return status.Error(e.Code, e.Desc)
case transport.ConnectionError:
return status.Error(codes.Unavailable, e.Desc)
default:
@@ -66,7 +63,37 @@ func toRPCErr(err error) error {
return status.Error(codes.DeadlineExceeded, err.Error())
case context.Canceled, netctx.Canceled:
return status.Error(codes.Canceled, err.Error())
+ case ErrClientConnClosing:
+ return status.Error(codes.FailedPrecondition, err.Error())
}
}
return status.Error(codes.Unknown, err.Error())
}
+
+// convertCode converts a standard Go error into its canonical code. Note that
+// this is only used to translate the error returned by the server applications.
+func convertCode(err error) codes.Code {
+ switch err {
+ case nil:
+ return codes.OK
+ case io.EOF:
+ return codes.OutOfRange
+ case io.ErrClosedPipe, io.ErrNoProgress, io.ErrShortBuffer, io.ErrShortWrite, io.ErrUnexpectedEOF:
+ return codes.FailedPrecondition
+ case os.ErrInvalid:
+ return codes.InvalidArgument
+ case context.Canceled, netctx.Canceled:
+ return codes.Canceled
+ case context.DeadlineExceeded, netctx.DeadlineExceeded:
+ return codes.DeadlineExceeded
+ }
+ switch {
+ case os.IsExist(err):
+ return codes.AlreadyExists
+ case os.IsNotExist(err):
+ return codes.NotFound
+ case os.IsPermission(err):
+ return codes.PermissionDenied
+ }
+ return codes.Unknown
+}
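For reference, a minimal sketch of how callers typically inspect errors after toRPCErr/convertCode above have mapped them onto the status package; the handling policy shown here is an illustrative assumption:

package main

import (
	"log"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

func handleRPCError(err error) {
	if err == nil {
		return
	}
	// Errors surfaced by gRPC carry a status; FromError reports whether one is present.
	st, ok := status.FromError(err)
	if !ok {
		log.Printf("non-gRPC error: %v", err)
		return
	}
	switch st.Code() {
	case codes.Canceled, codes.DeadlineExceeded:
		log.Printf("call gave up: %v", st.Message())
	case codes.Unavailable:
		log.Printf("transport problem, may be retried: %v", st.Message())
	default:
		log.Printf("rpc failed with %v: %v", st.Code(), st.Message())
	}
}

func main() {
	handleRPCError(status.Error(codes.Unavailable, "connection refused"))
}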
diff --git a/vendor/google.golang.org/grpc/grpclb.go b/vendor/google.golang.org/grpc/grpclb.go
new file mode 100644
index 000000000..d14a5d409
--- /dev/null
+++ b/vendor/google.golang.org/grpc/grpclb.go
@@ -0,0 +1,342 @@
+/*
+ *
+ * Copyright 2016 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package grpc
+
+import (
+ "strconv"
+ "strings"
+ "sync"
+ "time"
+
+ "golang.org/x/net/context"
+ "google.golang.org/grpc/balancer"
+ "google.golang.org/grpc/connectivity"
+ lbpb "google.golang.org/grpc/grpclb/grpc_lb_v1/messages"
+ "google.golang.org/grpc/grpclog"
+ "google.golang.org/grpc/resolver"
+)
+
+const (
+ lbTokeyKey = "lb-token"
+ defaultFallbackTimeout = 10 * time.Second
+ grpclbName = "grpclb"
+)
+
+func convertDuration(d *lbpb.Duration) time.Duration {
+ if d == nil {
+ return 0
+ }
+ return time.Duration(d.Seconds)*time.Second + time.Duration(d.Nanos)*time.Nanosecond
+}
+
+// Client API for LoadBalancer service.
+// Mostly copied from generated pb.go file.
+// To avoid circular dependency.
+type loadBalancerClient struct {
+ cc *ClientConn
+}
+
+func (c *loadBalancerClient) BalanceLoad(ctx context.Context, opts ...CallOption) (*balanceLoadClientStream, error) {
+ desc := &StreamDesc{
+ StreamName: "BalanceLoad",
+ ServerStreams: true,
+ ClientStreams: true,
+ }
+ stream, err := NewClientStream(ctx, desc, c.cc, "/grpc.lb.v1.LoadBalancer/BalanceLoad", opts...)
+ if err != nil {
+ return nil, err
+ }
+ x := &balanceLoadClientStream{stream}
+ return x, nil
+}
+
+type balanceLoadClientStream struct {
+ ClientStream
+}
+
+func (x *balanceLoadClientStream) Send(m *lbpb.LoadBalanceRequest) error {
+ return x.ClientStream.SendMsg(m)
+}
+
+func (x *balanceLoadClientStream) Recv() (*lbpb.LoadBalanceResponse, error) {
+ m := new(lbpb.LoadBalanceResponse)
+ if err := x.ClientStream.RecvMsg(m); err != nil {
+ return nil, err
+ }
+ return m, nil
+}
+
+func init() {
+ balancer.Register(newLBBuilder())
+}
+
+// newLBBuilder creates a builder for grpclb.
+func newLBBuilder() balancer.Builder {
+ return NewLBBuilderWithFallbackTimeout(defaultFallbackTimeout)
+}
+
+// NewLBBuilderWithFallbackTimeout creates a grpclb builder with the given
+// fallbackTimeout. If no response is received from the remote balancer within
+// fallbackTimeout, the backend addresses from the resolved address list will be
+// used.
+//
+// Only call this function when a non-default fallback timeout is needed.
+func NewLBBuilderWithFallbackTimeout(fallbackTimeout time.Duration) balancer.Builder {
+ return &lbBuilder{
+ fallbackTimeout: fallbackTimeout,
+ }
+}
+
+type lbBuilder struct {
+ fallbackTimeout time.Duration
+}
+
+func (b *lbBuilder) Name() string {
+ return grpclbName
+}
+
+func (b *lbBuilder) Build(cc balancer.ClientConn, opt balancer.BuildOptions) balancer.Balancer {
+ // This generates a manual resolver builder with a random scheme. This
+ // scheme will be used to dial to remote LB, so we can send filtered address
+ // updates to remote LB ClientConn using this manual resolver.
+ scheme := "grpclb_internal_" + strconv.FormatInt(time.Now().UnixNano(), 36)
+ r := &lbManualResolver{scheme: scheme, ccb: cc}
+
+ var target string
+ targetSplitted := strings.Split(cc.Target(), ":///")
+ if len(targetSplitted) < 2 {
+ target = cc.Target()
+ } else {
+ target = targetSplitted[1]
+ }
+
+ lb := &lbBalancer{
+ cc: cc,
+ target: target,
+ opt: opt,
+ fallbackTimeout: b.fallbackTimeout,
+ doneCh: make(chan struct{}),
+
+ manualResolver: r,
+ csEvltr: &connectivityStateEvaluator{},
+ subConns: make(map[resolver.Address]balancer.SubConn),
+ scStates: make(map[balancer.SubConn]connectivity.State),
+ picker: &errPicker{err: balancer.ErrNoSubConnAvailable},
+ clientStats: &rpcStats{},
+ }
+
+ return lb
+}
+
+type lbBalancer struct {
+ cc balancer.ClientConn
+ target string
+ opt balancer.BuildOptions
+ fallbackTimeout time.Duration
+ doneCh chan struct{}
+
+ // manualResolver is used in the remote LB ClientConn inside grpclb. When
+ // resolved address updates are received by grpclb, filtered updates will be
+	// sent to remote LB ClientConn through this resolver.
+ manualResolver *lbManualResolver
+ // The ClientConn to talk to the remote balancer.
+ ccRemoteLB *ClientConn
+
+ // Support client side load reporting. Each picker gets a reference to this,
+ // and will update its content.
+ clientStats *rpcStats
+
+ mu sync.Mutex // guards everything following.
+ // The full server list including drops, used to check if the newly received
+	// serverList contains anything new. Each generated picker will also have
+ // reference to this list to do the first layer pick.
+ fullServerList []*lbpb.Server
+ // All backends addresses, with metadata set to nil. This list contains all
+ // backend addresses in the same order and with the same duplicates as in
+ // serverlist. When generating picker, a SubConn slice with the same order
+	// but with only READY SCs will be generated.
+ backendAddrs []resolver.Address
+ // Roundrobin functionalities.
+ csEvltr *connectivityStateEvaluator
+ state connectivity.State
+ subConns map[resolver.Address]balancer.SubConn // Used to new/remove SubConn.
+ scStates map[balancer.SubConn]connectivity.State // Used to filter READY SubConns.
+ picker balancer.Picker
+ // Support fallback to resolved backend addresses if there's no response
+ // from remote balancer within fallbackTimeout.
+ fallbackTimerExpired bool
+ serverListReceived bool
+ // resolvedBackendAddrs is resolvedAddrs minus remote balancers. It's set
+ // when resolved address updates are received, and read in the goroutine
+ // handling fallback.
+ resolvedBackendAddrs []resolver.Address
+}
+
+// regeneratePicker takes a snapshot of the balancer, and generates a picker from
+// it. The picker
+// - always returns ErrTransientFailure if the balancer is in TransientFailure,
+// - does two layer roundrobin pick otherwise.
+// Caller must hold lb.mu.
+func (lb *lbBalancer) regeneratePicker() {
+ if lb.state == connectivity.TransientFailure {
+ lb.picker = &errPicker{err: balancer.ErrTransientFailure}
+ return
+ }
+ var readySCs []balancer.SubConn
+ for _, a := range lb.backendAddrs {
+ if sc, ok := lb.subConns[a]; ok {
+ if st, ok := lb.scStates[sc]; ok && st == connectivity.Ready {
+ readySCs = append(readySCs, sc)
+ }
+ }
+ }
+
+ if len(lb.fullServerList) <= 0 {
+ if len(readySCs) <= 0 {
+ lb.picker = &errPicker{err: balancer.ErrNoSubConnAvailable}
+ return
+ }
+ lb.picker = &rrPicker{subConns: readySCs}
+ return
+ }
+ lb.picker = &lbPicker{
+ serverList: lb.fullServerList,
+ subConns: readySCs,
+ stats: lb.clientStats,
+ }
+ return
+}
+
+func (lb *lbBalancer) HandleSubConnStateChange(sc balancer.SubConn, s connectivity.State) {
+ grpclog.Infof("lbBalancer: handle SubConn state change: %p, %v", sc, s)
+ lb.mu.Lock()
+ defer lb.mu.Unlock()
+
+ oldS, ok := lb.scStates[sc]
+ if !ok {
+ grpclog.Infof("lbBalancer: got state changes for an unknown SubConn: %p, %v", sc, s)
+ return
+ }
+ lb.scStates[sc] = s
+ switch s {
+ case connectivity.Idle:
+ sc.Connect()
+ case connectivity.Shutdown:
+ // When an address was removed by resolver, b called RemoveSubConn but
+ // kept the sc's state in scStates. Remove state for this sc here.
+ delete(lb.scStates, sc)
+ }
+
+ oldAggrState := lb.state
+ lb.state = lb.csEvltr.recordTransition(oldS, s)
+
+ // Regenerate picker when one of the following happens:
+ // - this sc became ready from not-ready
+ // - this sc became not-ready from ready
+ // - the aggregated state of balancer became TransientFailure from non-TransientFailure
+ // - the aggregated state of balancer became non-TransientFailure from TransientFailure
+ if (oldS == connectivity.Ready) != (s == connectivity.Ready) ||
+ (lb.state == connectivity.TransientFailure) != (oldAggrState == connectivity.TransientFailure) {
+ lb.regeneratePicker()
+ }
+
+ lb.cc.UpdateBalancerState(lb.state, lb.picker)
+ return
+}
+
+// fallbackToBackendsAfter blocks for fallbackTimeout and falls back to use
+// resolved backends (backends received from resolver, not from remote balancer)
+// if no connection to remote balancers was successful.
+func (lb *lbBalancer) fallbackToBackendsAfter(fallbackTimeout time.Duration) {
+ timer := time.NewTimer(fallbackTimeout)
+ defer timer.Stop()
+ select {
+ case <-timer.C:
+ case <-lb.doneCh:
+ return
+ }
+ lb.mu.Lock()
+ if lb.serverListReceived {
+ lb.mu.Unlock()
+ return
+ }
+ lb.fallbackTimerExpired = true
+ lb.refreshSubConns(lb.resolvedBackendAddrs)
+ lb.mu.Unlock()
+}
+
+// HandleResolvedAddrs sends the updated remoteLB addresses to remoteLB
+// clientConn. The remoteLB clientConn will handle creating/removing remoteLB
+// connections.
+func (lb *lbBalancer) HandleResolvedAddrs(addrs []resolver.Address, err error) {
+ grpclog.Infof("lbBalancer: handleResolvedResult: %+v", addrs)
+ if len(addrs) <= 0 {
+ return
+ }
+
+ var remoteBalancerAddrs, backendAddrs []resolver.Address
+ for _, a := range addrs {
+ if a.Type == resolver.GRPCLB {
+ remoteBalancerAddrs = append(remoteBalancerAddrs, a)
+ } else {
+ backendAddrs = append(backendAddrs, a)
+ }
+ }
+
+ if lb.ccRemoteLB == nil {
+ if len(remoteBalancerAddrs) <= 0 {
+ grpclog.Errorf("grpclb: no remote balancer address is available, should never happen")
+ return
+ }
+ // First time receiving resolved addresses, create a cc to remote
+ // balancers.
+ lb.dialRemoteLB(remoteBalancerAddrs[0].ServerName)
+ // Start the fallback goroutine.
+ go lb.fallbackToBackendsAfter(lb.fallbackTimeout)
+ }
+
+ // cc to remote balancers uses lb.manualResolver. Send the updated remote
+ // balancer addresses to it through manualResolver.
+ lb.manualResolver.NewAddress(remoteBalancerAddrs)
+
+ lb.mu.Lock()
+ lb.resolvedBackendAddrs = backendAddrs
+ // If serverListReceived is true, connection to remote balancer was
+ // successful and there's no need to do fallback anymore.
+ // If fallbackTimerExpired is false, fallback hasn't happened yet.
+ if !lb.serverListReceived && lb.fallbackTimerExpired {
+ // This means we received a new list of resolved backends, and we are
+ // still in fallback mode. Need to update the list of backends we are
+ // using to the new list of backends.
+ lb.refreshSubConns(lb.resolvedBackendAddrs)
+ }
+ lb.mu.Unlock()
+}
+
+func (lb *lbBalancer) Close() {
+ select {
+ case <-lb.doneCh:
+ return
+ default:
+ }
+ close(lb.doneCh)
+ if lb.ccRemoteLB != nil {
+ lb.ccRemoteLB.Close()
+ }
+}
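For reference, a minimal sketch of using the exported NewLBBuilderWithFallbackTimeout from the file added above to register a grpclb builder with a non-default fallback timeout; the package name and the 30-second value are illustrative assumptions:

package lbconfig

import (
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/balancer"
)

func init() {
	// Registering under the same "grpclb" name replaces the builder that the
	// grpc package registers in its own init, extending the fallback window.
	balancer.Register(grpc.NewLBBuilderWithFallbackTimeout(30 * time.Second))
}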
diff --git a/vendor/google.golang.org/grpc/grpclb/grpc_lb_v1/messages/messages.pb.go b/vendor/google.golang.org/grpc/grpclb/grpc_lb_v1/messages/messages.pb.go
new file mode 100644
index 000000000..f4a27125a
--- /dev/null
+++ b/vendor/google.golang.org/grpc/grpclb/grpc_lb_v1/messages/messages.pb.go
@@ -0,0 +1,615 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: grpc_lb_v1/messages/messages.proto
+
+/*
+Package messages is a generated protocol buffer package.
+
+It is generated from these files:
+ grpc_lb_v1/messages/messages.proto
+
+It has these top-level messages:
+ Duration
+ Timestamp
+ LoadBalanceRequest
+ InitialLoadBalanceRequest
+ ClientStats
+ LoadBalanceResponse
+ InitialLoadBalanceResponse
+ ServerList
+ Server
+*/
+package messages
+
+import proto "github.com/golang/protobuf/proto"
+import fmt "fmt"
+import math "math"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
+
+type Duration struct {
+ // Signed seconds of the span of time. Must be from -315,576,000,000
+ // to +315,576,000,000 inclusive.
+ Seconds int64 `protobuf:"varint,1,opt,name=seconds" json:"seconds,omitempty"`
+ // Signed fractions of a second at nanosecond resolution of the span
+ // of time. Durations less than one second are represented with a 0
+ // `seconds` field and a positive or negative `nanos` field. For durations
+ // of one second or more, a non-zero value for the `nanos` field must be
+ // of the same sign as the `seconds` field. Must be from -999,999,999
+ // to +999,999,999 inclusive.
+ Nanos int32 `protobuf:"varint,2,opt,name=nanos" json:"nanos,omitempty"`
+}
+
+func (m *Duration) Reset() { *m = Duration{} }
+func (m *Duration) String() string { return proto.CompactTextString(m) }
+func (*Duration) ProtoMessage() {}
+func (*Duration) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
+
+func (m *Duration) GetSeconds() int64 {
+ if m != nil {
+ return m.Seconds
+ }
+ return 0
+}
+
+func (m *Duration) GetNanos() int32 {
+ if m != nil {
+ return m.Nanos
+ }
+ return 0
+}
+
+type Timestamp struct {
+ // Represents seconds of UTC time since Unix epoch
+ // 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to
+ // 9999-12-31T23:59:59Z inclusive.
+ Seconds int64 `protobuf:"varint,1,opt,name=seconds" json:"seconds,omitempty"`
+ // Non-negative fractions of a second at nanosecond resolution. Negative
+ // second values with fractions must still have non-negative nanos values
+ // that count forward in time. Must be from 0 to 999,999,999
+ // inclusive.
+ Nanos int32 `protobuf:"varint,2,opt,name=nanos" json:"nanos,omitempty"`
+}
+
+func (m *Timestamp) Reset() { *m = Timestamp{} }
+func (m *Timestamp) String() string { return proto.CompactTextString(m) }
+func (*Timestamp) ProtoMessage() {}
+func (*Timestamp) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} }
+
+func (m *Timestamp) GetSeconds() int64 {
+ if m != nil {
+ return m.Seconds
+ }
+ return 0
+}
+
+func (m *Timestamp) GetNanos() int32 {
+ if m != nil {
+ return m.Nanos
+ }
+ return 0
+}
+
+type LoadBalanceRequest struct {
+ // Types that are valid to be assigned to LoadBalanceRequestType:
+ // *LoadBalanceRequest_InitialRequest
+ // *LoadBalanceRequest_ClientStats
+ LoadBalanceRequestType isLoadBalanceRequest_LoadBalanceRequestType `protobuf_oneof:"load_balance_request_type"`
+}
+
+func (m *LoadBalanceRequest) Reset() { *m = LoadBalanceRequest{} }
+func (m *LoadBalanceRequest) String() string { return proto.CompactTextString(m) }
+func (*LoadBalanceRequest) ProtoMessage() {}
+func (*LoadBalanceRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} }
+
+type isLoadBalanceRequest_LoadBalanceRequestType interface {
+ isLoadBalanceRequest_LoadBalanceRequestType()
+}
+
+type LoadBalanceRequest_InitialRequest struct {
+ InitialRequest *InitialLoadBalanceRequest `protobuf:"bytes,1,opt,name=initial_request,json=initialRequest,oneof"`
+}
+type LoadBalanceRequest_ClientStats struct {
+ ClientStats *ClientStats `protobuf:"bytes,2,opt,name=client_stats,json=clientStats,oneof"`
+}
+
+func (*LoadBalanceRequest_InitialRequest) isLoadBalanceRequest_LoadBalanceRequestType() {}
+func (*LoadBalanceRequest_ClientStats) isLoadBalanceRequest_LoadBalanceRequestType() {}
+
+func (m *LoadBalanceRequest) GetLoadBalanceRequestType() isLoadBalanceRequest_LoadBalanceRequestType {
+ if m != nil {
+ return m.LoadBalanceRequestType
+ }
+ return nil
+}
+
+func (m *LoadBalanceRequest) GetInitialRequest() *InitialLoadBalanceRequest {
+ if x, ok := m.GetLoadBalanceRequestType().(*LoadBalanceRequest_InitialRequest); ok {
+ return x.InitialRequest
+ }
+ return nil
+}
+
+func (m *LoadBalanceRequest) GetClientStats() *ClientStats {
+ if x, ok := m.GetLoadBalanceRequestType().(*LoadBalanceRequest_ClientStats); ok {
+ return x.ClientStats
+ }
+ return nil
+}
+
+// XXX_OneofFuncs is for the internal use of the proto package.
+func (*LoadBalanceRequest) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) {
+ return _LoadBalanceRequest_OneofMarshaler, _LoadBalanceRequest_OneofUnmarshaler, _LoadBalanceRequest_OneofSizer, []interface{}{
+ (*LoadBalanceRequest_InitialRequest)(nil),
+ (*LoadBalanceRequest_ClientStats)(nil),
+ }
+}
+
+func _LoadBalanceRequest_OneofMarshaler(msg proto.Message, b *proto.Buffer) error {
+ m := msg.(*LoadBalanceRequest)
+ // load_balance_request_type
+ switch x := m.LoadBalanceRequestType.(type) {
+ case *LoadBalanceRequest_InitialRequest:
+ b.EncodeVarint(1<<3 | proto.WireBytes)
+ if err := b.EncodeMessage(x.InitialRequest); err != nil {
+ return err
+ }
+ case *LoadBalanceRequest_ClientStats:
+ b.EncodeVarint(2<<3 | proto.WireBytes)
+ if err := b.EncodeMessage(x.ClientStats); err != nil {
+ return err
+ }
+ case nil:
+ default:
+ return fmt.Errorf("LoadBalanceRequest.LoadBalanceRequestType has unexpected type %T", x)
+ }
+ return nil
+}
+
+func _LoadBalanceRequest_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) {
+ m := msg.(*LoadBalanceRequest)
+ switch tag {
+ case 1: // load_balance_request_type.initial_request
+ if wire != proto.WireBytes {
+ return true, proto.ErrInternalBadWireType
+ }
+ msg := new(InitialLoadBalanceRequest)
+ err := b.DecodeMessage(msg)
+ m.LoadBalanceRequestType = &LoadBalanceRequest_InitialRequest{msg}
+ return true, err
+ case 2: // load_balance_request_type.client_stats
+ if wire != proto.WireBytes {
+ return true, proto.ErrInternalBadWireType
+ }
+ msg := new(ClientStats)
+ err := b.DecodeMessage(msg)
+ m.LoadBalanceRequestType = &LoadBalanceRequest_ClientStats{msg}
+ return true, err
+ default:
+ return false, nil
+ }
+}
+
+func _LoadBalanceRequest_OneofSizer(msg proto.Message) (n int) {
+ m := msg.(*LoadBalanceRequest)
+ // load_balance_request_type
+ switch x := m.LoadBalanceRequestType.(type) {
+ case *LoadBalanceRequest_InitialRequest:
+ s := proto.Size(x.InitialRequest)
+ n += proto.SizeVarint(1<<3 | proto.WireBytes)
+ n += proto.SizeVarint(uint64(s))
+ n += s
+ case *LoadBalanceRequest_ClientStats:
+ s := proto.Size(x.ClientStats)
+ n += proto.SizeVarint(2<<3 | proto.WireBytes)
+ n += proto.SizeVarint(uint64(s))
+ n += s
+ case nil:
+ default:
+ panic(fmt.Sprintf("proto: unexpected type %T in oneof", x))
+ }
+ return n
+}
+
+type InitialLoadBalanceRequest struct {
+ // Name of the load balanced service (e.g., balancer.service.com);
+ // its length should be less than 256 bytes.
+ Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+}
+
+func (m *InitialLoadBalanceRequest) Reset() { *m = InitialLoadBalanceRequest{} }
+func (m *InitialLoadBalanceRequest) String() string { return proto.CompactTextString(m) }
+func (*InitialLoadBalanceRequest) ProtoMessage() {}
+func (*InitialLoadBalanceRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} }
+
+func (m *InitialLoadBalanceRequest) GetName() string {
+ if m != nil {
+ return m.Name
+ }
+ return ""
+}
+
+// Contains client level statistics that are useful to load balancing. Each
+// count except the timestamp should be reset to zero after reporting the stats.
+type ClientStats struct {
+ // The timestamp of generating the report.
+ Timestamp *Timestamp `protobuf:"bytes,1,opt,name=timestamp" json:"timestamp,omitempty"`
+ // The total number of RPCs that started.
+ NumCallsStarted int64 `protobuf:"varint,2,opt,name=num_calls_started,json=numCallsStarted" json:"num_calls_started,omitempty"`
+ // The total number of RPCs that finished.
+ NumCallsFinished int64 `protobuf:"varint,3,opt,name=num_calls_finished,json=numCallsFinished" json:"num_calls_finished,omitempty"`
+ // The total number of RPCs that were dropped by the client because of rate
+ // limiting.
+ NumCallsFinishedWithDropForRateLimiting int64 `protobuf:"varint,4,opt,name=num_calls_finished_with_drop_for_rate_limiting,json=numCallsFinishedWithDropForRateLimiting" json:"num_calls_finished_with_drop_for_rate_limiting,omitempty"`
+ // The total number of RPCs that were dropped by the client because of load
+ // balancing.
+ NumCallsFinishedWithDropForLoadBalancing int64 `protobuf:"varint,5,opt,name=num_calls_finished_with_drop_for_load_balancing,json=numCallsFinishedWithDropForLoadBalancing" json:"num_calls_finished_with_drop_for_load_balancing,omitempty"`
+ // The total number of RPCs that failed to reach a server except dropped RPCs.
+ NumCallsFinishedWithClientFailedToSend int64 `protobuf:"varint,6,opt,name=num_calls_finished_with_client_failed_to_send,json=numCallsFinishedWithClientFailedToSend" json:"num_calls_finished_with_client_failed_to_send,omitempty"`
+ // The total number of RPCs that finished and are known to have been received
+ // by a server.
+ NumCallsFinishedKnownReceived int64 `protobuf:"varint,7,opt,name=num_calls_finished_known_received,json=numCallsFinishedKnownReceived" json:"num_calls_finished_known_received,omitempty"`
+}
+
+func (m *ClientStats) Reset() { *m = ClientStats{} }
+func (m *ClientStats) String() string { return proto.CompactTextString(m) }
+func (*ClientStats) ProtoMessage() {}
+func (*ClientStats) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} }
+
+func (m *ClientStats) GetTimestamp() *Timestamp {
+ if m != nil {
+ return m.Timestamp
+ }
+ return nil
+}
+
+func (m *ClientStats) GetNumCallsStarted() int64 {
+ if m != nil {
+ return m.NumCallsStarted
+ }
+ return 0
+}
+
+func (m *ClientStats) GetNumCallsFinished() int64 {
+ if m != nil {
+ return m.NumCallsFinished
+ }
+ return 0
+}
+
+func (m *ClientStats) GetNumCallsFinishedWithDropForRateLimiting() int64 {
+ if m != nil {
+ return m.NumCallsFinishedWithDropForRateLimiting
+ }
+ return 0
+}
+
+func (m *ClientStats) GetNumCallsFinishedWithDropForLoadBalancing() int64 {
+ if m != nil {
+ return m.NumCallsFinishedWithDropForLoadBalancing
+ }
+ return 0
+}
+
+func (m *ClientStats) GetNumCallsFinishedWithClientFailedToSend() int64 {
+ if m != nil {
+ return m.NumCallsFinishedWithClientFailedToSend
+ }
+ return 0
+}
+
+func (m *ClientStats) GetNumCallsFinishedKnownReceived() int64 {
+ if m != nil {
+ return m.NumCallsFinishedKnownReceived
+ }
+ return 0
+}
+
+type LoadBalanceResponse struct {
+ // Types that are valid to be assigned to LoadBalanceResponseType:
+ // *LoadBalanceResponse_InitialResponse
+ // *LoadBalanceResponse_ServerList
+ LoadBalanceResponseType isLoadBalanceResponse_LoadBalanceResponseType `protobuf_oneof:"load_balance_response_type"`
+}
+
+func (m *LoadBalanceResponse) Reset() { *m = LoadBalanceResponse{} }
+func (m *LoadBalanceResponse) String() string { return proto.CompactTextString(m) }
+func (*LoadBalanceResponse) ProtoMessage() {}
+func (*LoadBalanceResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} }
+
+type isLoadBalanceResponse_LoadBalanceResponseType interface {
+ isLoadBalanceResponse_LoadBalanceResponseType()
+}
+
+type LoadBalanceResponse_InitialResponse struct {
+ InitialResponse *InitialLoadBalanceResponse `protobuf:"bytes,1,opt,name=initial_response,json=initialResponse,oneof"`
+}
+type LoadBalanceResponse_ServerList struct {
+ ServerList *ServerList `protobuf:"bytes,2,opt,name=server_list,json=serverList,oneof"`
+}
+
+func (*LoadBalanceResponse_InitialResponse) isLoadBalanceResponse_LoadBalanceResponseType() {}
+func (*LoadBalanceResponse_ServerList) isLoadBalanceResponse_LoadBalanceResponseType() {}
+
+func (m *LoadBalanceResponse) GetLoadBalanceResponseType() isLoadBalanceResponse_LoadBalanceResponseType {
+ if m != nil {
+ return m.LoadBalanceResponseType
+ }
+ return nil
+}
+
+func (m *LoadBalanceResponse) GetInitialResponse() *InitialLoadBalanceResponse {
+ if x, ok := m.GetLoadBalanceResponseType().(*LoadBalanceResponse_InitialResponse); ok {
+ return x.InitialResponse
+ }
+ return nil
+}
+
+func (m *LoadBalanceResponse) GetServerList() *ServerList {
+ if x, ok := m.GetLoadBalanceResponseType().(*LoadBalanceResponse_ServerList); ok {
+ return x.ServerList
+ }
+ return nil
+}
+
+// XXX_OneofFuncs is for the internal use of the proto package.
+func (*LoadBalanceResponse) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) {
+ return _LoadBalanceResponse_OneofMarshaler, _LoadBalanceResponse_OneofUnmarshaler, _LoadBalanceResponse_OneofSizer, []interface{}{
+ (*LoadBalanceResponse_InitialResponse)(nil),
+ (*LoadBalanceResponse_ServerList)(nil),
+ }
+}
+
+func _LoadBalanceResponse_OneofMarshaler(msg proto.Message, b *proto.Buffer) error {
+ m := msg.(*LoadBalanceResponse)
+ // load_balance_response_type
+ switch x := m.LoadBalanceResponseType.(type) {
+ case *LoadBalanceResponse_InitialResponse:
+ b.EncodeVarint(1<<3 | proto.WireBytes)
+ if err := b.EncodeMessage(x.InitialResponse); err != nil {
+ return err
+ }
+ case *LoadBalanceResponse_ServerList:
+ b.EncodeVarint(2<<3 | proto.WireBytes)
+ if err := b.EncodeMessage(x.ServerList); err != nil {
+ return err
+ }
+ case nil:
+ default:
+ return fmt.Errorf("LoadBalanceResponse.LoadBalanceResponseType has unexpected type %T", x)
+ }
+ return nil
+}
+
+func _LoadBalanceResponse_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) {
+ m := msg.(*LoadBalanceResponse)
+ switch tag {
+ case 1: // load_balance_response_type.initial_response
+ if wire != proto.WireBytes {
+ return true, proto.ErrInternalBadWireType
+ }
+ msg := new(InitialLoadBalanceResponse)
+ err := b.DecodeMessage(msg)
+ m.LoadBalanceResponseType = &LoadBalanceResponse_InitialResponse{msg}
+ return true, err
+ case 2: // load_balance_response_type.server_list
+ if wire != proto.WireBytes {
+ return true, proto.ErrInternalBadWireType
+ }
+ msg := new(ServerList)
+ err := b.DecodeMessage(msg)
+ m.LoadBalanceResponseType = &LoadBalanceResponse_ServerList{msg}
+ return true, err
+ default:
+ return false, nil
+ }
+}
+
+func _LoadBalanceResponse_OneofSizer(msg proto.Message) (n int) {
+ m := msg.(*LoadBalanceResponse)
+ // load_balance_response_type
+ switch x := m.LoadBalanceResponseType.(type) {
+ case *LoadBalanceResponse_InitialResponse:
+ s := proto.Size(x.InitialResponse)
+ n += proto.SizeVarint(1<<3 | proto.WireBytes)
+ n += proto.SizeVarint(uint64(s))
+ n += s
+ case *LoadBalanceResponse_ServerList:
+ s := proto.Size(x.ServerList)
+ n += proto.SizeVarint(2<<3 | proto.WireBytes)
+ n += proto.SizeVarint(uint64(s))
+ n += s
+ case nil:
+ default:
+ panic(fmt.Sprintf("proto: unexpected type %T in oneof", x))
+ }
+ return n
+}
+
+type InitialLoadBalanceResponse struct {
+ // This is an application layer redirect that indicates the client should use
+ // the specified server for load balancing. When this field is non-empty in
+ // the response, the client should open a separate connection to the
+ // load_balancer_delegate and call the BalanceLoad method. Its length should
+ // be less than 64 bytes.
+ LoadBalancerDelegate string `protobuf:"bytes,1,opt,name=load_balancer_delegate,json=loadBalancerDelegate" json:"load_balancer_delegate,omitempty"`
+ // This interval defines how often the client should send the client stats
+ // to the load balancer. Stats should only be reported when the duration is
+ // positive.
+ ClientStatsReportInterval *Duration `protobuf:"bytes,2,opt,name=client_stats_report_interval,json=clientStatsReportInterval" json:"client_stats_report_interval,omitempty"`
+}
+
+func (m *InitialLoadBalanceResponse) Reset() { *m = InitialLoadBalanceResponse{} }
+func (m *InitialLoadBalanceResponse) String() string { return proto.CompactTextString(m) }
+func (*InitialLoadBalanceResponse) ProtoMessage() {}
+func (*InitialLoadBalanceResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} }
+
+func (m *InitialLoadBalanceResponse) GetLoadBalancerDelegate() string {
+ if m != nil {
+ return m.LoadBalancerDelegate
+ }
+ return ""
+}
+
+func (m *InitialLoadBalanceResponse) GetClientStatsReportInterval() *Duration {
+ if m != nil {
+ return m.ClientStatsReportInterval
+ }
+ return nil
+}
+
+type ServerList struct {
+ // Contains a list of servers selected by the load balancer. The list will
+ // be updated when server resolutions change or as needed to balance load
+ // across more servers. The client should consume the server list in order
+ // unless instructed otherwise via the client_config.
+ Servers []*Server `protobuf:"bytes,1,rep,name=servers" json:"servers,omitempty"`
+}
+
+func (m *ServerList) Reset() { *m = ServerList{} }
+func (m *ServerList) String() string { return proto.CompactTextString(m) }
+func (*ServerList) ProtoMessage() {}
+func (*ServerList) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} }
+
+func (m *ServerList) GetServers() []*Server {
+ if m != nil {
+ return m.Servers
+ }
+ return nil
+}
+
+// Contains server information. When none of the [drop_for_*] fields are true,
+// use the other fields. When drop_for_rate_limiting is true, ignore all other
+// fields. Use drop_for_load_balancing only when it is true and
+// drop_for_rate_limiting is false.
+type Server struct {
+ // A resolved address for the server, serialized in network-byte-order. It may
+ // either be an IPv4 or IPv6 address.
+ IpAddress []byte `protobuf:"bytes,1,opt,name=ip_address,json=ipAddress,proto3" json:"ip_address,omitempty"`
+ // A resolved port number for the server.
+ Port int32 `protobuf:"varint,2,opt,name=port" json:"port,omitempty"`
+ // An opaque but printable token given to the frontend for each pick. All
+ // frontend requests for that pick must include the token in its initial
+ // metadata. The token is used by the backend to verify the request and to
+ // allow the backend to report load to the gRPC LB system.
+ //
+ // Its length is variable but less than 50 bytes.
+ LoadBalanceToken string `protobuf:"bytes,3,opt,name=load_balance_token,json=loadBalanceToken" json:"load_balance_token,omitempty"`
+ // Indicates whether this particular request should be dropped by the client
+ // for rate limiting.
+ DropForRateLimiting bool `protobuf:"varint,4,opt,name=drop_for_rate_limiting,json=dropForRateLimiting" json:"drop_for_rate_limiting,omitempty"`
+ // Indicates whether this particular request should be dropped by the client
+ // for load balancing.
+ DropForLoadBalancing bool `protobuf:"varint,5,opt,name=drop_for_load_balancing,json=dropForLoadBalancing" json:"drop_for_load_balancing,omitempty"`
+}
+
+func (m *Server) Reset() { *m = Server{} }
+func (m *Server) String() string { return proto.CompactTextString(m) }
+func (*Server) ProtoMessage() {}
+func (*Server) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8} }
+
+func (m *Server) GetIpAddress() []byte {
+ if m != nil {
+ return m.IpAddress
+ }
+ return nil
+}
+
+func (m *Server) GetPort() int32 {
+ if m != nil {
+ return m.Port
+ }
+ return 0
+}
+
+func (m *Server) GetLoadBalanceToken() string {
+ if m != nil {
+ return m.LoadBalanceToken
+ }
+ return ""
+}
+
+func (m *Server) GetDropForRateLimiting() bool {
+ if m != nil {
+ return m.DropForRateLimiting
+ }
+ return false
+}
+
+func (m *Server) GetDropForLoadBalancing() bool {
+ if m != nil {
+ return m.DropForLoadBalancing
+ }
+ return false
+}
+
+func init() {
+ proto.RegisterType((*Duration)(nil), "grpc.lb.v1.Duration")
+ proto.RegisterType((*Timestamp)(nil), "grpc.lb.v1.Timestamp")
+ proto.RegisterType((*LoadBalanceRequest)(nil), "grpc.lb.v1.LoadBalanceRequest")
+ proto.RegisterType((*InitialLoadBalanceRequest)(nil), "grpc.lb.v1.InitialLoadBalanceRequest")
+ proto.RegisterType((*ClientStats)(nil), "grpc.lb.v1.ClientStats")
+ proto.RegisterType((*LoadBalanceResponse)(nil), "grpc.lb.v1.LoadBalanceResponse")
+ proto.RegisterType((*InitialLoadBalanceResponse)(nil), "grpc.lb.v1.InitialLoadBalanceResponse")
+ proto.RegisterType((*ServerList)(nil), "grpc.lb.v1.ServerList")
+ proto.RegisterType((*Server)(nil), "grpc.lb.v1.Server")
+}
+
+func init() { proto.RegisterFile("grpc_lb_v1/messages/messages.proto", fileDescriptor0) }
+
+var fileDescriptor0 = []byte{
+ // 709 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x55, 0xdd, 0x4e, 0x1b, 0x3b,
+ 0x10, 0x26, 0x27, 0x01, 0x92, 0x09, 0x3a, 0xe4, 0x98, 0x1c, 0x08, 0x14, 0x24, 0xba, 0x52, 0x69,
+ 0x54, 0xd1, 0x20, 0xa0, 0xbd, 0xe8, 0xcf, 0x45, 0x1b, 0x10, 0x0a, 0x2d, 0x17, 0x95, 0x43, 0x55,
+ 0xa9, 0x52, 0x65, 0x39, 0xd9, 0x21, 0x58, 0x6c, 0xec, 0xad, 0xed, 0x04, 0xf5, 0x11, 0xfa, 0x28,
+ 0x7d, 0x8c, 0xaa, 0xcf, 0xd0, 0xf7, 0xa9, 0xd6, 0xbb, 0x9b, 0x5d, 0x20, 0x80, 0x7a, 0x67, 0x8f,
+ 0xbf, 0xf9, 0xbe, 0xf1, 0xac, 0xbf, 0x59, 0xf0, 0x06, 0x3a, 0xec, 0xb3, 0xa0, 0xc7, 0xc6, 0xbb,
+ 0x3b, 0x43, 0x34, 0x86, 0x0f, 0xd0, 0x4c, 0x16, 0xad, 0x50, 0x2b, 0xab, 0x08, 0x44, 0x98, 0x56,
+ 0xd0, 0x6b, 0x8d, 0x77, 0xbd, 0x97, 0x50, 0x3e, 0x1c, 0x69, 0x6e, 0x85, 0x92, 0xa4, 0x01, 0xf3,
+ 0x06, 0xfb, 0x4a, 0xfa, 0xa6, 0x51, 0xd8, 0x2c, 0x34, 0x8b, 0x34, 0xdd, 0x92, 0x3a, 0xcc, 0x4a,
+ 0x2e, 0x95, 0x69, 0xfc, 0xb3, 0x59, 0x68, 0xce, 0xd2, 0x78, 0xe3, 0xbd, 0x82, 0xca, 0xa9, 0x18,
+ 0xa2, 0xb1, 0x7c, 0x18, 0xfe, 0x75, 0xf2, 0xcf, 0x02, 0x90, 0x13, 0xc5, 0xfd, 0x36, 0x0f, 0xb8,
+ 0xec, 0x23, 0xc5, 0xaf, 0x23, 0x34, 0x96, 0x7c, 0x80, 0x45, 0x21, 0x85, 0x15, 0x3c, 0x60, 0x3a,
+ 0x0e, 0x39, 0xba, 0xea, 0xde, 0xa3, 0x56, 0x56, 0x75, 0xeb, 0x38, 0x86, 0xdc, 0xcc, 0xef, 0xcc,
+ 0xd0, 0x7f, 0x93, 0xfc, 0x94, 0xf1, 0x35, 0x2c, 0xf4, 0x03, 0x81, 0xd2, 0x32, 0x63, 0xb9, 0x8d,
+ 0xab, 0xa8, 0xee, 0xad, 0xe4, 0xe9, 0x0e, 0xdc, 0x79, 0x37, 0x3a, 0xee, 0xcc, 0xd0, 0x6a, 0x3f,
+ 0xdb, 0xb6, 0x1f, 0xc0, 0x6a, 0xa0, 0xb8, 0xcf, 0x7a, 0xb1, 0x4c, 0x5a, 0x14, 0xb3, 0xdf, 0x42,
+ 0xf4, 0x76, 0x60, 0xf5, 0xd6, 0x4a, 0x08, 0x81, 0x92, 0xe4, 0x43, 0x74, 0xe5, 0x57, 0xa8, 0x5b,
+ 0x7b, 0xdf, 0x4b, 0x50, 0xcd, 0x89, 0x91, 0x7d, 0xa8, 0xd8, 0xb4, 0x83, 0xc9, 0x3d, 0xff, 0xcf,
+ 0x17, 0x36, 0x69, 0x2f, 0xcd, 0x70, 0xe4, 0x09, 0xfc, 0x27, 0x47, 0x43, 0xd6, 0xe7, 0x41, 0x60,
+ 0xa2, 0x3b, 0x69, 0x8b, 0xbe, 0xbb, 0x55, 0x91, 0x2e, 0xca, 0xd1, 0xf0, 0x20, 0x8a, 0x77, 0xe3,
+ 0x30, 0xd9, 0x06, 0x92, 0x61, 0xcf, 0x84, 0x14, 0xe6, 0x1c, 0xfd, 0x46, 0xd1, 0x81, 0x6b, 0x29,
+ 0xf8, 0x28, 0x89, 0x13, 0x06, 0xad, 0x9b, 0x68, 0x76, 0x29, 0xec, 0x39, 0xf3, 0xb5, 0x0a, 0xd9,
+ 0x99, 0xd2, 0x4c, 0x73, 0x8b, 0x2c, 0x10, 0x43, 0x61, 0x85, 0x1c, 0x34, 0x4a, 0x8e, 0xe9, 0xf1,
+ 0x75, 0xa6, 0x4f, 0xc2, 0x9e, 0x1f, 0x6a, 0x15, 0x1e, 0x29, 0x4d, 0xb9, 0xc5, 0x93, 0x04, 0x4e,
+ 0x38, 0xec, 0xdc, 0x2b, 0x90, 0x6b, 0x77, 0xa4, 0x30, 0xeb, 0x14, 0x9a, 0x77, 0x28, 0x64, 0xbd,
+ 0x8f, 0x24, 0xbe, 0xc0, 0xd3, 0xdb, 0x24, 0x92, 0x67, 0x70, 0xc6, 0x45, 0x80, 0x3e, 0xb3, 0x8a,
+ 0x19, 0x94, 0x7e, 0x63, 0xce, 0x09, 0x6c, 0x4d, 0x13, 0x88, 0x3f, 0xd5, 0x91, 0xc3, 0x9f, 0xaa,
+ 0x2e, 0x4a, 0x9f, 0x74, 0xe0, 0xe1, 0x14, 0xfa, 0x0b, 0xa9, 0x2e, 0x25, 0xd3, 0xd8, 0x47, 0x31,
+ 0x46, 0xbf, 0x31, 0xef, 0x28, 0x37, 0xae, 0x53, 0xbe, 0x8f, 0x50, 0x34, 0x01, 0x79, 0xbf, 0x0a,
+ 0xb0, 0x74, 0xe5, 0xd9, 0x98, 0x50, 0x49, 0x83, 0xa4, 0x0b, 0xb5, 0xcc, 0x01, 0x71, 0x2c, 0x79,
+ 0x1a, 0x5b, 0xf7, 0x59, 0x20, 0x46, 0x77, 0x66, 0xe8, 0xe2, 0xc4, 0x03, 0x09, 0xe9, 0x0b, 0xa8,
+ 0x1a, 0xd4, 0x63, 0xd4, 0x2c, 0x10, 0xc6, 0x26, 0x1e, 0x58, 0xce, 0xf3, 0x75, 0xdd, 0xf1, 0x89,
+ 0x70, 0x1e, 0x02, 0x33, 0xd9, 0xb5, 0xd7, 0x61, 0xed, 0x9a, 0x03, 0x62, 0xce, 0xd8, 0x02, 0x3f,
+ 0x0a, 0xb0, 0x76, 0x7b, 0x29, 0xe4, 0x19, 0x2c, 0xe7, 0x93, 0x35, 0xf3, 0x31, 0xc0, 0x01, 0xb7,
+ 0xa9, 0x2d, 0xea, 0x41, 0x96, 0xa4, 0x0f, 0x93, 0x33, 0xf2, 0x11, 0xd6, 0xf3, 0x96, 0x65, 0x1a,
+ 0x43, 0xa5, 0x2d, 0x13, 0xd2, 0xa2, 0x1e, 0xf3, 0x20, 0x29, 0xbf, 0x9e, 0x2f, 0x3f, 0x1d, 0x62,
+ 0x74, 0x35, 0xe7, 0x5e, 0xea, 0xf2, 0x8e, 0x93, 0x34, 0xef, 0x0d, 0x40, 0x76, 0x4b, 0xb2, 0x1d,
+ 0x0d, 0xac, 0x68, 0x17, 0x0d, 0xac, 0x62, 0xb3, 0xba, 0x47, 0x6e, 0xb6, 0x83, 0xa6, 0x90, 0x77,
+ 0xa5, 0x72, 0xb1, 0x56, 0xf2, 0x7e, 0x17, 0x60, 0x2e, 0x3e, 0x21, 0x1b, 0x00, 0x22, 0x64, 0xdc,
+ 0xf7, 0x35, 0x9a, 0x78, 0xe4, 0x2d, 0xd0, 0x8a, 0x08, 0xdf, 0xc6, 0x81, 0xc8, 0xfd, 0x91, 0x76,
+ 0x32, 0xf3, 0xdc, 0x3a, 0x32, 0xe3, 0x95, 0x4e, 0x5a, 0x75, 0x81, 0xd2, 0x99, 0xb1, 0x42, 0x6b,
+ 0xb9, 0x46, 0x9c, 0x46, 0x71, 0xb2, 0x0f, 0xcb, 0x77, 0x98, 0xae, 0x4c, 0x97, 0xfc, 0x29, 0x06,
+ 0x7b, 0x0e, 0x2b, 0x77, 0x19, 0xa9, 0x4c, 0xeb, 0xfe, 0x14, 0xd3, 0xb4, 0xe1, 0x73, 0x39, 0xfd,
+ 0x47, 0xf4, 0xe6, 0xdc, 0x4f, 0x62, 0xff, 0x4f, 0x00, 0x00, 0x00, 0xff, 0xff, 0xa3, 0x36, 0x86,
+ 0xa6, 0x4a, 0x06, 0x00, 0x00,
+}
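
The generated Go above wraps each oneof variant of LoadBalanceRequest in its own struct (LoadBalanceRequest_InitialRequest, LoadBalanceRequest_ClientStats) and exposes typed getters that return nil when the other variant is set. A minimal, illustrative sketch of that surface, assuming the vendored package is importable at the path used elsewhere in this diff (the snippet is not part of the commit):

package main

import (
	"fmt"

	lbpb "google.golang.org/grpc/grpclb/grpc_lb_v1/messages"
)

func main() {
	// Build the initial-request variant of the oneof, the same shape that
	// callRemoteBalancer sends later in this diff.
	req := &lbpb.LoadBalanceRequest{
		LoadBalanceRequestType: &lbpb.LoadBalanceRequest_InitialRequest{
			InitialRequest: &lbpb.InitialLoadBalanceRequest{Name: "balancer.service.com"},
		},
	}

	// The typed getters switch on the oneof: the set variant is returned,
	// the unset one yields nil.
	fmt.Println(req.GetInitialRequest().GetName()) // balancer.service.com
	fmt.Println(req.GetClientStats() == nil)       // true
}
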
diff --git a/vendor/google.golang.org/grpc/grpclb/grpc_lb_v1/messages/messages.proto b/vendor/google.golang.org/grpc/grpclb/grpc_lb_v1/messages/messages.proto
new file mode 100644
index 000000000..42d99c109
--- /dev/null
+++ b/vendor/google.golang.org/grpc/grpclb/grpc_lb_v1/messages/messages.proto
@@ -0,0 +1,155 @@
+// Copyright 2016 gRPC authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package grpc.lb.v1;
+option go_package = "google.golang.org/grpc/grpclb/grpc_lb_v1/messages";
+
+message Duration {
+ // Signed seconds of the span of time. Must be from -315,576,000,000
+ // to +315,576,000,000 inclusive.
+ int64 seconds = 1;
+
+ // Signed fractions of a second at nanosecond resolution of the span
+ // of time. Durations less than one second are represented with a 0
+ // `seconds` field and a positive or negative `nanos` field. For durations
+ // of one second or more, a non-zero value for the `nanos` field must be
+ // of the same sign as the `seconds` field. Must be from -999,999,999
+ // to +999,999,999 inclusive.
+ int32 nanos = 2;
+}
+
+message Timestamp {
+ // Represents seconds of UTC time since Unix epoch
+ // 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to
+ // 9999-12-31T23:59:59Z inclusive.
+ int64 seconds = 1;
+
+ // Non-negative fractions of a second at nanosecond resolution. Negative
+ // second values with fractions must still have non-negative nanos values
+ // that count forward in time. Must be from 0 to 999,999,999
+ // inclusive.
+ int32 nanos = 2;
+}
+
+message LoadBalanceRequest {
+ oneof load_balance_request_type {
+ // This message should be sent on the first request to the load balancer.
+ InitialLoadBalanceRequest initial_request = 1;
+
+ // The client stats should be periodically reported to the load balancer
+ // based on the duration defined in the InitialLoadBalanceResponse.
+ ClientStats client_stats = 2;
+ }
+}
+
+message InitialLoadBalanceRequest {
+ // Name of the load balanced service (e.g., balancer.service.com);
+ // its length should be less than 256 bytes.
+ string name = 1;
+}
+
+// Contains client level statistics that are useful to load balancing. Each
+// count except the timestamp should be reset to zero after reporting the stats.
+message ClientStats {
+ // The timestamp of generating the report.
+ Timestamp timestamp = 1;
+
+ // The total number of RPCs that started.
+ int64 num_calls_started = 2;
+
+ // The total number of RPCs that finished.
+ int64 num_calls_finished = 3;
+
+ // The total number of RPCs that were dropped by the client because of rate
+ // limiting.
+ int64 num_calls_finished_with_drop_for_rate_limiting = 4;
+
+ // The total number of RPCs that were dropped by the client because of load
+ // balancing.
+ int64 num_calls_finished_with_drop_for_load_balancing = 5;
+
+ // The total number of RPCs that failed to reach a server except dropped RPCs.
+ int64 num_calls_finished_with_client_failed_to_send = 6;
+
+ // The total number of RPCs that finished and are known to have been received
+ // by a server.
+ int64 num_calls_finished_known_received = 7;
+}
+
+message LoadBalanceResponse {
+ oneof load_balance_response_type {
+ // This message should be sent on the first response to the client.
+ InitialLoadBalanceResponse initial_response = 1;
+
+ // Contains the list of servers selected by the load balancer. The client
+ // should send requests to these servers in the specified order.
+ ServerList server_list = 2;
+ }
+}
+
+message InitialLoadBalanceResponse {
+ // This is an application layer redirect that indicates the client should use
+ // the specified server for load balancing. When this field is non-empty in
+ // the response, the client should open a separate connection to the
+ // load_balancer_delegate and call the BalanceLoad method. Its length should
+ // be less than 64 bytes.
+ string load_balancer_delegate = 1;
+
+ // This interval defines how often the client should send the client stats
+ // to the load balancer. Stats should only be reported when the duration is
+ // positive.
+ Duration client_stats_report_interval = 2;
+}
+
+message ServerList {
+ // Contains a list of servers selected by the load balancer. The list will
+ // be updated when server resolutions change or as needed to balance load
+ // across more servers. The client should consume the server list in order
+ // unless instructed otherwise via the client_config.
+ repeated Server servers = 1;
+
+ // Was google.protobuf.Duration expiration_interval.
+ reserved 3;
+}
+
+// Contains server information. When none of the [drop_for_*] fields are true,
+// use the other fields. When drop_for_rate_limiting is true, ignore all other
+// fields. Use drop_for_load_balancing only when it is true and
+// drop_for_rate_limiting is false.
+message Server {
+ // A resolved address for the server, serialized in network-byte-order. It may
+ // either be an IPv4 or IPv6 address.
+ bytes ip_address = 1;
+
+ // A resolved port number for the server.
+ int32 port = 2;
+
+ // An opaque but printable token given to the frontend for each pick. All
+ // frontend requests for that pick must include the token in its initial
+ // metadata. The token is used by the backend to verify the request and to
+ // allow the backend to report load to the gRPC LB system.
+ //
+ // Its length is variable but less than 50 bytes.
+ string load_balance_token = 3;
+
+ // Indicates whether this particular request should be dropped by the client
+ // for rate limiting.
+ bool drop_for_rate_limiting = 4;
+
+ // Indicates whether this particular request should be dropped by the client
+ // for load balancing.
+ bool drop_for_load_balancing = 5;
+}
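
The Duration message above is a local copy of the usual seconds/nanos split. A hedged, standalone sketch of converting a Go time.Duration into it; the helper name toLBDuration is invented for illustration (the vendored code uses its own unexported convertDuration, which is not shown in this diff):

package main

import (
	"fmt"
	"time"

	lbpb "google.golang.org/grpc/grpclb/grpc_lb_v1/messages"
)

// toLBDuration is an illustrative helper: it splits a time.Duration into the
// seconds/nanos pair expected by the grpc.lb.v1 Duration message above.
func toLBDuration(d time.Duration) *lbpb.Duration {
	return &lbpb.Duration{
		Seconds: int64(d / time.Second),
		Nanos:   int32(d % time.Second),
	}
}

func main() {
	fmt.Println(toLBDuration(1500 * time.Millisecond)) // seconds:1 nanos:500000000
}
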
diff --git a/vendor/google.golang.org/grpc/grpclb_picker.go b/vendor/google.golang.org/grpc/grpclb_picker.go
new file mode 100644
index 000000000..872c7ccea
--- /dev/null
+++ b/vendor/google.golang.org/grpc/grpclb_picker.go
@@ -0,0 +1,159 @@
+/*
+ *
+ * Copyright 2017 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package grpc
+
+import (
+ "sync"
+ "sync/atomic"
+
+ "golang.org/x/net/context"
+ "google.golang.org/grpc/balancer"
+ "google.golang.org/grpc/codes"
+ lbpb "google.golang.org/grpc/grpclb/grpc_lb_v1/messages"
+ "google.golang.org/grpc/status"
+)
+
+type rpcStats struct {
+ NumCallsStarted int64
+ NumCallsFinished int64
+ NumCallsFinishedWithDropForRateLimiting int64
+ NumCallsFinishedWithDropForLoadBalancing int64
+ NumCallsFinishedWithClientFailedToSend int64
+ NumCallsFinishedKnownReceived int64
+}
+
+// toClientStats converts rpcStats to lbpb.ClientStats, and clears rpcStats.
+func (s *rpcStats) toClientStats() *lbpb.ClientStats {
+ stats := &lbpb.ClientStats{
+ NumCallsStarted: atomic.SwapInt64(&s.NumCallsStarted, 0),
+ NumCallsFinished: atomic.SwapInt64(&s.NumCallsFinished, 0),
+ NumCallsFinishedWithDropForRateLimiting: atomic.SwapInt64(&s.NumCallsFinishedWithDropForRateLimiting, 0),
+ NumCallsFinishedWithDropForLoadBalancing: atomic.SwapInt64(&s.NumCallsFinishedWithDropForLoadBalancing, 0),
+ NumCallsFinishedWithClientFailedToSend: atomic.SwapInt64(&s.NumCallsFinishedWithClientFailedToSend, 0),
+ NumCallsFinishedKnownReceived: atomic.SwapInt64(&s.NumCallsFinishedKnownReceived, 0),
+ }
+ return stats
+}
+
+func (s *rpcStats) dropForRateLimiting() {
+ atomic.AddInt64(&s.NumCallsStarted, 1)
+ atomic.AddInt64(&s.NumCallsFinishedWithDropForRateLimiting, 1)
+ atomic.AddInt64(&s.NumCallsFinished, 1)
+}
+
+func (s *rpcStats) dropForLoadBalancing() {
+ atomic.AddInt64(&s.NumCallsStarted, 1)
+ atomic.AddInt64(&s.NumCallsFinishedWithDropForLoadBalancing, 1)
+ atomic.AddInt64(&s.NumCallsFinished, 1)
+}
+
+func (s *rpcStats) failedToSend() {
+ atomic.AddInt64(&s.NumCallsStarted, 1)
+ atomic.AddInt64(&s.NumCallsFinishedWithClientFailedToSend, 1)
+ atomic.AddInt64(&s.NumCallsFinished, 1)
+}
+
+func (s *rpcStats) knownReceived() {
+ atomic.AddInt64(&s.NumCallsStarted, 1)
+ atomic.AddInt64(&s.NumCallsFinishedKnownReceived, 1)
+ atomic.AddInt64(&s.NumCallsFinished, 1)
+}
+
+type errPicker struct {
+ // Pick always returns this err.
+ err error
+}
+
+func (p *errPicker) Pick(ctx context.Context, opts balancer.PickOptions) (balancer.SubConn, func(balancer.DoneInfo), error) {
+ return nil, nil, p.err
+}
+
+// rrPicker does roundrobin on subConns. It's typically used when there's no
+// response from the remote balancer, and grpclb falls back to the resolved
+// backends.
+//
+// It is guaranteed that len(subConns) > 0.
+type rrPicker struct {
+ mu sync.Mutex
+ subConns []balancer.SubConn // The subConns that were READY when taking the snapshot.
+ subConnsNext int
+}
+
+func (p *rrPicker) Pick(ctx context.Context, opts balancer.PickOptions) (balancer.SubConn, func(balancer.DoneInfo), error) {
+ p.mu.Lock()
+ defer p.mu.Unlock()
+ sc := p.subConns[p.subConnsNext]
+ p.subConnsNext = (p.subConnsNext + 1) % len(p.subConns)
+ return sc, nil, nil
+}
+
+// lbPicker does two layers of picks:
+//
+// First layer: roundrobin on all servers in serverList, including drops and backends.
+// - If it picks a drop, the RPC will fail as being dropped.
+// - If it picks a backend, do a second layer pick to pick the real backend.
+//
+// Second layer: roundrobin on all READY backends.
+//
+// It's guaranteed that len(serverList) > 0.
+type lbPicker struct {
+ mu sync.Mutex
+ serverList []*lbpb.Server
+ serverListNext int
+ subConns []balancer.SubConn // The subConns that were READY when taking the snapshot.
+ subConnsNext int
+
+ stats *rpcStats
+}
+
+func (p *lbPicker) Pick(ctx context.Context, opts balancer.PickOptions) (balancer.SubConn, func(balancer.DoneInfo), error) {
+ p.mu.Lock()
+ defer p.mu.Unlock()
+
+ // Layer one roundrobin on serverList.
+ s := p.serverList[p.serverListNext]
+ p.serverListNext = (p.serverListNext + 1) % len(p.serverList)
+
+ // If it's a drop, return an error and fail the RPC.
+ if s.DropForRateLimiting {
+ p.stats.dropForRateLimiting()
+ return nil, nil, status.Errorf(codes.Unavailable, "request dropped by grpclb")
+ }
+ if s.DropForLoadBalancing {
+ p.stats.dropForLoadBalancing()
+ return nil, nil, status.Errorf(codes.Unavailable, "request dropped by grpclb")
+ }
+
+ // If it's not a drop but there are no ready subConns.
+ if len(p.subConns) <= 0 {
+ return nil, nil, balancer.ErrNoSubConnAvailable
+ }
+
+ // Return the next ready subConn in the list, also collect rpc stats.
+ sc := p.subConns[p.subConnsNext]
+ p.subConnsNext = (p.subConnsNext + 1) % len(p.subConns)
+ done := func(info balancer.DoneInfo) {
+ if !info.BytesSent {
+ p.stats.failedToSend()
+ } else if info.BytesReceived {
+ p.stats.knownReceived()
+ }
+ }
+ return sc, done, nil
+}
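
rpcStats above relies on atomic.SwapInt64 so that toClientStats reads and zeroes each counter in one step, which is what lets the counts be "reset after reporting" without a lock. A standalone sketch of that snapshot-and-reset pattern; the counter type here is invented for illustration, since rpcStats itself is unexported:

package main

import (
	"fmt"
	"sync/atomic"
)

// counter mimics rpcStats: increments are atomic adds, and snapshot uses
// SwapInt64 to return the current value while resetting it to zero, so no
// increment is lost or double-counted between reports.
type counter struct{ n int64 }

func (c *counter) inc() { atomic.AddInt64(&c.n, 1) }

func (c *counter) snapshot() int64 { return atomic.SwapInt64(&c.n, 0) }

func main() {
	c := &counter{}
	for i := 0; i < 3; i++ {
		c.inc()
	}
	fmt.Println(c.snapshot()) // 3
	fmt.Println(c.snapshot()) // 0 (reset by the first snapshot)
}
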
diff --git a/vendor/google.golang.org/grpc/grpclb_remote_balancer.go b/vendor/google.golang.org/grpc/grpclb_remote_balancer.go
new file mode 100644
index 000000000..1b580df26
--- /dev/null
+++ b/vendor/google.golang.org/grpc/grpclb_remote_balancer.go
@@ -0,0 +1,254 @@
+/*
+ *
+ * Copyright 2017 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package grpc
+
+import (
+ "fmt"
+ "net"
+ "reflect"
+ "time"
+
+ "golang.org/x/net/context"
+ "google.golang.org/grpc/balancer"
+ "google.golang.org/grpc/connectivity"
+ lbpb "google.golang.org/grpc/grpclb/grpc_lb_v1/messages"
+ "google.golang.org/grpc/grpclog"
+ "google.golang.org/grpc/metadata"
+ "google.golang.org/grpc/resolver"
+)
+
+// processServerList updates the balancer's internal state, creates/removes
+// SubConns, and regenerates the picker using the received serverList.
+func (lb *lbBalancer) processServerList(l *lbpb.ServerList) {
+ grpclog.Infof("lbBalancer: processing server list: %+v", l)
+ lb.mu.Lock()
+ defer lb.mu.Unlock()
+
+ // Set serverListReceived to true so fallback will not take effect if it has
+ // not hit timeout.
+ lb.serverListReceived = true
+
+ // If the new server list == old server list, do nothing.
+ if reflect.DeepEqual(lb.fullServerList, l.Servers) {
+ grpclog.Infof("lbBalancer: new serverlist same as the previous one, ignoring")
+ return
+ }
+ lb.fullServerList = l.Servers
+
+ var backendAddrs []resolver.Address
+ for _, s := range l.Servers {
+ if s.DropForLoadBalancing || s.DropForRateLimiting {
+ continue
+ }
+
+ md := metadata.Pairs(lbTokeyKey, s.LoadBalanceToken)
+ ip := net.IP(s.IpAddress)
+ ipStr := ip.String()
+ if ip.To4() == nil {
+ // Add square brackets to ipv6 addresses, otherwise net.Dial() and
+ // net.SplitHostPort() will return too many colons error.
+ ipStr = fmt.Sprintf("[%s]", ipStr)
+ }
+ addr := resolver.Address{
+ Addr: fmt.Sprintf("%s:%d", ipStr, s.Port),
+ Metadata: &md,
+ }
+
+ backendAddrs = append(backendAddrs, addr)
+ }
+
+ // Call refreshSubConns to create/remove SubConns.
+ backendsUpdated := lb.refreshSubConns(backendAddrs)
+ // If no backend was updated, no SubConn will be created/removed. But since
+ // the full serverList was different, there might be updates in drops or
+ // pick weights (a different number of duplicates). We need to update the
+ // picker with the full list.
+ if !backendsUpdated {
+ lb.regeneratePicker()
+ lb.cc.UpdateBalancerState(lb.state, lb.picker)
+ }
+}
+
+// refreshSubConns creates/removes SubConns with backendAddrs. It returns a bool
+// indicating whether the backendAddrs are different from the cached
+// backendAddrs (whether any SubConn was created/removed).
+// Caller must hold lb.mu.
+func (lb *lbBalancer) refreshSubConns(backendAddrs []resolver.Address) bool {
+ lb.backendAddrs = nil
+ var backendsUpdated bool
+ // addrsSet is the set converted from backendAddrs; it's used for quick
+ // address lookups.
+ addrsSet := make(map[resolver.Address]struct{})
+ // Create new SubConns.
+ for _, addr := range backendAddrs {
+ addrWithoutMD := addr
+ addrWithoutMD.Metadata = nil
+ addrsSet[addrWithoutMD] = struct{}{}
+ lb.backendAddrs = append(lb.backendAddrs, addrWithoutMD)
+
+ if _, ok := lb.subConns[addrWithoutMD]; !ok {
+ backendsUpdated = true
+
+ // Use addrWithMD to create the SubConn.
+ sc, err := lb.cc.NewSubConn([]resolver.Address{addr}, balancer.NewSubConnOptions{})
+ if err != nil {
+ grpclog.Warningf("roundrobinBalancer: failed to create new SubConn: %v", err)
+ continue
+ }
+ lb.subConns[addrWithoutMD] = sc // Use the addr without MD as key for the map.
+ lb.scStates[sc] = connectivity.Idle
+ sc.Connect()
+ }
+ }
+
+ for a, sc := range lb.subConns {
+ // a was removed by resolver.
+ if _, ok := addrsSet[a]; !ok {
+ backendsUpdated = true
+
+ lb.cc.RemoveSubConn(sc)
+ delete(lb.subConns, a)
+ // Keep the state of this sc in lb.scStates until sc's state becomes Shutdown.
+ // The entry will be deleted in HandleSubConnStateChange.
+ }
+ }
+
+ return backendsUpdated
+}
+
+func (lb *lbBalancer) readServerList(s *balanceLoadClientStream) error {
+ for {
+ reply, err := s.Recv()
+ if err != nil {
+ return fmt.Errorf("grpclb: failed to recv server list: %v", err)
+ }
+ if serverList := reply.GetServerList(); serverList != nil {
+ lb.processServerList(serverList)
+ }
+ }
+}
+
+func (lb *lbBalancer) sendLoadReport(s *balanceLoadClientStream, interval time.Duration) {
+ ticker := time.NewTicker(interval)
+ defer ticker.Stop()
+ for {
+ select {
+ case <-ticker.C:
+ case <-s.Context().Done():
+ return
+ }
+ stats := lb.clientStats.toClientStats()
+ t := time.Now()
+ stats.Timestamp = &lbpb.Timestamp{
+ Seconds: t.Unix(),
+ Nanos: int32(t.Nanosecond()),
+ }
+ if err := s.Send(&lbpb.LoadBalanceRequest{
+ LoadBalanceRequestType: &lbpb.LoadBalanceRequest_ClientStats{
+ ClientStats: stats,
+ },
+ }); err != nil {
+ return
+ }
+ }
+}
+func (lb *lbBalancer) callRemoteBalancer() error {
+ lbClient := &loadBalancerClient{cc: lb.ccRemoteLB}
+ ctx, cancel := context.WithCancel(context.Background())
+ defer cancel()
+ stream, err := lbClient.BalanceLoad(ctx, FailFast(false))
+ if err != nil {
+ return fmt.Errorf("grpclb: failed to perform RPC to the remote balancer %v", err)
+ }
+
+ // grpclb handshake on the stream.
+ initReq := &lbpb.LoadBalanceRequest{
+ LoadBalanceRequestType: &lbpb.LoadBalanceRequest_InitialRequest{
+ InitialRequest: &lbpb.InitialLoadBalanceRequest{
+ Name: lb.target,
+ },
+ },
+ }
+ if err := stream.Send(initReq); err != nil {
+ return fmt.Errorf("grpclb: failed to send init request: %v", err)
+ }
+ reply, err := stream.Recv()
+ if err != nil {
+ return fmt.Errorf("grpclb: failed to recv init response: %v", err)
+ }
+ initResp := reply.GetInitialResponse()
+ if initResp == nil {
+ return fmt.Errorf("grpclb: reply from remote balancer did not include initial response")
+ }
+ if initResp.LoadBalancerDelegate != "" {
+ return fmt.Errorf("grpclb: Delegation is not supported")
+ }
+
+ go func() {
+ if d := convertDuration(initResp.ClientStatsReportInterval); d > 0 {
+ lb.sendLoadReport(stream, d)
+ }
+ }()
+ return lb.readServerList(stream)
+}
+
+func (lb *lbBalancer) watchRemoteBalancer() {
+ for {
+ err := lb.callRemoteBalancer()
+ select {
+ case <-lb.doneCh:
+ return
+ default:
+ if err != nil {
+ grpclog.Error(err)
+ }
+ }
+
+ }
+}
+
+func (lb *lbBalancer) dialRemoteLB(remoteLBName string) {
+ var dopts []DialOption
+ if creds := lb.opt.DialCreds; creds != nil {
+ if err := creds.OverrideServerName(remoteLBName); err == nil {
+ dopts = append(dopts, WithTransportCredentials(creds))
+ } else {
+ grpclog.Warningf("grpclb: failed to override the server name in the credentials: %v, using Insecure", err)
+ dopts = append(dopts, WithInsecure())
+ }
+ } else {
+ dopts = append(dopts, WithInsecure())
+ }
+ if lb.opt.Dialer != nil {
+ // WithDialer takes a different type of function, so we instead use a
+ // special DialOption here.
+ dopts = append(dopts, withContextDialer(lb.opt.Dialer))
+ }
+ // Explicitly set pickfirst as the balancer.
+ dopts = append(dopts, WithBalancerName(PickFirstBalancerName))
+ dopts = append(dopts, withResolverBuilder(lb.manualResolver))
+ // Dial using manualResolver.Scheme, which is a random scheme generated
+ // when grpclb is initialized. The target name is not important.
+ cc, err := Dial("grpclb:///grpclb.server", dopts...)
+ if err != nil {
+ grpclog.Fatalf("failed to dial: %v", err)
+ }
+ lb.ccRemoteLB = cc
+ go lb.watchRemoteBalancer()
+}
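
processServerList above formats each backend as host:port and wraps IPv6 addresses in brackets before handing them to the resolver. A standalone sketch of just that formatting step; hostPort is an invented name, the real code inlines this logic:

package main

import (
	"fmt"
	"net"
)

// hostPort mirrors the address formatting in processServerList: IPv6
// addresses get square brackets so that net.Dial and net.SplitHostPort can
// parse the resulting "host:port" string.
func hostPort(ip net.IP, port int32) string {
	s := ip.String()
	if ip.To4() == nil {
		s = fmt.Sprintf("[%s]", s)
	}
	return fmt.Sprintf("%s:%d", s, port)
}

func main() {
	fmt.Println(hostPort(net.ParseIP("10.1.2.3"), 443))    // 10.1.2.3:443
	fmt.Println(hostPort(net.ParseIP("2001:db8::1"), 443)) // [2001:db8::1]:443
}
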
diff --git a/vendor/google.golang.org/grpc/grpclb_util.go b/vendor/google.golang.org/grpc/grpclb_util.go
new file mode 100644
index 000000000..93ab2db32
--- /dev/null
+++ b/vendor/google.golang.org/grpc/grpclb_util.go
@@ -0,0 +1,90 @@
+/*
+ *
+ * Copyright 2016 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package grpc
+
+import (
+ "google.golang.org/grpc/balancer"
+ "google.golang.org/grpc/resolver"
+)
+
+// The parent ClientConn should re-resolve when grpclb loses connection to the
+// remote balancer. When the ClientConn inside grpclb gets a TransientFailure,
+// it calls lbManualResolver.ResolveNow(), which calls the parent ClientConn's
+// ResolveNow, and eventually results in a re-resolve happening in the parent
+// ClientConn's resolver (DNS, for example).
+//
+// parent
+// ClientConn
+// +-----------------------------------------------------------------+
+// | parent +---------------------------------+ |
+// | DNS ClientConn | grpclb | |
+// | resolver balancerWrapper | | |
+// | + + | grpclb grpclb | |
+// | | | | ManualResolver ClientConn | |
+// | | | | + + | |
+// | | | | | | Transient | |
+// | | | | | | Failure | |
+// | | | | | <--------- | | |
+// | | | <--------------- | ResolveNow | | |
+// | | <--------- | ResolveNow | | | | |
+// | | ResolveNow | | | | | |
+// | | | | | | | |
+// | + + | + + | |
+// | +---------------------------------+ |
+// +-----------------------------------------------------------------+
+
+// lbManualResolver is used by the ClientConn inside grpclb. It's a manual
+// resolver with a special ResolveNow() function.
+//
+// When ResolveNow() is called, it calls ResolveNow() on the parent ClientConn,
+// so when the grpclb client loses contact with remote balancers, the parent
+// ClientConn's resolver will re-resolve.
+type lbManualResolver struct {
+ scheme string
+ ccr resolver.ClientConn
+
+ ccb balancer.ClientConn
+}
+
+func (r *lbManualResolver) Build(_ resolver.Target, cc resolver.ClientConn, _ resolver.BuildOption) (resolver.Resolver, error) {
+ r.ccr = cc
+ return r, nil
+}
+
+func (r *lbManualResolver) Scheme() string {
+ return r.scheme
+}
+
+// ResolveNow calls resolveNow on the parent ClientConn.
+func (r *lbManualResolver) ResolveNow(o resolver.ResolveNowOption) {
+ r.ccb.ResolveNow(o)
+}
+
+// Close is a noop for Resolver.
+func (*lbManualResolver) Close() {}
+
+// NewAddress calls cc.NewAddress.
+func (r *lbManualResolver) NewAddress(addrs []resolver.Address) {
+ r.ccr.NewAddress(addrs)
+}
+
+// NewServiceConfig calls cc.NewServiceConfig.
+func (r *lbManualResolver) NewServiceConfig(sc string) {
+ r.ccr.NewServiceConfig(sc)
+}
diff --git a/vendor/google.golang.org/grpc/grpclog/grpclog.go b/vendor/google.golang.org/grpc/grpclog/grpclog.go
index 1fabb11e1..16a7d8886 100644
--- a/vendor/google.golang.org/grpc/grpclog/grpclog.go
+++ b/vendor/google.golang.org/grpc/grpclog/grpclog.go
@@ -105,21 +105,18 @@ func Fatalln(args ...interface{}) {
}
// Print prints to the logger. Arguments are handled in the manner of fmt.Print.
-//
// Deprecated: use Info.
func Print(args ...interface{}) {
logger.Info(args...)
}
// Printf prints to the logger. Arguments are handled in the manner of fmt.Printf.
-//
// Deprecated: use Infof.
func Printf(format string, args ...interface{}) {
logger.Infof(format, args...)
}
// Println prints to the logger. Arguments are handled in the manner of fmt.Println.
-//
// Deprecated: use Infoln.
func Println(args ...interface{}) {
logger.Infoln(args...)
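
The hunk above only touches the deprecation notes on Print, Printf and Println. For context, a hedged sketch of the non-deprecated V2 entry points those notes point to (the writer choices are arbitrary):

package main

import (
	"os"

	"google.golang.org/grpc/grpclog"
)

func main() {
	// Install a LoggerV2 instead of the deprecated SetLogger/Print* helpers:
	// info goes to stdout, warnings and errors to stderr.
	grpclog.SetLoggerV2(grpclog.NewLoggerV2(os.Stdout, os.Stderr, os.Stderr))
	grpclog.Infof("dialing %s", "localhost:9999")
}
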
diff --git a/vendor/google.golang.org/grpc/grpclog/logger.go b/vendor/google.golang.org/grpc/grpclog/logger.go
index 097494f71..d03b2397b 100644
--- a/vendor/google.golang.org/grpc/grpclog/logger.go
+++ b/vendor/google.golang.org/grpc/grpclog/logger.go
@@ -19,7 +19,6 @@
package grpclog
// Logger mimics golang's standard Logger as an interface.
-//
// Deprecated: use LoggerV2.
type Logger interface {
Fatal(args ...interface{})
@@ -32,7 +31,6 @@ type Logger interface {
// SetLogger sets the logger that is used in grpc. Call only from
// init() functions.
-//
// Deprecated: use SetLoggerV2.
func SetLogger(l Logger) {
logger = &loggerWrapper{Logger: l}
diff --git a/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go b/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go
index c2f2c7729..fdcbb9e0b 100644
--- a/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go
+++ b/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go
@@ -1,7 +1,17 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
-// source: grpc/health/v1/health.proto
+// source: grpc_health_v1/health.proto
-package grpc_health_v1 // import "google.golang.org/grpc/health/grpc_health_v1"
+/*
+Package grpc_health_v1 is a generated protocol buffer package.
+
+It is generated from these files:
+ grpc_health_v1/health.proto
+
+It has these top-level messages:
+ HealthCheckRequest
+ HealthCheckResponse
+*/
+package grpc_health_v1
import proto "github.com/golang/protobuf/proto"
import fmt "fmt"
@@ -26,62 +36,37 @@ const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
type HealthCheckResponse_ServingStatus int32
const (
- HealthCheckResponse_UNKNOWN HealthCheckResponse_ServingStatus = 0
- HealthCheckResponse_SERVING HealthCheckResponse_ServingStatus = 1
- HealthCheckResponse_NOT_SERVING HealthCheckResponse_ServingStatus = 2
- HealthCheckResponse_SERVICE_UNKNOWN HealthCheckResponse_ServingStatus = 3
+ HealthCheckResponse_UNKNOWN HealthCheckResponse_ServingStatus = 0
+ HealthCheckResponse_SERVING HealthCheckResponse_ServingStatus = 1
+ HealthCheckResponse_NOT_SERVING HealthCheckResponse_ServingStatus = 2
)
var HealthCheckResponse_ServingStatus_name = map[int32]string{
0: "UNKNOWN",
1: "SERVING",
2: "NOT_SERVING",
- 3: "SERVICE_UNKNOWN",
}
var HealthCheckResponse_ServingStatus_value = map[string]int32{
- "UNKNOWN": 0,
- "SERVING": 1,
- "NOT_SERVING": 2,
- "SERVICE_UNKNOWN": 3,
+ "UNKNOWN": 0,
+ "SERVING": 1,
+ "NOT_SERVING": 2,
}
func (x HealthCheckResponse_ServingStatus) String() string {
return proto.EnumName(HealthCheckResponse_ServingStatus_name, int32(x))
}
func (HealthCheckResponse_ServingStatus) EnumDescriptor() ([]byte, []int) {
- return fileDescriptor_health_6b1a06aa67f91efd, []int{1, 0}
+ return fileDescriptor0, []int{1, 0}
}
type HealthCheckRequest struct {
- Service string `protobuf:"bytes,1,opt,name=service,proto3" json:"service,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *HealthCheckRequest) Reset() { *m = HealthCheckRequest{} }
-func (m *HealthCheckRequest) String() string { return proto.CompactTextString(m) }
-func (*HealthCheckRequest) ProtoMessage() {}
-func (*HealthCheckRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor_health_6b1a06aa67f91efd, []int{0}
-}
-func (m *HealthCheckRequest) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_HealthCheckRequest.Unmarshal(m, b)
-}
-func (m *HealthCheckRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_HealthCheckRequest.Marshal(b, m, deterministic)
-}
-func (dst *HealthCheckRequest) XXX_Merge(src proto.Message) {
- xxx_messageInfo_HealthCheckRequest.Merge(dst, src)
-}
-func (m *HealthCheckRequest) XXX_Size() int {
- return xxx_messageInfo_HealthCheckRequest.Size(m)
-}
-func (m *HealthCheckRequest) XXX_DiscardUnknown() {
- xxx_messageInfo_HealthCheckRequest.DiscardUnknown(m)
+ Service string `protobuf:"bytes,1,opt,name=service" json:"service,omitempty"`
}
-var xxx_messageInfo_HealthCheckRequest proto.InternalMessageInfo
+func (m *HealthCheckRequest) Reset() { *m = HealthCheckRequest{} }
+func (m *HealthCheckRequest) String() string { return proto.CompactTextString(m) }
+func (*HealthCheckRequest) ProtoMessage() {}
+func (*HealthCheckRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
func (m *HealthCheckRequest) GetService() string {
if m != nil {
@@ -91,35 +76,13 @@ func (m *HealthCheckRequest) GetService() string {
}
type HealthCheckResponse struct {
- Status HealthCheckResponse_ServingStatus `protobuf:"varint,1,opt,name=status,proto3,enum=grpc.health.v1.HealthCheckResponse_ServingStatus" json:"status,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
+ Status HealthCheckResponse_ServingStatus `protobuf:"varint,1,opt,name=status,enum=grpc.health.v1.HealthCheckResponse_ServingStatus" json:"status,omitempty"`
}
-func (m *HealthCheckResponse) Reset() { *m = HealthCheckResponse{} }
-func (m *HealthCheckResponse) String() string { return proto.CompactTextString(m) }
-func (*HealthCheckResponse) ProtoMessage() {}
-func (*HealthCheckResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_health_6b1a06aa67f91efd, []int{1}
-}
-func (m *HealthCheckResponse) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_HealthCheckResponse.Unmarshal(m, b)
-}
-func (m *HealthCheckResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_HealthCheckResponse.Marshal(b, m, deterministic)
-}
-func (dst *HealthCheckResponse) XXX_Merge(src proto.Message) {
- xxx_messageInfo_HealthCheckResponse.Merge(dst, src)
-}
-func (m *HealthCheckResponse) XXX_Size() int {
- return xxx_messageInfo_HealthCheckResponse.Size(m)
-}
-func (m *HealthCheckResponse) XXX_DiscardUnknown() {
- xxx_messageInfo_HealthCheckResponse.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_HealthCheckResponse proto.InternalMessageInfo
+func (m *HealthCheckResponse) Reset() { *m = HealthCheckResponse{} }
+func (m *HealthCheckResponse) String() string { return proto.CompactTextString(m) }
+func (*HealthCheckResponse) ProtoMessage() {}
+func (*HealthCheckResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} }
func (m *HealthCheckResponse) GetStatus() HealthCheckResponse_ServingStatus {
if m != nil {
@@ -142,29 +105,10 @@ var _ grpc.ClientConn
// is compatible with the grpc package it is being compiled against.
const _ = grpc.SupportPackageIsVersion4
-// HealthClient is the client API for Health service.
-//
-// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
+// Client API for Health service
+
type HealthClient interface {
- // If the requested service is unknown, the call will fail with status
- // NOT_FOUND.
Check(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (*HealthCheckResponse, error)
- // Performs a watch for the serving status of the requested service.
- // The server will immediately send back a message indicating the current
- // serving status. It will then subsequently send a new message whenever
- // the service's serving status changes.
- //
- // If the requested service is unknown when the call is received, the
- // server will send a message setting the serving status to
- // SERVICE_UNKNOWN but will *not* terminate the call. If at some
- // future point, the serving status of the service becomes known, the
- // server will send a new message with the service's serving status.
- //
- // If the call terminates with status UNIMPLEMENTED, then clients
- // should assume this method is not supported and should not retry the
- // call. If the call terminates with any other status (including OK),
- // clients should retry the call with appropriate exponential backoff.
- Watch(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (Health_WatchClient, error)
}
type healthClient struct {
@@ -177,66 +121,17 @@ func NewHealthClient(cc *grpc.ClientConn) HealthClient {
func (c *healthClient) Check(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (*HealthCheckResponse, error) {
out := new(HealthCheckResponse)
- err := c.cc.Invoke(ctx, "/grpc.health.v1.Health/Check", in, out, opts...)
+ err := grpc.Invoke(ctx, "/grpc.health.v1.Health/Check", in, out, c.cc, opts...)
if err != nil {
return nil, err
}
return out, nil
}
-func (c *healthClient) Watch(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (Health_WatchClient, error) {
- stream, err := c.cc.NewStream(ctx, &_Health_serviceDesc.Streams[0], "/grpc.health.v1.Health/Watch", opts...)
- if err != nil {
- return nil, err
- }
- x := &healthWatchClient{stream}
- if err := x.ClientStream.SendMsg(in); err != nil {
- return nil, err
- }
- if err := x.ClientStream.CloseSend(); err != nil {
- return nil, err
- }
- return x, nil
-}
-
-type Health_WatchClient interface {
- Recv() (*HealthCheckResponse, error)
- grpc.ClientStream
-}
-
-type healthWatchClient struct {
- grpc.ClientStream
-}
+// Server API for Health service
-func (x *healthWatchClient) Recv() (*HealthCheckResponse, error) {
- m := new(HealthCheckResponse)
- if err := x.ClientStream.RecvMsg(m); err != nil {
- return nil, err
- }
- return m, nil
-}
-
-// HealthServer is the server API for Health service.
type HealthServer interface {
- // If the requested service is unknown, the call will fail with status
- // NOT_FOUND.
Check(context.Context, *HealthCheckRequest) (*HealthCheckResponse, error)
- // Performs a watch for the serving status of the requested service.
- // The server will immediately send back a message indicating the current
- // serving status. It will then subsequently send a new message whenever
- // the service's serving status changes.
- //
- // If the requested service is unknown when the call is received, the
- // server will send a message setting the serving status to
- // SERVICE_UNKNOWN but will *not* terminate the call. If at some
- // future point, the serving status of the service becomes known, the
- // server will send a new message with the service's serving status.
- //
- // If the call terminates with status UNIMPLEMENTED, then clients
- // should assume this method is not supported and should not retry the
- // call. If the call terminates with any other status (including OK),
- // clients should retry the call with appropriate exponential backoff.
- Watch(*HealthCheckRequest, Health_WatchServer) error
}
func RegisterHealthServer(s *grpc.Server, srv HealthServer) {
@@ -261,27 +156,6 @@ func _Health_Check_Handler(srv interface{}, ctx context.Context, dec func(interf
return interceptor(ctx, in, info, handler)
}
-func _Health_Watch_Handler(srv interface{}, stream grpc.ServerStream) error {
- m := new(HealthCheckRequest)
- if err := stream.RecvMsg(m); err != nil {
- return err
- }
- return srv.(HealthServer).Watch(m, &healthWatchServer{stream})
-}
-
-type Health_WatchServer interface {
- Send(*HealthCheckResponse) error
- grpc.ServerStream
-}
-
-type healthWatchServer struct {
- grpc.ServerStream
-}
-
-func (x *healthWatchServer) Send(m *HealthCheckResponse) error {
- return x.ServerStream.SendMsg(m)
-}
-
var _Health_serviceDesc = grpc.ServiceDesc{
ServiceName: "grpc.health.v1.Health",
HandlerType: (*HealthServer)(nil),
@@ -291,37 +165,26 @@ var _Health_serviceDesc = grpc.ServiceDesc{
Handler: _Health_Check_Handler,
},
},
- Streams: []grpc.StreamDesc{
- {
- StreamName: "Watch",
- Handler: _Health_Watch_Handler,
- ServerStreams: true,
- },
- },
- Metadata: "grpc/health/v1/health.proto",
+ Streams: []grpc.StreamDesc{},
+ Metadata: "grpc_health_v1/health.proto",
}
-func init() { proto.RegisterFile("grpc/health/v1/health.proto", fileDescriptor_health_6b1a06aa67f91efd) }
+func init() { proto.RegisterFile("grpc_health_v1/health.proto", fileDescriptor0) }
-var fileDescriptor_health_6b1a06aa67f91efd = []byte{
- // 297 bytes of a gzipped FileDescriptorProto
+var fileDescriptor0 = []byte{
+ // 213 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4e, 0x2f, 0x2a, 0x48,
- 0xd6, 0xcf, 0x48, 0x4d, 0xcc, 0x29, 0xc9, 0xd0, 0x2f, 0x33, 0x84, 0xb2, 0xf4, 0x0a, 0x8a, 0xf2,
- 0x4b, 0xf2, 0x85, 0xf8, 0x40, 0x92, 0x7a, 0x50, 0xa1, 0x32, 0x43, 0x25, 0x3d, 0x2e, 0x21, 0x0f,
- 0x30, 0xc7, 0x39, 0x23, 0x35, 0x39, 0x3b, 0x28, 0xb5, 0xb0, 0x34, 0xb5, 0xb8, 0x44, 0x48, 0x82,
- 0x8b, 0xbd, 0x38, 0xb5, 0xa8, 0x2c, 0x33, 0x39, 0x55, 0x82, 0x51, 0x81, 0x51, 0x83, 0x33, 0x08,
- 0xc6, 0x55, 0xda, 0xc8, 0xc8, 0x25, 0x8c, 0xa2, 0xa1, 0xb8, 0x20, 0x3f, 0xaf, 0x38, 0x55, 0xc8,
- 0x93, 0x8b, 0xad, 0xb8, 0x24, 0xb1, 0xa4, 0xb4, 0x18, 0xac, 0x81, 0xcf, 0xc8, 0x50, 0x0f, 0xd5,
- 0x22, 0x3d, 0x2c, 0x9a, 0xf4, 0x82, 0x41, 0x86, 0xe6, 0xa5, 0x07, 0x83, 0x35, 0x06, 0x41, 0x0d,
- 0x50, 0xf2, 0xe7, 0xe2, 0x45, 0x91, 0x10, 0xe2, 0xe6, 0x62, 0x0f, 0xf5, 0xf3, 0xf6, 0xf3, 0x0f,
- 0xf7, 0x13, 0x60, 0x00, 0x71, 0x82, 0x5d, 0x83, 0xc2, 0x3c, 0xfd, 0xdc, 0x05, 0x18, 0x85, 0xf8,
- 0xb9, 0xb8, 0xfd, 0xfc, 0x43, 0xe2, 0x61, 0x02, 0x4c, 0x42, 0xc2, 0x5c, 0xfc, 0x60, 0x8e, 0xb3,
- 0x6b, 0x3c, 0x4c, 0x0b, 0xb3, 0xd1, 0x3a, 0x46, 0x2e, 0x36, 0x88, 0xf5, 0x42, 0x01, 0x5c, 0xac,
- 0x60, 0x27, 0x08, 0x29, 0xe1, 0x75, 0x1f, 0x38, 0x14, 0xa4, 0x94, 0x89, 0xf0, 0x83, 0x50, 0x10,
- 0x17, 0x6b, 0x78, 0x62, 0x49, 0x72, 0x06, 0xd5, 0x4c, 0x34, 0x60, 0x74, 0x4a, 0xe4, 0x12, 0xcc,
- 0xcc, 0x47, 0x53, 0xea, 0xc4, 0x0d, 0x51, 0x1b, 0x00, 0x8a, 0xc6, 0x00, 0xc6, 0x28, 0x9d, 0xf4,
- 0xfc, 0xfc, 0xf4, 0x9c, 0x54, 0xbd, 0xf4, 0xfc, 0x9c, 0xc4, 0xbc, 0x74, 0xbd, 0xfc, 0xa2, 0x74,
- 0x7d, 0xe4, 0x78, 0x07, 0xb1, 0xe3, 0x21, 0xec, 0xf8, 0x32, 0xc3, 0x55, 0x4c, 0x7c, 0xee, 0x20,
- 0xd3, 0x20, 0x46, 0xe8, 0x85, 0x19, 0x26, 0xb1, 0x81, 0x93, 0x83, 0x31, 0x20, 0x00, 0x00, 0xff,
- 0xff, 0x12, 0x7d, 0x96, 0xcb, 0x2d, 0x02, 0x00, 0x00,
+ 0x8e, 0xcf, 0x48, 0x4d, 0xcc, 0x29, 0xc9, 0x88, 0x2f, 0x33, 0xd4, 0x87, 0xb0, 0xf4, 0x0a, 0x8a,
+ 0xf2, 0x4b, 0xf2, 0x85, 0xf8, 0x40, 0x92, 0x7a, 0x50, 0xa1, 0x32, 0x43, 0x25, 0x3d, 0x2e, 0x21,
+ 0x0f, 0x30, 0xc7, 0x39, 0x23, 0x35, 0x39, 0x3b, 0x28, 0xb5, 0xb0, 0x34, 0xb5, 0xb8, 0x44, 0x48,
+ 0x82, 0x8b, 0xbd, 0x38, 0xb5, 0xa8, 0x2c, 0x33, 0x39, 0x55, 0x82, 0x51, 0x81, 0x51, 0x83, 0x33,
+ 0x08, 0xc6, 0x55, 0x9a, 0xc3, 0xc8, 0x25, 0x8c, 0xa2, 0xa1, 0xb8, 0x20, 0x3f, 0xaf, 0x38, 0x55,
+ 0xc8, 0x93, 0x8b, 0xad, 0xb8, 0x24, 0xb1, 0xa4, 0xb4, 0x18, 0xac, 0x81, 0xcf, 0xc8, 0x50, 0x0f,
+ 0xd5, 0x22, 0x3d, 0x2c, 0x9a, 0xf4, 0x82, 0x41, 0x86, 0xe6, 0xa5, 0x07, 0x83, 0x35, 0x06, 0x41,
+ 0x0d, 0x50, 0xb2, 0xe2, 0xe2, 0x45, 0x91, 0x10, 0xe2, 0xe6, 0x62, 0x0f, 0xf5, 0xf3, 0xf6, 0xf3,
+ 0x0f, 0xf7, 0x13, 0x60, 0x00, 0x71, 0x82, 0x5d, 0x83, 0xc2, 0x3c, 0xfd, 0xdc, 0x05, 0x18, 0x85,
+ 0xf8, 0xb9, 0xb8, 0xfd, 0xfc, 0x43, 0xe2, 0x61, 0x02, 0x4c, 0x46, 0x51, 0x5c, 0x6c, 0x10, 0x8b,
+ 0x84, 0x02, 0xb8, 0x58, 0xc1, 0x96, 0x09, 0x29, 0xe1, 0x75, 0x09, 0xd8, 0xbf, 0x52, 0xca, 0x44,
+ 0xb8, 0x36, 0x89, 0x0d, 0x1c, 0x82, 0xc6, 0x80, 0x00, 0x00, 0x00, 0xff, 0xff, 0x53, 0x2b, 0x65,
+ 0x20, 0x60, 0x01, 0x00, 0x00,
}
diff --git a/vendor/google.golang.org/grpc/health/grpc_health_v1/health.proto b/vendor/google.golang.org/grpc/health/grpc_health_v1/health.proto
new file mode 100644
index 000000000..6072fdc3b
--- /dev/null
+++ b/vendor/google.golang.org/grpc/health/grpc_health_v1/health.proto
@@ -0,0 +1,34 @@
+// Copyright 2017 gRPC authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package grpc.health.v1;
+
+message HealthCheckRequest {
+ string service = 1;
+}
+
+message HealthCheckResponse {
+ enum ServingStatus {
+ UNKNOWN = 0;
+ SERVING = 1;
+ NOT_SERVING = 2;
+ }
+ ServingStatus status = 1;
+}
+
+service Health {
+ rpc Check(HealthCheckRequest) returns (HealthCheckResponse);
+}
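For reference only (an illustrative sketch, not part of the vendored patch): a Go client would typically exercise the Check RPC defined above roughly as follows. The dial address and timeout are assumptions.

package main

import (
	"context"
	"log"
	"time"

	"google.golang.org/grpc"
	healthpb "google.golang.org/grpc/health/grpc_health_v1"
)

func main() {
	// Illustrative address; any reachable gRPC server exposing the health service would do.
	conn, err := grpc.Dial("localhost:9999", grpc.WithInsecure())
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	defer conn.Close()

	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()

	// An empty Service string asks for the overall server status.
	resp, err := healthpb.NewHealthClient(conn).Check(ctx, &healthpb.HealthCheckRequest{Service: ""})
	if err != nil {
		log.Fatalf("health check: %v", err)
	}
	log.Printf("status: %v", resp.Status)
}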
diff --git a/vendor/google.golang.org/grpc/health/health.go b/vendor/google.golang.org/grpc/health/health.go
index 10666f2d3..30a78667e 100644
--- a/vendor/google.golang.org/grpc/health/health.go
+++ b/vendor/google.golang.org/grpc/health/health.go
@@ -16,7 +16,7 @@
*
*/
-//go:generate ./regenerate.sh
+//go:generate protoc --go_out=plugins=grpc:. grpc_health_v1/health.proto
// Package health provides some utility functions to health-check a server. The implementation
// is based on protobuf. Users need to write their own implementations if other IDLs are used.
@@ -36,14 +36,12 @@ type Server struct {
mu sync.Mutex
// statusMap stores the serving status of the services this Server monitors.
statusMap map[string]healthpb.HealthCheckResponse_ServingStatus
- updates map[string]map[healthpb.Health_WatchServer]chan healthpb.HealthCheckResponse_ServingStatus
}
// NewServer returns a new Server.
func NewServer() *Server {
return &Server{
- statusMap: map[string]healthpb.HealthCheckResponse_ServingStatus{"": healthpb.HealthCheckResponse_SERVING},
- updates: make(map[string]map[healthpb.Health_WatchServer]chan healthpb.HealthCheckResponse_ServingStatus),
+ statusMap: make(map[string]healthpb.HealthCheckResponse_ServingStatus),
}
}
@@ -51,67 +49,24 @@ func NewServer() *Server {
func (s *Server) Check(ctx context.Context, in *healthpb.HealthCheckRequest) (*healthpb.HealthCheckResponse, error) {
s.mu.Lock()
defer s.mu.Unlock()
- if servingStatus, ok := s.statusMap[in.Service]; ok {
+ if in.Service == "" {
+		// check the server's overall health status.
return &healthpb.HealthCheckResponse{
- Status: servingStatus,
+ Status: healthpb.HealthCheckResponse_SERVING,
}, nil
}
- return nil, status.Error(codes.NotFound, "unknown service")
-}
-
-// Watch implements `service Health`.
-func (s *Server) Watch(in *healthpb.HealthCheckRequest, stream healthpb.Health_WatchServer) error {
- service := in.Service
- // update channel is used for getting service status updates.
- update := make(chan healthpb.HealthCheckResponse_ServingStatus, 1)
- s.mu.Lock()
- // Puts the initial status to the channel.
- if servingStatus, ok := s.statusMap[service]; ok {
- update <- servingStatus
- } else {
- update <- healthpb.HealthCheckResponse_SERVICE_UNKNOWN
- }
-
- // Registers the update channel to the correct place in the updates map.
- if _, ok := s.updates[service]; !ok {
- s.updates[service] = make(map[healthpb.Health_WatchServer]chan healthpb.HealthCheckResponse_ServingStatus)
- }
- s.updates[service][stream] = update
- defer func() {
- s.mu.Lock()
- delete(s.updates[service], stream)
- s.mu.Unlock()
- }()
- s.mu.Unlock()
- for {
- select {
- // Status updated. Sends the up-to-date status to the client.
- case servingStatus := <-update:
- err := stream.Send(&healthpb.HealthCheckResponse{Status: servingStatus})
- if err != nil {
- return status.Error(codes.Canceled, "Stream has ended.")
- }
- // Context done. Removes the update channel from the updates map.
- case <-stream.Context().Done():
- return status.Error(codes.Canceled, "Stream has ended.")
- }
+ if status, ok := s.statusMap[in.Service]; ok {
+ return &healthpb.HealthCheckResponse{
+ Status: status,
+ }, nil
}
+ return nil, status.Error(codes.NotFound, "unknown service")
}
 // SetServingStatus is called when the serving status of a service needs to be reset,
 // or when a new service entry needs to be inserted into the statusMap.
-func (s *Server) SetServingStatus(service string, servingStatus healthpb.HealthCheckResponse_ServingStatus) {
+func (s *Server) SetServingStatus(service string, status healthpb.HealthCheckResponse_ServingStatus) {
s.mu.Lock()
- s.statusMap[service] = servingStatus
- for _, update := range s.updates[service] {
- // Clears previous updates, that are not sent to the client, from the channel.
- // This can happen if the client is not reading and the server gets flow control limited.
- select {
- case <-update:
- default:
- }
- // Puts the most recent update to the channel.
- update <- servingStatus
- }
+ s.statusMap[service] = status
s.mu.Unlock()
}
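For context, a minimal sketch of wiring up this (Watch-less) health implementation on the server side; the listen address and service name are illustrative assumptions.

package main

import (
	"log"
	"net"

	"google.golang.org/grpc"
	"google.golang.org/grpc/health"
	healthpb "google.golang.org/grpc/health/grpc_health_v1"
)

func main() {
	lis, err := net.Listen("tcp", "localhost:9999") // illustrative address
	if err != nil {
		log.Fatalf("listen: %v", err)
	}

	srv := grpc.NewServer()
	hs := health.NewServer()
	healthpb.RegisterHealthServer(srv, hs)

	// Mark a hypothetical service as serving; Check for that name now returns
	// SERVING, while unknown names return a NotFound error.
	hs.SetServingStatus("example.ExampleService", healthpb.HealthCheckResponse_SERVING)

	if err := srv.Serve(lis); err != nil {
		log.Fatalf("serve: %v", err)
	}
}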
diff --git a/vendor/google.golang.org/grpc/health/regenerate.sh b/vendor/google.golang.org/grpc/health/regenerate.sh
deleted file mode 100755
index b11eccb29..000000000
--- a/vendor/google.golang.org/grpc/health/regenerate.sh
+++ /dev/null
@@ -1,33 +0,0 @@
-#!/bin/bash
-# Copyright 2018 gRPC authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-set -eux -o pipefail
-
-TMP=$(mktemp -d)
-
-function finish {
- rm -rf "$TMP"
-}
-trap finish EXIT
-
-pushd "$TMP"
-mkdir -p grpc/health/v1
-curl https://raw.githubusercontent.com/grpc/grpc-proto/master/grpc/health/v1/health.proto > grpc/health/v1/health.proto
-
-protoc --go_out=plugins=grpc,paths=source_relative:. -I. grpc/health/v1/*.proto
-popd
-rm -f grpc_health_v1/*.pb.go
-cp "$TMP"/grpc/health/v1/*.pb.go grpc_health_v1/
-
diff --git a/vendor/google.golang.org/grpc/install_gae.sh b/vendor/google.golang.org/grpc/install_gae.sh
deleted file mode 100755
index 7c7bcada5..000000000
--- a/vendor/google.golang.org/grpc/install_gae.sh
+++ /dev/null
@@ -1,6 +0,0 @@
-#!/bin/bash
-
-TMP=$(mktemp -d /tmp/sdk.XXX) \
-&& curl -o $TMP.zip "https://storage.googleapis.com/appengine-sdks/featured/go_appengine_sdk_linux_amd64-1.9.68.zip" \
-&& unzip -q $TMP.zip -d $TMP \
-&& export PATH="$PATH:$TMP/go_appengine"
diff --git a/vendor/google.golang.org/grpc/interceptor.go b/vendor/google.golang.org/grpc/interceptor.go
index 1f6ef6780..06dc825b9 100644
--- a/vendor/google.golang.org/grpc/interceptor.go
+++ b/vendor/google.golang.org/grpc/interceptor.go
@@ -48,9 +48,7 @@ type UnaryServerInfo struct {
}
// UnaryHandler defines the handler invoked by UnaryServerInterceptor to complete the normal
-// execution of a unary RPC. If a UnaryHandler returns an error, it should be produced by the
-// status package, or else gRPC will use codes.Unknown as the status code and err.Error() as
-// the status message of the RPC.
+// execution of a unary RPC.
type UnaryHandler func(ctx context.Context, req interface{}) (interface{}, error)
// UnaryServerInterceptor provides a hook to intercept the execution of a unary RPC on the server. info
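A minimal sketch of a UnaryServerInterceptor that wraps the UnaryHandler described above; the logging behaviour is an illustrative assumption, not something this patch introduces.

package main

import (
	"context"
	"log"
	"time"

	"google.golang.org/grpc"
)

// loggingInterceptor calls the wrapped UnaryHandler and logs the method name,
// duration and error of each unary RPC.
func loggingInterceptor(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) {
	start := time.Now()
	resp, err := handler(ctx, req)
	log.Printf("%s took %v (err=%v)", info.FullMethod, time.Since(start), err)
	return resp, err
}

func main() {
	// The interceptor is installed via grpc.UnaryInterceptor when the server is built.
	_ = grpc.NewServer(grpc.UnaryInterceptor(loggingInterceptor))
}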
diff --git a/vendor/google.golang.org/grpc/internal/backoff/backoff.go b/vendor/google.golang.org/grpc/internal/backoff/backoff.go
deleted file mode 100644
index 1bd0cce5a..000000000
--- a/vendor/google.golang.org/grpc/internal/backoff/backoff.go
+++ /dev/null
@@ -1,78 +0,0 @@
-/*
- *
- * Copyright 2017 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-// Package backoff implements the backoff strategy for gRPC.
-//
-// This is kept in internal until the gRPC project decides whether or not to
-// allow alternative backoff strategies.
-package backoff
-
-import (
- "time"
-
- "google.golang.org/grpc/internal/grpcrand"
-)
-
-// Strategy defines the methodology for backing off after a grpc connection
-// failure.
-//
-type Strategy interface {
- // Backoff returns the amount of time to wait before the next retry given
- // the number of consecutive failures.
- Backoff(retries int) time.Duration
-}
-
-const (
- // baseDelay is the amount of time to wait before retrying after the first
- // failure.
- baseDelay = 1.0 * time.Second
- // factor is applied to the backoff after each retry.
- factor = 1.6
- // jitter provides a range to randomize backoff delays.
- jitter = 0.2
-)
-
-// Exponential implements exponential backoff algorithm as defined in
-// https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md.
-type Exponential struct {
- // MaxDelay is the upper bound of backoff delay.
- MaxDelay time.Duration
-}
-
-// Backoff returns the amount of time to wait before the next retry given the
-// number of retries.
-func (bc Exponential) Backoff(retries int) time.Duration {
- if retries == 0 {
- return baseDelay
- }
- backoff, max := float64(baseDelay), float64(bc.MaxDelay)
- for backoff < max && retries > 0 {
- backoff *= factor
- retries--
- }
- if backoff > max {
- backoff = max
- }
- // Randomize backoff delays so that if a cluster of requests start at
- // the same time, they won't operate in lockstep.
- backoff *= 1 + jitter*(grpcrand.Float64()*2-1)
- if backoff < 0 {
- return 0
- }
- return time.Duration(backoff)
-}
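A standalone sketch of the exponential backoff computation removed above, with math/rand standing in for the internal grpcrand package; the retry counts and MaxDelay used in main are illustrative.

package main

import (
	"fmt"
	"math/rand"
	"time"
)

// exponentialBackoff mirrors the deleted Exponential.Backoff logic: grow the
// delay by a constant factor per retry, cap it at maxDelay, then add jitter.
func exponentialBackoff(retries int, maxDelay time.Duration) time.Duration {
	const (
		baseDelay = 1.0 * time.Second
		factor    = 1.6
		jitter    = 0.2
	)
	if retries == 0 {
		return baseDelay
	}
	backoff, max := float64(baseDelay), float64(maxDelay)
	for backoff < max && retries > 0 {
		backoff *= factor
		retries--
	}
	if backoff > max {
		backoff = max
	}
	// Spread delays by +/-20% so a cluster of clients does not retry in lockstep.
	backoff *= 1 + jitter*(rand.Float64()*2-1)
	if backoff < 0 {
		return 0
	}
	return time.Duration(backoff)
}

func main() {
	for retries := 0; retries <= 5; retries++ {
		fmt.Printf("retry %d: wait ~%v\n", retries, exponentialBackoff(retries, 120*time.Second))
	}
}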
diff --git a/vendor/google.golang.org/grpc/internal/binarylog/binarylog.go b/vendor/google.golang.org/grpc/internal/binarylog/binarylog.go
deleted file mode 100644
index 4c80e7c1f..000000000
--- a/vendor/google.golang.org/grpc/internal/binarylog/binarylog.go
+++ /dev/null
@@ -1,141 +0,0 @@
-/*
- *
- * Copyright 2018 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-// Package binarylog implements binary logging as defined in
-// https://github.com/grpc/proposal/blob/master/A16-binary-logging.md.
-package binarylog
-
-import (
- "fmt"
- "os"
-
- "google.golang.org/grpc/grpclog"
-)
-
-// Logger is the global binary logger for the binary. It should be
-// built at init time from the configuration (environment variable or flags).
-//
-// It is used to get a methodLogger for each individual method.
-var Logger *logger
-
-func init() {
- const envStr = "GRPC_BINARY_LOG_FILTER"
- configStr := os.Getenv(envStr)
- Logger = newLoggerFromConfigString(configStr)
-}
-
-type methodLoggerConfig struct {
- // Max length of header and message.
- hdr, msg uint64
-}
-
-type logger struct {
- all *methodLoggerConfig
- services map[string]*methodLoggerConfig
- methods map[string]*methodLoggerConfig
-
- blacklist map[string]struct{}
-}
-
-// newEmptyLogger creates an empty logger. The map fields need to be filled in
-// using the set* functions.
-func newEmptyLogger() *logger {
- return &logger{}
-}
-
-// Set method logger for "*".
-func (l *logger) setDefaultMethodLogger(ml *methodLoggerConfig) error {
- if l.all != nil {
- return fmt.Errorf("conflicting global rules found")
- }
- l.all = ml
- return nil
-}
-
-// Set method logger for "service/*".
-//
-// New methodLogger with same service overrides the old one.
-func (l *logger) setServiceMethodLogger(service string, ml *methodLoggerConfig) error {
- if _, ok := l.services[service]; ok {
- return fmt.Errorf("conflicting rules for service %v found", service)
- }
- if l.services == nil {
- l.services = make(map[string]*methodLoggerConfig)
- }
- l.services[service] = ml
- return nil
-}
-
-// Set method logger for "service/method".
-//
-// New methodLogger with same method overrides the old one.
-func (l *logger) setMethodMethodLogger(method string, ml *methodLoggerConfig) error {
- if _, ok := l.blacklist[method]; ok {
- return fmt.Errorf("conflicting rules for method %v found", method)
- }
- if _, ok := l.methods[method]; ok {
- return fmt.Errorf("conflicting rules for method %v found", method)
- }
- if l.methods == nil {
- l.methods = make(map[string]*methodLoggerConfig)
- }
- l.methods[method] = ml
- return nil
-}
-
-// Set blacklist method for "-service/method".
-func (l *logger) setBlacklist(method string) error {
- if _, ok := l.blacklist[method]; ok {
- return fmt.Errorf("conflicting rules for method %v found", method)
- }
- if _, ok := l.methods[method]; ok {
- return fmt.Errorf("conflicting rules for method %v found", method)
- }
- if l.blacklist == nil {
- l.blacklist = make(map[string]struct{})
- }
- l.blacklist[method] = struct{}{}
- return nil
-}
-
-// GetMethodLogger returns the methodLogger for the given methodName.
-//
-// methodName should be in the format of "/service/method".
-//
-// Each methodLogger returned by this method is a new instance. This is to
-// generate sequence id within the call.
-func (l *logger) GetMethodLogger(methodName string) *MethodLogger {
- s, m, err := parseMethodName(methodName)
- if err != nil {
- grpclog.Infof("binarylogging: failed to parse %q: %v", methodName, err)
- return nil
- }
- if ml, ok := l.methods[s+"/"+m]; ok {
- return newMethodLogger(ml.hdr, ml.msg)
- }
- if _, ok := l.blacklist[s+"/"+m]; ok {
- return nil
- }
- if ml, ok := l.services[s]; ok {
- return newMethodLogger(ml.hdr, ml.msg)
- }
- if l.all == nil {
- return nil
- }
- return newMethodLogger(l.all.hdr, l.all.msg)
-}
diff --git a/vendor/google.golang.org/grpc/internal/binarylog/env_config.go b/vendor/google.golang.org/grpc/internal/binarylog/env_config.go
deleted file mode 100644
index ad234706c..000000000
--- a/vendor/google.golang.org/grpc/internal/binarylog/env_config.go
+++ /dev/null
@@ -1,206 +0,0 @@
-/*
- *
- * Copyright 2018 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package binarylog
-
-import (
- "errors"
- "fmt"
- "regexp"
- "strconv"
- "strings"
-
- "google.golang.org/grpc/grpclog"
-)
-
-// newLoggerFromConfigString reads the string and builds a logger.
-//
-// Example filter config strings:
-// - "" Nothing will be logged
-// - "*" All headers and messages will be fully logged.
-// - "*{h}" Only headers will be logged.
-// - "*{m:256}" Only the first 256 bytes of each message will be logged.
-// - "Foo/*" Logs every method in service Foo
-// - "Foo/*,-Foo/Bar" Logs every method in service Foo except method /Foo/Bar
-// - "Foo/*,Foo/Bar{m:256}" Logs the first 256 bytes of each message in method
-// /Foo/Bar, logs all headers and messages in every other method in service
-// Foo.
-//
-// If two configs exist for the same method or service, the one specified
-// later overrides the previous config.
-func newLoggerFromConfigString(s string) *logger {
- l := newEmptyLogger()
- methods := strings.Split(s, ",")
- for _, method := range methods {
- if err := l.fillMethodLoggerWithConfigString(method); err != nil {
- grpclog.Warningf("failed to parse binary log config: %v", err)
- return nil
- }
- }
- return l
-}
-
-// fillMethodLoggerWithConfigString parses config, creates methodLogger and adds
-// it to the right map in the logger.
-func (l *logger) fillMethodLoggerWithConfigString(config string) error {
- // "" is invalid.
- if config == "" {
- return errors.New("empty string is not a valid method binary logging config")
- }
-
- // "-service/method", blacklist, no * or {} allowed.
- if config[0] == '-' {
- s, m, suffix, err := parseMethodConfigAndSuffix(config[1:])
- if err != nil {
- return fmt.Errorf("invalid config: %q, %v", config, err)
- }
- if m == "*" {
-			return fmt.Errorf("invalid config: %q, %v", config, "* not allowed in blacklist config")
- }
- if suffix != "" {
- return fmt.Errorf("invalid config: %q, %v", config, "header/message limit not allowed in blacklist config")
- }
- if err := l.setBlacklist(s + "/" + m); err != nil {
- return fmt.Errorf("invalid config: %v", err)
- }
- return nil
- }
-
- // "*{h:256;m:256}"
- if config[0] == '*' {
- hdr, msg, err := parseHeaderMessageLengthConfig(config[1:])
- if err != nil {
- return fmt.Errorf("invalid config: %q, %v", config, err)
- }
- if err := l.setDefaultMethodLogger(&methodLoggerConfig{hdr: hdr, msg: msg}); err != nil {
- return fmt.Errorf("invalid config: %v", err)
- }
- return nil
- }
-
- s, m, suffix, err := parseMethodConfigAndSuffix(config)
- if err != nil {
- return fmt.Errorf("invalid config: %q, %v", config, err)
- }
- hdr, msg, err := parseHeaderMessageLengthConfig(suffix)
- if err != nil {
- return fmt.Errorf("invalid header/message length config: %q, %v", suffix, err)
- }
- if m == "*" {
- if err := l.setServiceMethodLogger(s, &methodLoggerConfig{hdr: hdr, msg: msg}); err != nil {
- return fmt.Errorf("invalid config: %v", err)
- }
- } else {
- if err := l.setMethodMethodLogger(s+"/"+m, &methodLoggerConfig{hdr: hdr, msg: msg}); err != nil {
- return fmt.Errorf("invalid config: %v", err)
- }
- }
- return nil
-}
-
-const (
- // TODO: this const is only used by env_config now. But could be useful for
- // other config. Move to binarylog.go if necessary.
- maxUInt = ^uint64(0)
-
- // For "p.s/m" plus any suffix. Suffix will be parsed again. See test for
- // expected output.
- longMethodConfigRegexpStr = `^([\w./]+)/((?:\w+)|[*])(.+)?$`
-
- // For suffix from above, "{h:123,m:123}". See test for expected output.
- optionalLengthRegexpStr = `(?::(\d+))?` // Optional ":123".
- headerConfigRegexpStr = `^{h` + optionalLengthRegexpStr + `}$`
- messageConfigRegexpStr = `^{m` + optionalLengthRegexpStr + `}$`
- headerMessageConfigRegexpStr = `^{h` + optionalLengthRegexpStr + `;m` + optionalLengthRegexpStr + `}$`
-)
-
-var (
- longMethodConfigRegexp = regexp.MustCompile(longMethodConfigRegexpStr)
- headerConfigRegexp = regexp.MustCompile(headerConfigRegexpStr)
- messageConfigRegexp = regexp.MustCompile(messageConfigRegexpStr)
- headerMessageConfigRegexp = regexp.MustCompile(headerMessageConfigRegexpStr)
-)
-
-// Turn "service/method{h;m}" into "service", "method", "{h;m}".
-func parseMethodConfigAndSuffix(c string) (service, method, suffix string, _ error) {
- // Regexp result:
- //
- // in: "p.s/m{h:123,m:123}",
- // out: []string{"p.s/m{h:123,m:123}", "p.s", "m", "{h:123,m:123}"},
- match := longMethodConfigRegexp.FindStringSubmatch(c)
- if match == nil {
- return "", "", "", fmt.Errorf("%q contains invalid substring", c)
- }
- service = match[1]
- method = match[2]
- suffix = match[3]
- return
-}
-
-// Turn "{h:123;m:345}" into 123, 345.
-//
-// Return maxUInt if length is unspecified.
-func parseHeaderMessageLengthConfig(c string) (hdrLenStr, msgLenStr uint64, err error) {
- if c == "" {
- return maxUInt, maxUInt, nil
- }
- // Header config only.
- if match := headerConfigRegexp.FindStringSubmatch(c); match != nil {
- if s := match[1]; s != "" {
- hdrLenStr, err = strconv.ParseUint(s, 10, 64)
- if err != nil {
- return 0, 0, fmt.Errorf("failed to convert %q to uint", s)
- }
- return hdrLenStr, 0, nil
- }
- return maxUInt, 0, nil
- }
-
- // Message config only.
- if match := messageConfigRegexp.FindStringSubmatch(c); match != nil {
- if s := match[1]; s != "" {
- msgLenStr, err = strconv.ParseUint(s, 10, 64)
- if err != nil {
-				return 0, 0, fmt.Errorf("failed to convert %q to uint", s)
- }
- return 0, msgLenStr, nil
- }
- return 0, maxUInt, nil
- }
-
- // Header and message config both.
- if match := headerMessageConfigRegexp.FindStringSubmatch(c); match != nil {
- // Both hdr and msg are specified, but one or two of them might be empty.
- hdrLenStr = maxUInt
- msgLenStr = maxUInt
- if s := match[1]; s != "" {
- hdrLenStr, err = strconv.ParseUint(s, 10, 64)
- if err != nil {
-				return 0, 0, fmt.Errorf("failed to convert %q to uint", s)
- }
- }
- if s := match[2]; s != "" {
- msgLenStr, err = strconv.ParseUint(s, 10, 64)
- if err != nil {
-				return 0, 0, fmt.Errorf("failed to convert %q to uint", s)
- }
- }
- return hdrLenStr, msgLenStr, nil
- }
- return 0, 0, fmt.Errorf("%q contains invalid substring", c)
-}
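For illustration, a small program that applies the same longMethodConfigRegexp the deleted env_config.go uses to split a filter entry into service, method and length suffix; the sample inputs are assumptions.

package main

import (
	"fmt"
	"regexp"
)

// Identical to longMethodConfigRegexpStr in the deleted file: a service name,
// "/", a method name or "*", and an optional "{h:...;m:...}" suffix.
var longMethodConfigRegexp = regexp.MustCompile(`^([\w./]+)/((?:\w+)|[*])(.+)?$`)

func main() {
	for _, cfg := range []string{"Foo/Bar", "Foo/*", "p.s/m{h:123;m:456}"} {
		m := longMethodConfigRegexp.FindStringSubmatch(cfg)
		if m == nil {
			fmt.Printf("%q: no match\n", cfg)
			continue
		}
		fmt.Printf("%q: service=%q method=%q suffix=%q\n", cfg, m[1], m[2], m[3])
	}
}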
diff --git a/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go b/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go
deleted file mode 100644
index 9590b8589..000000000
--- a/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go
+++ /dev/null
@@ -1,426 +0,0 @@
-/*
- *
- * Copyright 2018 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package binarylog
-
-import (
- "net"
- "strings"
- "sync/atomic"
- "time"
-
- "github.com/golang/protobuf/proto"
- "github.com/golang/protobuf/ptypes"
- pb "google.golang.org/grpc/binarylog/grpc_binarylog_v1"
- "google.golang.org/grpc/grpclog"
- "google.golang.org/grpc/metadata"
- "google.golang.org/grpc/status"
-)
-
-type callIDGenerator struct {
- id uint64
-}
-
-func (g *callIDGenerator) next() uint64 {
- id := atomic.AddUint64(&g.id, 1)
- return id
-}
-
-// reset is for testing only, and doesn't need to be thread safe.
-func (g *callIDGenerator) reset() {
- g.id = 0
-}
-
-var idGen callIDGenerator
-
-// MethodLogger is the sub-logger for each method.
-type MethodLogger struct {
- headerMaxLen, messageMaxLen uint64
-
- callID uint64
- idWithinCallGen *callIDGenerator
-
-	sink Sink // TODO(blog): make this pluggable.
-}
-
-func newMethodLogger(h, m uint64) *MethodLogger {
- return &MethodLogger{
- headerMaxLen: h,
- messageMaxLen: m,
-
- callID: idGen.next(),
- idWithinCallGen: &callIDGenerator{},
-
-		sink: defaultSink, // TODO(blog): make it pluggable.
- }
-}
-
-// Log creates a proto binary log entry, and logs it to the sink.
-func (ml *MethodLogger) Log(c LogEntryConfig) {
- m := c.toProto()
- timestamp, _ := ptypes.TimestampProto(time.Now())
- m.Timestamp = timestamp
- m.CallId = ml.callID
- m.SequenceIdWithinCall = ml.idWithinCallGen.next()
-
- switch pay := m.Payload.(type) {
- case *pb.GrpcLogEntry_ClientHeader:
- m.PayloadTruncated = ml.truncateMetadata(pay.ClientHeader.GetMetadata())
- case *pb.GrpcLogEntry_ServerHeader:
- m.PayloadTruncated = ml.truncateMetadata(pay.ServerHeader.GetMetadata())
- case *pb.GrpcLogEntry_Message:
- m.PayloadTruncated = ml.truncateMessage(pay.Message)
- }
-
- ml.sink.Write(m)
-}
-
-func (ml *MethodLogger) truncateMetadata(mdPb *pb.Metadata) (truncated bool) {
- if ml.headerMaxLen == maxUInt {
- return false
- }
- var (
- bytesLimit = ml.headerMaxLen
- index int
- )
- // At the end of the loop, index will be the first entry where the total
- // size is greater than the limit:
- //
- // len(entry[:index]) <= ml.hdr && len(entry[:index+1]) > ml.hdr.
- for ; index < len(mdPb.Entry); index++ {
- entry := mdPb.Entry[index]
- if entry.Key == "grpc-trace-bin" {
- // "grpc-trace-bin" is a special key. It's kept in the log entry,
- // but not counted towards the size limit.
- continue
- }
- currentEntryLen := uint64(len(entry.Value))
- if currentEntryLen > bytesLimit {
- break
- }
- bytesLimit -= currentEntryLen
- }
- truncated = index < len(mdPb.Entry)
- mdPb.Entry = mdPb.Entry[:index]
- return truncated
-}
-
-func (ml *MethodLogger) truncateMessage(msgPb *pb.Message) (truncated bool) {
- if ml.messageMaxLen == maxUInt {
- return false
- }
- if ml.messageMaxLen >= uint64(len(msgPb.Data)) {
- return false
- }
- msgPb.Data = msgPb.Data[:ml.messageMaxLen]
- return true
-}
-
-// LogEntryConfig represents the configuration for binary log entry.
-type LogEntryConfig interface {
- toProto() *pb.GrpcLogEntry
-}
-
-// ClientHeader configs the binary log entry to be a ClientHeader entry.
-type ClientHeader struct {
- OnClientSide bool
- Header metadata.MD
- MethodName string
- Authority string
- Timeout time.Duration
- // PeerAddr is required only when it's on server side.
- PeerAddr net.Addr
-}
-
-func (c *ClientHeader) toProto() *pb.GrpcLogEntry {
- // This function doesn't need to set all the fields (e.g. seq ID). The Log
- // function will set the fields when necessary.
- clientHeader := &pb.ClientHeader{
- Metadata: mdToMetadataProto(c.Header),
- MethodName: c.MethodName,
- Authority: c.Authority,
- }
- if c.Timeout > 0 {
- clientHeader.Timeout = ptypes.DurationProto(c.Timeout)
- }
- ret := &pb.GrpcLogEntry{
- Type: pb.GrpcLogEntry_EVENT_TYPE_CLIENT_HEADER,
- Payload: &pb.GrpcLogEntry_ClientHeader{
- ClientHeader: clientHeader,
- },
- }
- if c.OnClientSide {
- ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT
- } else {
- ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER
- }
- if c.PeerAddr != nil {
- ret.Peer = addrToProto(c.PeerAddr)
- }
- return ret
-}
-
-// ServerHeader configs the binary log entry to be a ServerHeader entry.
-type ServerHeader struct {
- OnClientSide bool
- Header metadata.MD
- // PeerAddr is required only when it's on client side.
- PeerAddr net.Addr
-}
-
-func (c *ServerHeader) toProto() *pb.GrpcLogEntry {
- ret := &pb.GrpcLogEntry{
- Type: pb.GrpcLogEntry_EVENT_TYPE_SERVER_HEADER,
- Payload: &pb.GrpcLogEntry_ServerHeader{
- ServerHeader: &pb.ServerHeader{
- Metadata: mdToMetadataProto(c.Header),
- },
- },
- }
- if c.OnClientSide {
- ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT
- } else {
- ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER
- }
- if c.PeerAddr != nil {
- ret.Peer = addrToProto(c.PeerAddr)
- }
- return ret
-}
-
-// ClientMessage configs the binary log entry to be a ClientMessage entry.
-type ClientMessage struct {
- OnClientSide bool
- // Message should only be a proto.Message. Could add support for other
- // message types in the future.
- Message interface{}
-}
-
-func (c *ClientMessage) toProto() *pb.GrpcLogEntry {
- var (
- data []byte
- err error
- )
- if m, ok := c.Message.(proto.Message); ok {
- data, err = proto.Marshal(m)
- if err != nil {
- grpclog.Infof("binarylogging: failed to marshal proto message: %v", err)
- }
- } else if b, ok := c.Message.([]byte); ok {
- data = b
- } else {
- grpclog.Infof("binarylogging: message to log is neither proto.message nor []byte")
- }
- ret := &pb.GrpcLogEntry{
- Type: pb.GrpcLogEntry_EVENT_TYPE_CLIENT_MESSAGE,
- Payload: &pb.GrpcLogEntry_Message{
- Message: &pb.Message{
- Length: uint32(len(data)),
- Data: data,
- },
- },
- }
- if c.OnClientSide {
- ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT
- } else {
- ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER
- }
- return ret
-}
-
-// ServerMessage configs the binary log entry to be a ServerMessage entry.
-type ServerMessage struct {
- OnClientSide bool
- // Message should only be a proto.Message. Could add support for other
- // message types in the future.
- Message interface{}
-}
-
-func (c *ServerMessage) toProto() *pb.GrpcLogEntry {
- var (
- data []byte
- err error
- )
- if m, ok := c.Message.(proto.Message); ok {
- data, err = proto.Marshal(m)
- if err != nil {
- grpclog.Infof("binarylogging: failed to marshal proto message: %v", err)
- }
- } else if b, ok := c.Message.([]byte); ok {
- data = b
- } else {
- grpclog.Infof("binarylogging: message to log is neither proto.message nor []byte")
- }
- ret := &pb.GrpcLogEntry{
- Type: pb.GrpcLogEntry_EVENT_TYPE_SERVER_MESSAGE,
- Payload: &pb.GrpcLogEntry_Message{
- Message: &pb.Message{
- Length: uint32(len(data)),
- Data: data,
- },
- },
- }
- if c.OnClientSide {
- ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT
- } else {
- ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER
- }
- return ret
-}
-
-// ClientHalfClose configs the binary log entry to be a ClientHalfClose entry.
-type ClientHalfClose struct {
- OnClientSide bool
-}
-
-func (c *ClientHalfClose) toProto() *pb.GrpcLogEntry {
- ret := &pb.GrpcLogEntry{
- Type: pb.GrpcLogEntry_EVENT_TYPE_CLIENT_HALF_CLOSE,
- Payload: nil, // No payload here.
- }
- if c.OnClientSide {
- ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT
- } else {
- ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER
- }
- return ret
-}
-
-// ServerTrailer configs the binary log entry to be a ServerTrailer entry.
-type ServerTrailer struct {
- OnClientSide bool
- Trailer metadata.MD
- // Err is the status error.
- Err error
- // PeerAddr is required only when it's on client side and the RPC is trailer
- // only.
- PeerAddr net.Addr
-}
-
-func (c *ServerTrailer) toProto() *pb.GrpcLogEntry {
- st, ok := status.FromError(c.Err)
- if !ok {
- grpclog.Info("binarylogging: error in trailer is not a status error")
- }
- var (
- detailsBytes []byte
- err error
- )
- stProto := st.Proto()
- if stProto != nil && len(stProto.Details) != 0 {
- detailsBytes, err = proto.Marshal(stProto)
- if err != nil {
- grpclog.Infof("binarylogging: failed to marshal status proto: %v", err)
- }
- }
- ret := &pb.GrpcLogEntry{
- Type: pb.GrpcLogEntry_EVENT_TYPE_SERVER_TRAILER,
- Payload: &pb.GrpcLogEntry_Trailer{
- Trailer: &pb.Trailer{
- Metadata: mdToMetadataProto(c.Trailer),
- StatusCode: uint32(st.Code()),
- StatusMessage: st.Message(),
- StatusDetails: detailsBytes,
- },
- },
- }
- if c.OnClientSide {
- ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT
- } else {
- ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER
- }
- if c.PeerAddr != nil {
- ret.Peer = addrToProto(c.PeerAddr)
- }
- return ret
-}
-
-// Cancel configs the binary log entry to be a Cancel entry.
-type Cancel struct {
- OnClientSide bool
-}
-
-func (c *Cancel) toProto() *pb.GrpcLogEntry {
- ret := &pb.GrpcLogEntry{
- Type: pb.GrpcLogEntry_EVENT_TYPE_CANCEL,
- Payload: nil,
- }
- if c.OnClientSide {
- ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT
- } else {
- ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER
- }
- return ret
-}
-
-// metadataKeyOmit returns whether the metadata entry with this key should be
-// omitted.
-func metadataKeyOmit(key string) bool {
- switch key {
- case "lb-token", ":path", ":authority", "content-encoding", "user-agent", "te":
- return true
- case "grpc-trace-bin": // grpc-trace-bin is special because it's visiable to users.
- return false
- }
- if strings.HasPrefix(key, "grpc-") {
- return true
- }
- return false
-}
-
-func mdToMetadataProto(md metadata.MD) *pb.Metadata {
- ret := &pb.Metadata{}
- for k, vv := range md {
- if metadataKeyOmit(k) {
- continue
- }
- for _, v := range vv {
- ret.Entry = append(ret.Entry,
- &pb.MetadataEntry{
- Key: k,
- Value: []byte(v),
- },
- )
- }
- }
- return ret
-}
-
-func addrToProto(addr net.Addr) *pb.Address {
- ret := &pb.Address{}
- switch a := addr.(type) {
- case *net.TCPAddr:
- if a.IP.To4() != nil {
- ret.Type = pb.Address_TYPE_IPV4
- } else if a.IP.To16() != nil {
- ret.Type = pb.Address_TYPE_IPV6
- } else {
- ret.Type = pb.Address_TYPE_UNKNOWN
- // Do not set address and port fields.
- break
- }
- ret.Address = a.IP.String()
- ret.IpPort = uint32(a.Port)
- case *net.UnixAddr:
- ret.Type = pb.Address_TYPE_UNIX
- ret.Address = a.String()
- default:
- ret.Type = pb.Address_TYPE_UNKNOWN
- }
- return ret
-}
diff --git a/vendor/google.golang.org/grpc/internal/binarylog/regenerate.sh b/vendor/google.golang.org/grpc/internal/binarylog/regenerate.sh
deleted file mode 100755
index 113d40cbe..000000000
--- a/vendor/google.golang.org/grpc/internal/binarylog/regenerate.sh
+++ /dev/null
@@ -1,33 +0,0 @@
-#!/bin/bash
-# Copyright 2018 gRPC authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-set -eux -o pipefail
-
-TMP=$(mktemp -d)
-
-function finish {
- rm -rf "$TMP"
-}
-trap finish EXIT
-
-pushd "$TMP"
-mkdir -p grpc/binarylog/grpc_binarylog_v1
-curl https://raw.githubusercontent.com/grpc/grpc-proto/master/grpc/binlog/v1/binarylog.proto > grpc/binarylog/grpc_binarylog_v1/binarylog.proto
-
-protoc --go_out=plugins=grpc,paths=source_relative:. -I. grpc/binarylog/grpc_binarylog_v1/*.proto
-popd
-rm -f ./grpc_binarylog_v1/*.pb.go
-cp "$TMP"/grpc/binarylog/grpc_binarylog_v1/*.pb.go ../../binarylog/grpc_binarylog_v1/
-
diff --git a/vendor/google.golang.org/grpc/internal/binarylog/sink.go b/vendor/google.golang.org/grpc/internal/binarylog/sink.go
deleted file mode 100644
index 05c694a58..000000000
--- a/vendor/google.golang.org/grpc/internal/binarylog/sink.go
+++ /dev/null
@@ -1,64 +0,0 @@
-/*
- *
- * Copyright 2018 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package binarylog
-
-import (
- "io"
-
- "github.com/golang/protobuf/proto"
- pb "google.golang.org/grpc/binarylog/grpc_binarylog_v1"
- "google.golang.org/grpc/grpclog"
-)
-
-var (
- defaultSink Sink = &noopSink{} // TODO(blog): change this default (file in /tmp).
-)
-
-// SetDefaultSink sets the sink where binary logs will be written to.
-//
-// Not thread safe. Only set during initialization.
-func SetDefaultSink(s Sink) {
- defaultSink = s
-}
-
-// Sink writes log entry into the binary log sink.
-type Sink interface {
- Write(*pb.GrpcLogEntry)
-}
-
-type noopSink struct{}
-
-func (ns *noopSink) Write(*pb.GrpcLogEntry) {}
-
-// NewWriterSink creates a binary log sink with the given writer.
-func NewWriterSink(w io.Writer) Sink {
- return &writerSink{out: w}
-}
-
-type writerSink struct {
- out io.Writer
-}
-
-func (fs *writerSink) Write(e *pb.GrpcLogEntry) {
- b, err := proto.Marshal(e)
- if err != nil {
- grpclog.Infof("binary logging: failed to marshal proto message: %v", err)
- }
- fs.out.Write(b)
-}
diff --git a/vendor/google.golang.org/grpc/internal/binarylog/util.go b/vendor/google.golang.org/grpc/internal/binarylog/util.go
deleted file mode 100644
index 15dc7803d..000000000
--- a/vendor/google.golang.org/grpc/internal/binarylog/util.go
+++ /dev/null
@@ -1,41 +0,0 @@
-/*
- *
- * Copyright 2018 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package binarylog
-
-import (
- "errors"
- "strings"
-)
-
-// parseMethodName splits service and method from the input. It expects format
-// "/service/method".
-//
-// TODO: move to internal/grpcutil.
-func parseMethodName(methodName string) (service, method string, _ error) {
- if !strings.HasPrefix(methodName, "/") {
- return "", "", errors.New("invalid method name: should start with /")
- }
- methodName = methodName[1:]
-
- pos := strings.LastIndex(methodName, "/")
- if pos < 0 {
- return "", "", errors.New("invalid method name: suffix /method is missing")
- }
- return methodName[:pos], methodName[pos+1:], nil
-}
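A self-contained sketch mirroring the parseMethodName helper removed above, applied to an example full method name (the sample input is an assumption).

package main

import (
	"errors"
	"fmt"
	"strings"
)

// parseMethodName splits a full method name of the form "/service/method"
// into its service and method parts, exactly as the deleted helper does.
func parseMethodName(methodName string) (service, method string, _ error) {
	if !strings.HasPrefix(methodName, "/") {
		return "", "", errors.New("invalid method name: should start with /")
	}
	methodName = methodName[1:]
	pos := strings.LastIndex(methodName, "/")
	if pos < 0 {
		return "", "", errors.New("invalid method name: suffix /method is missing")
	}
	return methodName[:pos], methodName[pos+1:], nil
}

func main() {
	s, m, err := parseMethodName("/grpc.health.v1.Health/Check")
	fmt.Println(s, m, err) // grpc.health.v1.Health Check <nil>
}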
diff --git a/vendor/google.golang.org/grpc/internal/channelz/funcs.go b/vendor/google.golang.org/grpc/internal/channelz/funcs.go
deleted file mode 100644
index 6e729fa63..000000000
--- a/vendor/google.golang.org/grpc/internal/channelz/funcs.go
+++ /dev/null
@@ -1,662 +0,0 @@
-/*
- *
- * Copyright 2018 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-// Package channelz defines APIs for enabling channelz service, entry
-// registration/deletion, and accessing channelz data. It also defines channelz
-// metric struct formats.
-//
-// All APIs in this package are experimental.
-package channelz
-
-import (
- "sort"
- "sync"
- "sync/atomic"
- "time"
-
- "google.golang.org/grpc/grpclog"
-)
-
-const (
- defaultMaxTraceEntry int32 = 30
-)
-
-var (
- db dbWrapper
- idGen idGenerator
- // EntryPerPage defines the number of channelz entries to be shown on a web page.
- EntryPerPage = 50
- curState int32
- maxTraceEntry = defaultMaxTraceEntry
-)
-
-// TurnOn turns on channelz data collection.
-func TurnOn() {
- if !IsOn() {
- NewChannelzStorage()
- atomic.StoreInt32(&curState, 1)
- }
-}
-
-// IsOn returns whether channelz data collection is on.
-func IsOn() bool {
- return atomic.CompareAndSwapInt32(&curState, 1, 1)
-}
-
-// SetMaxTraceEntry sets the maximum number of trace entries per entity (i.e. channel/subchannel).
-// Setting it to 0 will disable channel tracing.
-func SetMaxTraceEntry(i int32) {
- atomic.StoreInt32(&maxTraceEntry, i)
-}
-
-// ResetMaxTraceEntryToDefault resets the maximum number of trace entries per entity to the default.
-func ResetMaxTraceEntryToDefault() {
- atomic.StoreInt32(&maxTraceEntry, defaultMaxTraceEntry)
-}
-
-func getMaxTraceEntry() int {
- i := atomic.LoadInt32(&maxTraceEntry)
- return int(i)
-}
-
-// dbWrapper wraps around a reference to the internal channelz data storage, and
-// provides synchronized functionality to set and get the reference.
-type dbWrapper struct {
- mu sync.RWMutex
- DB *channelMap
-}
-
-func (d *dbWrapper) set(db *channelMap) {
- d.mu.Lock()
- d.DB = db
- d.mu.Unlock()
-}
-
-func (d *dbWrapper) get() *channelMap {
- d.mu.RLock()
- defer d.mu.RUnlock()
- return d.DB
-}
-
-// NewChannelzStorage initializes channelz data storage and id generator.
-//
-// Note: This function is exported for testing purposes only. Users should not call
-// it in most cases.
-func NewChannelzStorage() {
- db.set(&channelMap{
- topLevelChannels: make(map[int64]struct{}),
- channels: make(map[int64]*channel),
- listenSockets: make(map[int64]*listenSocket),
- normalSockets: make(map[int64]*normalSocket),
- servers: make(map[int64]*server),
- subChannels: make(map[int64]*subChannel),
- })
- idGen.reset()
-}
-
-// GetTopChannels returns a slice of top channels' ChannelMetric, along with a
-// boolean indicating whether there are more top channels to be queried for.
-//
-// The arg id specifies that only top channel with id at or above it will be included
-// in the result. The returned slice is up to a length of EntryPerPage, and is
-// sorted in ascending id order.
-func GetTopChannels(id int64) ([]*ChannelMetric, bool) {
- return db.get().GetTopChannels(id)
-}
-
-// GetServers returns a slice of servers' ServerMetric, along with a
-// boolean indicating whether there are more servers to be queried for.
-//
-// The arg id specifies that only server with id at or above it will be included
-// in the result. The returned slice is up to a length of EntryPerPage, and is
-// sorted in ascending id order.
-func GetServers(id int64) ([]*ServerMetric, bool) {
- return db.get().GetServers(id)
-}
-
-// GetServerSockets returns a slice of the SocketMetric of the normal sockets of the
-// server (identified by id), along with a boolean indicating whether there are more
-// sockets to be queried for.
-//
-// The arg startID specifies that only sockets with id at or above it will be
-// included in the result. The returned slice is up to a length of EntryPerPage,
-// and is sorted in ascending id order.
-func GetServerSockets(id int64, startID int64) ([]*SocketMetric, bool) {
- return db.get().GetServerSockets(id, startID)
-}
-
-// GetChannel returns the ChannelMetric for the channel (identified by id).
-func GetChannel(id int64) *ChannelMetric {
- return db.get().GetChannel(id)
-}
-
-// GetSubChannel returns the SubChannelMetric for the subchannel (identified by id).
-func GetSubChannel(id int64) *SubChannelMetric {
- return db.get().GetSubChannel(id)
-}
-
-// GetSocket returns the SocketInternalMetric for the socket (identified by id).
-func GetSocket(id int64) *SocketMetric {
- return db.get().GetSocket(id)
-}
-
-// RegisterChannel registers the given channel c in the channelz database with ref
-// as its reference name, and adds it to the child list of its parent (identified
-// by pid). pid = 0 means no parent. It returns the unique channelz tracking id
-// assigned to this channel.
-func RegisterChannel(c Channel, pid int64, ref string) int64 {
- id := idGen.genID()
- cn := &channel{
- refName: ref,
- c: c,
- subChans: make(map[int64]string),
- nestedChans: make(map[int64]string),
- id: id,
- pid: pid,
- trace: &channelTrace{createdTime: time.Now(), events: make([]*TraceEvent, 0, getMaxTraceEntry())},
- }
- if pid == 0 {
- db.get().addChannel(id, cn, true, pid, ref)
- } else {
- db.get().addChannel(id, cn, false, pid, ref)
- }
- return id
-}
-
-// RegisterSubChannel registers the given channel c in the channelz database with ref
-// as its reference name, and adds it to the child list of its parent (identified
-// by pid). It returns the unique channelz tracking id assigned to this subchannel.
-func RegisterSubChannel(c Channel, pid int64, ref string) int64 {
- if pid == 0 {
- grpclog.Error("a SubChannel's parent id cannot be 0")
- return 0
- }
- id := idGen.genID()
- sc := &subChannel{
- refName: ref,
- c: c,
- sockets: make(map[int64]string),
- id: id,
- pid: pid,
- trace: &channelTrace{createdTime: time.Now(), events: make([]*TraceEvent, 0, getMaxTraceEntry())},
- }
- db.get().addSubChannel(id, sc, pid, ref)
- return id
-}
-
-// RegisterServer registers the given server s in the channelz database. It returns
-// the unique channelz tracking id assigned to this server.
-func RegisterServer(s Server, ref string) int64 {
- id := idGen.genID()
- svr := &server{
- refName: ref,
- s: s,
- sockets: make(map[int64]string),
- listenSockets: make(map[int64]string),
- id: id,
- }
- db.get().addServer(id, svr)
- return id
-}
-
-// RegisterListenSocket registers the given listen socket s in the channelz database
-// with ref as its reference name, and adds it to the child list of its parent
-// (identified by pid). It returns the unique channelz tracking id assigned to
-// this listen socket.
-func RegisterListenSocket(s Socket, pid int64, ref string) int64 {
- if pid == 0 {
- grpclog.Error("a ListenSocket's parent id cannot be 0")
- return 0
- }
- id := idGen.genID()
- ls := &listenSocket{refName: ref, s: s, id: id, pid: pid}
- db.get().addListenSocket(id, ls, pid, ref)
- return id
-}
-
-// RegisterNormalSocket registers the given normal socket s in the channelz database
-// with ref as its reference name, and adds it to the child list of its parent
-// (identified by pid). It returns the unique channelz tracking id assigned to
-// this normal socket.
-func RegisterNormalSocket(s Socket, pid int64, ref string) int64 {
- if pid == 0 {
- grpclog.Error("a NormalSocket's parent id cannot be 0")
- return 0
- }
- id := idGen.genID()
- ns := &normalSocket{refName: ref, s: s, id: id, pid: pid}
- db.get().addNormalSocket(id, ns, pid, ref)
- return id
-}
-
-// RemoveEntry removes the entry with the given unique channelz tracking id from the
-// channelz database.
-func RemoveEntry(id int64) {
- db.get().removeEntry(id)
-}
-
-// TraceEventDesc is what the caller of AddTraceEvent should provide to describe the event to be added
-// to the channel trace.
-// The Parent field is optional. It is used for events that will also be recorded in the
-// entity's parent trace.
-type TraceEventDesc struct {
- Desc string
- Severity Severity
- Parent *TraceEventDesc
-}
-
-// AddTraceEvent adds trace related to the entity with specified id, using the provided TraceEventDesc.
-func AddTraceEvent(id int64, desc *TraceEventDesc) {
- if getMaxTraceEntry() == 0 {
- return
- }
- db.get().traceEvent(id, desc)
-}
-
-// channelMap is the storage data structure for channelz.
-// Methods of channelMap can be divided into two categories with respect to locking.
-// 1. Methods that acquire the global lock.
-// 2. Methods that can only be called while the global lock is held.
-// Methods of the second type must always be called from within a method of the first type.
-type channelMap struct {
- mu sync.RWMutex
- topLevelChannels map[int64]struct{}
- servers map[int64]*server
- channels map[int64]*channel
- subChannels map[int64]*subChannel
- listenSockets map[int64]*listenSocket
- normalSockets map[int64]*normalSocket
-}
-
-func (c *channelMap) addServer(id int64, s *server) {
- c.mu.Lock()
- s.cm = c
- c.servers[id] = s
- c.mu.Unlock()
-}
-
-func (c *channelMap) addChannel(id int64, cn *channel, isTopChannel bool, pid int64, ref string) {
- c.mu.Lock()
- cn.cm = c
- cn.trace.cm = c
- c.channels[id] = cn
- if isTopChannel {
- c.topLevelChannels[id] = struct{}{}
- } else {
- c.findEntry(pid).addChild(id, cn)
- }
- c.mu.Unlock()
-}
-
-func (c *channelMap) addSubChannel(id int64, sc *subChannel, pid int64, ref string) {
- c.mu.Lock()
- sc.cm = c
- sc.trace.cm = c
- c.subChannels[id] = sc
- c.findEntry(pid).addChild(id, sc)
- c.mu.Unlock()
-}
-
-func (c *channelMap) addListenSocket(id int64, ls *listenSocket, pid int64, ref string) {
- c.mu.Lock()
- ls.cm = c
- c.listenSockets[id] = ls
- c.findEntry(pid).addChild(id, ls)
- c.mu.Unlock()
-}
-
-func (c *channelMap) addNormalSocket(id int64, ns *normalSocket, pid int64, ref string) {
- c.mu.Lock()
- ns.cm = c
- c.normalSockets[id] = ns
- c.findEntry(pid).addChild(id, ns)
- c.mu.Unlock()
-}
-
-// removeEntry triggers the removal of an entry, which may not actually delete the entry if it has to
-// wait for the deletion of its children, or until no other entity's channel trace references it.
-// It may lead to a chain of entry deletions. For example, deleting the last socket of a gracefully
-// shutting down server will lead to the server being deleted as well.
-func (c *channelMap) removeEntry(id int64) {
- c.mu.Lock()
- c.findEntry(id).triggerDelete()
- c.mu.Unlock()
-}
-
-// c.mu must be held by the caller
-func (c *channelMap) decrTraceRefCount(id int64) {
- e := c.findEntry(id)
- if v, ok := e.(tracedChannel); ok {
- v.decrTraceRefCount()
- e.deleteSelfIfReady()
- }
-}
-
-// c.mu must be held by the caller.
-func (c *channelMap) findEntry(id int64) entry {
- var v entry
- var ok bool
- if v, ok = c.channels[id]; ok {
- return v
- }
- if v, ok = c.subChannels[id]; ok {
- return v
- }
- if v, ok = c.servers[id]; ok {
- return v
- }
- if v, ok = c.listenSockets[id]; ok {
- return v
- }
- if v, ok = c.normalSockets[id]; ok {
- return v
- }
- return &dummyEntry{idNotFound: id}
-}
-
-// c.mu must be held by the caller
-// deleteEntry simply deletes an entry from the channelMap. Before calling this
-// method, the caller must check that this entry is ready to be deleted, i.e. removeEntry()
-// has been called on it, and no children still exist.
-// Conditionals are ordered by the expected frequency of deletion of each entity
-// type, in order to optimize performance.
-func (c *channelMap) deleteEntry(id int64) {
- var ok bool
- if _, ok = c.normalSockets[id]; ok {
- delete(c.normalSockets, id)
- return
- }
- if _, ok = c.subChannels[id]; ok {
- delete(c.subChannels, id)
- return
- }
- if _, ok = c.channels[id]; ok {
- delete(c.channels, id)
- delete(c.topLevelChannels, id)
- return
- }
- if _, ok = c.listenSockets[id]; ok {
- delete(c.listenSockets, id)
- return
- }
- if _, ok = c.servers[id]; ok {
- delete(c.servers, id)
- return
- }
-}
-
-func (c *channelMap) traceEvent(id int64, desc *TraceEventDesc) {
- c.mu.Lock()
- child := c.findEntry(id)
- childTC, ok := child.(tracedChannel)
- if !ok {
- c.mu.Unlock()
- return
- }
- childTC.getChannelTrace().append(&TraceEvent{Desc: desc.Desc, Severity: desc.Severity, Timestamp: time.Now()})
- if desc.Parent != nil {
- parent := c.findEntry(child.getParentID())
- var chanType RefChannelType
- switch child.(type) {
- case *channel:
- chanType = RefChannel
- case *subChannel:
- chanType = RefSubChannel
- }
- if parentTC, ok := parent.(tracedChannel); ok {
- parentTC.getChannelTrace().append(&TraceEvent{
- Desc: desc.Parent.Desc,
- Severity: desc.Parent.Severity,
- Timestamp: time.Now(),
- RefID: id,
- RefName: childTC.getRefName(),
- RefType: chanType,
- })
- childTC.incrTraceRefCount()
- }
- }
- c.mu.Unlock()
-}
-
-type int64Slice []int64
-
-func (s int64Slice) Len() int { return len(s) }
-func (s int64Slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
-func (s int64Slice) Less(i, j int) bool { return s[i] < s[j] }
-
-func copyMap(m map[int64]string) map[int64]string {
- n := make(map[int64]string)
- for k, v := range m {
- n[k] = v
- }
- return n
-}
-
-func min(a, b int) int {
- if a < b {
- return a
- }
- return b
-}
-
-func (c *channelMap) GetTopChannels(id int64) ([]*ChannelMetric, bool) {
- c.mu.RLock()
- l := len(c.topLevelChannels)
- ids := make([]int64, 0, l)
- cns := make([]*channel, 0, min(l, EntryPerPage))
-
- for k := range c.topLevelChannels {
- ids = append(ids, k)
- }
- sort.Sort(int64Slice(ids))
- idx := sort.Search(len(ids), func(i int) bool { return ids[i] >= id })
- count := 0
- var end bool
- var t []*ChannelMetric
- for i, v := range ids[idx:] {
- if count == EntryPerPage {
- break
- }
- if cn, ok := c.channels[v]; ok {
- cns = append(cns, cn)
- t = append(t, &ChannelMetric{
- NestedChans: copyMap(cn.nestedChans),
- SubChans: copyMap(cn.subChans),
- })
- count++
- }
- if i == len(ids[idx:])-1 {
- end = true
- break
- }
- }
- c.mu.RUnlock()
- if count == 0 {
- end = true
- }
-
- for i, cn := range cns {
- t[i].ChannelData = cn.c.ChannelzMetric()
- t[i].ID = cn.id
- t[i].RefName = cn.refName
- t[i].Trace = cn.trace.dumpData()
- }
- return t, end
-}
-
-func (c *channelMap) GetServers(id int64) ([]*ServerMetric, bool) {
- c.mu.RLock()
- l := len(c.servers)
- ids := make([]int64, 0, l)
- ss := make([]*server, 0, min(l, EntryPerPage))
- for k := range c.servers {
- ids = append(ids, k)
- }
- sort.Sort(int64Slice(ids))
- idx := sort.Search(len(ids), func(i int) bool { return ids[i] >= id })
- count := 0
- var end bool
- var s []*ServerMetric
- for i, v := range ids[idx:] {
- if count == EntryPerPage {
- break
- }
- if svr, ok := c.servers[v]; ok {
- ss = append(ss, svr)
- s = append(s, &ServerMetric{
- ListenSockets: copyMap(svr.listenSockets),
- })
- count++
- }
- if i == len(ids[idx:])-1 {
- end = true
- break
- }
- }
- c.mu.RUnlock()
- if count == 0 {
- end = true
- }
-
- for i, svr := range ss {
- s[i].ServerData = svr.s.ChannelzMetric()
- s[i].ID = svr.id
- s[i].RefName = svr.refName
- }
- return s, end
-}
-
-func (c *channelMap) GetServerSockets(id int64, startID int64) ([]*SocketMetric, bool) {
- var svr *server
- var ok bool
- c.mu.RLock()
- if svr, ok = c.servers[id]; !ok {
- // server with id doesn't exist.
- c.mu.RUnlock()
- return nil, true
- }
- svrskts := svr.sockets
- l := len(svrskts)
- ids := make([]int64, 0, l)
- sks := make([]*normalSocket, 0, min(l, EntryPerPage))
- for k := range svrskts {
- ids = append(ids, k)
- }
- sort.Sort(int64Slice(ids))
- idx := sort.Search(len(ids), func(i int) bool { return ids[i] >= id })
- count := 0
- var end bool
- for i, v := range ids[idx:] {
- if count == EntryPerPage {
- break
- }
- if ns, ok := c.normalSockets[v]; ok {
- sks = append(sks, ns)
- count++
- }
- if i == len(ids[idx:])-1 {
- end = true
- break
- }
- }
- c.mu.RUnlock()
- if count == 0 {
- end = true
- }
- var s []*SocketMetric
- for _, ns := range sks {
- sm := &SocketMetric{}
- sm.SocketData = ns.s.ChannelzMetric()
- sm.ID = ns.id
- sm.RefName = ns.refName
- s = append(s, sm)
- }
- return s, end
-}
-
-func (c *channelMap) GetChannel(id int64) *ChannelMetric {
- cm := &ChannelMetric{}
- var cn *channel
- var ok bool
- c.mu.RLock()
- if cn, ok = c.channels[id]; !ok {
- // channel with id doesn't exist.
- c.mu.RUnlock()
- return nil
- }
- cm.NestedChans = copyMap(cn.nestedChans)
- cm.SubChans = copyMap(cn.subChans)
- c.mu.RUnlock()
- cm.ChannelData = cn.c.ChannelzMetric()
- cm.ID = cn.id
- cm.RefName = cn.refName
- cm.Trace = cn.trace.dumpData()
- return cm
-}
-
-func (c *channelMap) GetSubChannel(id int64) *SubChannelMetric {
- cm := &SubChannelMetric{}
- var sc *subChannel
- var ok bool
- c.mu.RLock()
- if sc, ok = c.subChannels[id]; !ok {
- // subchannel with id doesn't exist.
- c.mu.RUnlock()
- return nil
- }
- cm.Sockets = copyMap(sc.sockets)
- c.mu.RUnlock()
- cm.ChannelData = sc.c.ChannelzMetric()
- cm.ID = sc.id
- cm.RefName = sc.refName
- cm.Trace = sc.trace.dumpData()
- return cm
-}
-
-func (c *channelMap) GetSocket(id int64) *SocketMetric {
- sm := &SocketMetric{}
- c.mu.RLock()
- if ls, ok := c.listenSockets[id]; ok {
- c.mu.RUnlock()
- sm.SocketData = ls.s.ChannelzMetric()
- sm.ID = ls.id
- sm.RefName = ls.refName
- return sm
- }
- if ns, ok := c.normalSockets[id]; ok {
- c.mu.RUnlock()
- sm.SocketData = ns.s.ChannelzMetric()
- sm.ID = ns.id
- sm.RefName = ns.refName
- return sm
- }
- c.mu.RUnlock()
- return nil
-}
-
-type idGenerator struct {
- id int64
-}
-
-func (i *idGenerator) reset() {
- atomic.StoreInt64(&i.id, 0)
-}
-
-func (i *idGenerator) genID() int64 {
- return atomic.AddInt64(&i.id, 1)
-}
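For illustration, a simplified, self-contained sketch of the sort-plus-sort.Search pagination pattern that the deleted GetTopChannels/GetServers/GetServerSockets methods rely on; the ids, start value and page size are assumptions, and the end-of-listing flag is simplified relative to the original.

package main

import (
	"fmt"
	"sort"
)

// page sorts the ids, binary-searches for the first id >= start, and returns
// up to perPage ids plus a flag saying whether the listing is complete.
func page(ids []int64, start int64, perPage int) ([]int64, bool) {
	sort.Slice(ids, func(i, j int) bool { return ids[i] < ids[j] })
	idx := sort.Search(len(ids), func(i int) bool { return ids[i] >= start })
	rest := ids[idx:]
	if len(rest) <= perPage {
		return rest, true
	}
	return rest[:perPage], false
}

func main() {
	ids := []int64{5, 1, 9, 3, 7, 11, 2}
	out, end := page(ids, 3, 3)
	fmt.Println(out, end) // [3 5 7] false
}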
diff --git a/vendor/google.golang.org/grpc/internal/channelz/types.go b/vendor/google.golang.org/grpc/internal/channelz/types.go
deleted file mode 100644
index 17c2274cb..000000000
--- a/vendor/google.golang.org/grpc/internal/channelz/types.go
+++ /dev/null
@@ -1,702 +0,0 @@
-/*
- *
- * Copyright 2018 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package channelz
-
-import (
- "net"
- "sync"
- "sync/atomic"
- "time"
-
- "google.golang.org/grpc/connectivity"
- "google.golang.org/grpc/credentials"
- "google.golang.org/grpc/grpclog"
-)
-
-// entry represents a node in the channelz database.
-type entry interface {
-	// addChild adds a child e, whose channelz id is id, to the child list.
-	addChild(id int64, e entry)
-	// deleteChild deletes the child with channelz id id from the child list.
-	deleteChild(id int64)
- // triggerDelete tries to delete self from channelz database. However, if child
- // list is not empty, then deletion from the database is on hold until the last
- // child is deleted from database.
- triggerDelete()
-	// deleteSelfIfReady checks whether triggerDelete() has been called before, and whether the child
- // list is now empty. If both conditions are met, then delete self from database.
- deleteSelfIfReady()
- // getParentID returns parent ID of the entry. 0 value parent ID means no parent.
- getParentID() int64
-}
-
-// dummyEntry is a fake entry to handle entry not found case.
-type dummyEntry struct {
- idNotFound int64
-}
-
-func (d *dummyEntry) addChild(id int64, e entry) {
- // Note: It is possible for a normal program to reach here under race condition.
- // For example, there could be a race between ClientConn.Close() info being propagated
- // to addrConn and http2Client. ClientConn.Close() cancel the context and result
- // in http2Client to error. The error info is then caught by transport monitor
- // and before addrConn.tearDown() is called in side ClientConn.Close(). Therefore,
- // the addrConn will create a new transport. And when registering the new transport in
- // channelz, its parent addrConn could have already been torn down and deleted
- // from channelz tracking, and thus reach the code here.
- grpclog.Infof("attempt to add child of type %T with id %d to a parent (id=%d) that doesn't currently exist", e, id, d.idNotFound)
-}
-
-func (d *dummyEntry) deleteChild(id int64) {
- // It is possible for a normal program to reach here under race condition.
- // Refer to the example described in addChild().
- grpclog.Infof("attempt to delete child with id %d from a parent (id=%d) that doesn't currently exist", id, d.idNotFound)
-}
-
-func (d *dummyEntry) triggerDelete() {
- grpclog.Warningf("attempt to delete an entry (id=%d) that doesn't currently exist", d.idNotFound)
-}
-
-func (*dummyEntry) deleteSelfIfReady() {
- // code should not reach here. deleteSelfIfReady is always called on an existing entry.
-}
-
-func (*dummyEntry) getParentID() int64 {
- return 0
-}
-
-// ChannelMetric defines the info channelz provides for a specific Channel, which
-// includes ChannelInternalMetric and channelz-specific data, such as channelz id,
-// child list, etc.
-type ChannelMetric struct {
- // ID is the channelz id of this channel.
- ID int64
- // RefName is the human readable reference string of this channel.
- RefName string
- // ChannelData contains channel internal metric reported by the channel through
- // ChannelzMetric().
- ChannelData *ChannelInternalMetric
- // NestedChans tracks the nested channel type children of this channel in the format of
- // a map from nested channel channelz id to corresponding reference string.
- NestedChans map[int64]string
- // SubChans tracks the subchannel type children of this channel in the format of a
- // map from subchannel channelz id to corresponding reference string.
- SubChans map[int64]string
- // Sockets tracks the socket type children of this channel in the format of a map
- // from socket channelz id to corresponding reference string.
-	// Note: the current grpc implementation doesn't allow a channel to have sockets directly;
-	// therefore, this field is unused.
- Sockets map[int64]string
- // Trace contains the most recent traced events.
- Trace *ChannelTrace
-}
-
-// SubChannelMetric defines the info channelz provides for a specific SubChannel,
-// which includes ChannelInternalMetric and channelz-specific data, such as
-// channelz id, child list, etc.
-type SubChannelMetric struct {
- // ID is the channelz id of this subchannel.
- ID int64
- // RefName is the human readable reference string of this subchannel.
- RefName string
- // ChannelData contains subchannel internal metric reported by the subchannel
- // through ChannelzMetric().
- ChannelData *ChannelInternalMetric
- // NestedChans tracks the nested channel type children of this subchannel in the format of
- // a map from nested channel channelz id to corresponding reference string.
- // Note current grpc implementation doesn't allow subchannel to have nested channels
- // as children, therefore, this field is unused.
- NestedChans map[int64]string
- // SubChans tracks the subchannel type children of this subchannel in the format of a
- // map from subchannel channelz id to corresponding reference string.
- // Note current grpc implementation doesn't allow subchannel to have subchannels
- // as children, therefore, this field is unused.
- SubChans map[int64]string
- // Sockets tracks the socket type children of this subchannel in the format of a map
- // from socket channelz id to corresponding reference string.
- Sockets map[int64]string
- // Trace contains the most recent traced events.
- Trace *ChannelTrace
-}
-
-// ChannelInternalMetric defines the struct that the implementor of Channel interface
-// should return from ChannelzMetric().
-type ChannelInternalMetric struct {
- // current connectivity state of the channel.
- State connectivity.State
- // The target this channel originally tried to connect to. May be absent
- Target string
- // The number of calls started on the channel.
- CallsStarted int64
- // The number of calls that have completed with an OK status.
- CallsSucceeded int64
-	// The number of calls that have completed with a non-OK status.
- CallsFailed int64
- // The last time a call was started on the channel.
- LastCallStartedTimestamp time.Time
-}
-
-// ChannelTrace stores traced events on a channel/subchannel and related info.
-type ChannelTrace struct {
- // EventNum is the number of events that ever got traced (i.e. including those that have been deleted)
- EventNum int64
- // CreationTime is the creation time of the trace.
- CreationTime time.Time
- // Events stores the most recent trace events (up to $maxTraceEntry, newer event will overwrite the
- // oldest one)
- Events []*TraceEvent
-}
-
-// TraceEvent represent a single trace event
-type TraceEvent struct {
- // Desc is a simple description of the trace event.
- Desc string
- // Severity states the severity of this trace event.
- Severity Severity
- // Timestamp is the event time.
- Timestamp time.Time
- // RefID is the id of the entity that gets referenced in the event. RefID is 0 if no other entity is
- // involved in this event.
- // e.g. SubChannel (id: 4[]) Created. --> RefID = 4, RefName = "" (inside [])
- RefID int64
- // RefName is the reference name for the entity that gets referenced in the event.
- RefName string
- // RefType indicates the referenced entity type, i.e Channel or SubChannel.
- RefType RefChannelType
-}
-
-// Channel is the interface that should be satisfied in order to be tracked by
-// channelz as Channel or SubChannel.
-type Channel interface {
- ChannelzMetric() *ChannelInternalMetric
-}
-
-type dummyChannel struct{}
-
-func (d *dummyChannel) ChannelzMetric() *ChannelInternalMetric {
- return &ChannelInternalMetric{}
-}
-
-type channel struct {
- refName string
- c Channel
- closeCalled bool
- nestedChans map[int64]string
- subChans map[int64]string
- id int64
- pid int64
- cm *channelMap
- trace *channelTrace
- // traceRefCount is the number of trace events that reference this channel.
- // Non-zero traceRefCount means the trace of this channel cannot be deleted.
- traceRefCount int32
-}
-
-func (c *channel) addChild(id int64, e entry) {
- switch v := e.(type) {
- case *subChannel:
- c.subChans[id] = v.refName
- case *channel:
- c.nestedChans[id] = v.refName
- default:
- grpclog.Errorf("cannot add a child (id = %d) of type %T to a channel", id, e)
- }
-}
-
-func (c *channel) deleteChild(id int64) {
- delete(c.subChans, id)
- delete(c.nestedChans, id)
- c.deleteSelfIfReady()
-}
-
-func (c *channel) triggerDelete() {
- c.closeCalled = true
- c.deleteSelfIfReady()
-}
-
-func (c *channel) getParentID() int64 {
- return c.pid
-}
-
-// deleteSelfFromTree tries to delete the channel from the channelz entry relation tree, which means
-// deleting the channel reference from its parent's child list.
-//
-// In order for a channel to be deleted from the tree, it must meet the criteria that, removal of the
-// corresponding grpc object has been invoked, and the channel does not have any children left.
-//
-// The returned boolean value indicates whether the channel has been successfully deleted from tree.
-func (c *channel) deleteSelfFromTree() (deleted bool) {
- if !c.closeCalled || len(c.subChans)+len(c.nestedChans) != 0 {
- return false
- }
- // not top channel
- if c.pid != 0 {
- c.cm.findEntry(c.pid).deleteChild(c.id)
- }
- return true
-}
-
-// deleteSelfFromMap checks whether it is valid to delete the channel from the map, which means
-// deleting the channel from channelz's tracking entirely. Users can no longer use id to query the
-// channel, and its memory will be garbage collected.
-//
-// The trace reference count of the channel must be 0 in order to be deleted from the map. This is
-// specified in the channel tracing gRFC that as long as some other trace has reference to an entity,
-// the trace of the referenced entity must not be deleted. In order to release the resource allocated
-// by grpc, the reference to the grpc object is reset to a dummy object.
-//
-// deleteSelfFromMap must be called after deleteSelfFromTree returns true.
-//
-// It returns a bool to indicate whether the channel can be safely deleted from map.
-func (c *channel) deleteSelfFromMap() (delete bool) {
- if c.getTraceRefCount() != 0 {
- c.c = &dummyChannel{}
- return false
- }
- return true
-}
-
-// deleteSelfIfReady tries to delete the channel itself from the channelz database.
-// The delete process includes two steps:
-// 1. delete the channel from the entry relation tree, i.e. delete the channel reference from its
-// parent's child list.
-// 2. delete the channel from the map, i.e. delete the channel entirely from channelz. Lookup by id
-// will return entry not found error.
-func (c *channel) deleteSelfIfReady() {
- if !c.deleteSelfFromTree() {
- return
- }
- if !c.deleteSelfFromMap() {
- return
- }
- c.cm.deleteEntry(c.id)
- c.trace.clear()
-}
-
-func (c *channel) getChannelTrace() *channelTrace {
- return c.trace
-}
-
-func (c *channel) incrTraceRefCount() {
- atomic.AddInt32(&c.traceRefCount, 1)
-}
-
-func (c *channel) decrTraceRefCount() {
- atomic.AddInt32(&c.traceRefCount, -1)
-}
-
-func (c *channel) getTraceRefCount() int {
- i := atomic.LoadInt32(&c.traceRefCount)
- return int(i)
-}
-
-func (c *channel) getRefName() string {
- return c.refName
-}
-
-type subChannel struct {
- refName string
- c Channel
- closeCalled bool
- sockets map[int64]string
- id int64
- pid int64
- cm *channelMap
- trace *channelTrace
- traceRefCount int32
-}
-
-func (sc *subChannel) addChild(id int64, e entry) {
- if v, ok := e.(*normalSocket); ok {
- sc.sockets[id] = v.refName
- } else {
- grpclog.Errorf("cannot add a child (id = %d) of type %T to a subChannel", id, e)
- }
-}
-
-func (sc *subChannel) deleteChild(id int64) {
- delete(sc.sockets, id)
- sc.deleteSelfIfReady()
-}
-
-func (sc *subChannel) triggerDelete() {
- sc.closeCalled = true
- sc.deleteSelfIfReady()
-}
-
-func (sc *subChannel) getParentID() int64 {
- return sc.pid
-}
-
-// deleteSelfFromTree tries to delete the subchannel from the channelz entry relation tree, which
-// means deleting the subchannel reference from its parent's child list.
-//
-// In order for a subchannel to be deleted from the tree, it must meet the criteria that, removal of
-// the corresponding grpc object has been invoked, and the subchannel does not have any children left.
-//
-// The returned boolean value indicates whether the channel has been successfully deleted from tree.
-func (sc *subChannel) deleteSelfFromTree() (deleted bool) {
- if !sc.closeCalled || len(sc.sockets) != 0 {
- return false
- }
- sc.cm.findEntry(sc.pid).deleteChild(sc.id)
- return true
-}
-
-// deleteSelfFromMap checks whether it is valid to delete the subchannel from the map, which means
-// deleting the subchannel from channelz's tracking entirely. Users can no longer use id to query
-// the subchannel, and its memory will be garbage collected.
-//
-// The trace reference count of the subchannel must be 0 in order to be deleted from the map. This is
-// specified in the channel tracing gRFC that as long as some other trace has reference to an entity,
-// the trace of the referenced entity must not be deleted. In order to release the resource allocated
-// by grpc, the reference to the grpc object is reset to a dummy object.
-//
-// deleteSelfFromMap must be called after deleteSelfFromTree returns true.
-//
-// It returns a bool to indicate whether the channel can be safely deleted from map.
-func (sc *subChannel) deleteSelfFromMap() (delete bool) {
- if sc.getTraceRefCount() != 0 {
- // free the grpc struct (i.e. addrConn)
- sc.c = &dummyChannel{}
- return false
- }
- return true
-}
-
-// deleteSelfIfReady tries to delete the subchannel itself from the channelz database.
-// The delete process includes two steps:
-// 1. delete the subchannel from the entry relation tree, i.e. delete the subchannel reference from
-// its parent's child list.
-// 2. delete the subchannel from the map, i.e. delete the subchannel entirely from channelz. Lookup
-// by id will return entry not found error.
-func (sc *subChannel) deleteSelfIfReady() {
- if !sc.deleteSelfFromTree() {
- return
- }
- if !sc.deleteSelfFromMap() {
- return
- }
- sc.cm.deleteEntry(sc.id)
- sc.trace.clear()
-}
-
-func (sc *subChannel) getChannelTrace() *channelTrace {
- return sc.trace
-}
-
-func (sc *subChannel) incrTraceRefCount() {
- atomic.AddInt32(&sc.traceRefCount, 1)
-}
-
-func (sc *subChannel) decrTraceRefCount() {
- atomic.AddInt32(&sc.traceRefCount, -1)
-}
-
-func (sc *subChannel) getTraceRefCount() int {
- i := atomic.LoadInt32(&sc.traceRefCount)
- return int(i)
-}
-
-func (sc *subChannel) getRefName() string {
- return sc.refName
-}
-
-// SocketMetric defines the info channelz provides for a specific Socket, which
-// includes SocketInternalMetric and channelz-specific data, such as channelz id, etc.
-type SocketMetric struct {
- // ID is the channelz id of this socket.
- ID int64
- // RefName is the human readable reference string of this socket.
- RefName string
- // SocketData contains socket internal metric reported by the socket through
- // ChannelzMetric().
- SocketData *SocketInternalMetric
-}
-
-// SocketInternalMetric defines the struct that the implementor of Socket interface
-// should return from ChannelzMetric().
-type SocketInternalMetric struct {
- // The number of streams that have been started.
- StreamsStarted int64
- // The number of streams that have ended successfully:
- // On client side, receiving frame with eos bit set.
- // On server side, sending frame with eos bit set.
- StreamsSucceeded int64
- // The number of streams that have ended unsuccessfully:
- // On client side, termination without receiving frame with eos bit set.
- // On server side, termination without sending frame with eos bit set.
- StreamsFailed int64
- // The number of messages successfully sent on this socket.
- MessagesSent int64
- MessagesReceived int64
- // The number of keep alives sent. This is typically implemented with HTTP/2
- // ping messages.
- KeepAlivesSent int64
- // The last time a stream was created by this endpoint. Usually unset for
- // servers.
- LastLocalStreamCreatedTimestamp time.Time
- // The last time a stream was created by the remote endpoint. Usually unset
- // for clients.
- LastRemoteStreamCreatedTimestamp time.Time
- // The last time a message was sent by this endpoint.
- LastMessageSentTimestamp time.Time
- // The last time a message was received by this endpoint.
- LastMessageReceivedTimestamp time.Time
- // The amount of window, granted to the local endpoint by the remote endpoint.
- // This may be slightly out of date due to network latency. This does NOT
- // include stream level or TCP level flow control info.
- LocalFlowControlWindow int64
- // The amount of window, granted to the remote endpoint by the local endpoint.
- // This may be slightly out of date due to network latency. This does NOT
- // include stream level or TCP level flow control info.
- RemoteFlowControlWindow int64
- // The locally bound address.
- LocalAddr net.Addr
- // The remote bound address. May be absent.
- RemoteAddr net.Addr
- // Optional, represents the name of the remote endpoint, if different than
- // the original target name.
- RemoteName string
- SocketOptions *SocketOptionData
- Security credentials.ChannelzSecurityValue
-}
-
-// Socket is the interface that should be satisfied in order to be tracked by
-// channelz as Socket.
-type Socket interface {
- ChannelzMetric() *SocketInternalMetric
-}
-
-type listenSocket struct {
- refName string
- s Socket
- id int64
- pid int64
- cm *channelMap
-}
-
-func (ls *listenSocket) addChild(id int64, e entry) {
- grpclog.Errorf("cannot add a child (id = %d) of type %T to a listen socket", id, e)
-}
-
-func (ls *listenSocket) deleteChild(id int64) {
- grpclog.Errorf("cannot delete a child (id = %d) from a listen socket", id)
-}
-
-func (ls *listenSocket) triggerDelete() {
- ls.cm.deleteEntry(ls.id)
- ls.cm.findEntry(ls.pid).deleteChild(ls.id)
-}
-
-func (ls *listenSocket) deleteSelfIfReady() {
- grpclog.Errorf("cannot call deleteSelfIfReady on a listen socket")
-}
-
-func (ls *listenSocket) getParentID() int64 {
- return ls.pid
-}
-
-type normalSocket struct {
- refName string
- s Socket
- id int64
- pid int64
- cm *channelMap
-}
-
-func (ns *normalSocket) addChild(id int64, e entry) {
- grpclog.Errorf("cannot add a child (id = %d) of type %T to a normal socket", id, e)
-}
-
-func (ns *normalSocket) deleteChild(id int64) {
- grpclog.Errorf("cannot delete a child (id = %d) from a normal socket", id)
-}
-
-func (ns *normalSocket) triggerDelete() {
- ns.cm.deleteEntry(ns.id)
- ns.cm.findEntry(ns.pid).deleteChild(ns.id)
-}
-
-func (ns *normalSocket) deleteSelfIfReady() {
- grpclog.Errorf("cannot call deleteSelfIfReady on a normal socket")
-}
-
-func (ns *normalSocket) getParentID() int64 {
- return ns.pid
-}
-
-// ServerMetric defines the info channelz provides for a specific Server, which
-// includes ServerInternalMetric and channelz-specific data, such as channelz id,
-// child list, etc.
-type ServerMetric struct {
- // ID is the channelz id of this server.
- ID int64
- // RefName is the human readable reference string of this server.
- RefName string
- // ServerData contains server internal metric reported by the server through
- // ChannelzMetric().
- ServerData *ServerInternalMetric
- // ListenSockets tracks the listener socket type children of this server in the
- // format of a map from socket channelz id to corresponding reference string.
- ListenSockets map[int64]string
-}
-
-// ServerInternalMetric defines the struct that the implementor of Server interface
-// should return from ChannelzMetric().
-type ServerInternalMetric struct {
- // The number of incoming calls started on the server.
- CallsStarted int64
- // The number of incoming calls that have completed with an OK status.
- CallsSucceeded int64
-	// The number of incoming calls that have completed with a non-OK status.
- CallsFailed int64
- // The last time a call was started on the server.
- LastCallStartedTimestamp time.Time
-}
-
-// Server is the interface to be satisfied in order to be tracked by channelz as
-// Server.
-type Server interface {
- ChannelzMetric() *ServerInternalMetric
-}
-
-type server struct {
- refName string
- s Server
- closeCalled bool
- sockets map[int64]string
- listenSockets map[int64]string
- id int64
- cm *channelMap
-}
-
-func (s *server) addChild(id int64, e entry) {
- switch v := e.(type) {
- case *normalSocket:
- s.sockets[id] = v.refName
- case *listenSocket:
- s.listenSockets[id] = v.refName
- default:
- grpclog.Errorf("cannot add a child (id = %d) of type %T to a server", id, e)
- }
-}
-
-func (s *server) deleteChild(id int64) {
- delete(s.sockets, id)
- delete(s.listenSockets, id)
- s.deleteSelfIfReady()
-}
-
-func (s *server) triggerDelete() {
- s.closeCalled = true
- s.deleteSelfIfReady()
-}
-
-func (s *server) deleteSelfIfReady() {
- if !s.closeCalled || len(s.sockets)+len(s.listenSockets) != 0 {
- return
- }
- s.cm.deleteEntry(s.id)
-}
-
-func (s *server) getParentID() int64 {
- return 0
-}
-
-type tracedChannel interface {
- getChannelTrace() *channelTrace
- incrTraceRefCount()
- decrTraceRefCount()
- getRefName() string
-}
-
-type channelTrace struct {
- cm *channelMap
- createdTime time.Time
- eventCount int64
- mu sync.Mutex
- events []*TraceEvent
-}
-
-func (c *channelTrace) append(e *TraceEvent) {
- c.mu.Lock()
- if len(c.events) == getMaxTraceEntry() {
- del := c.events[0]
- c.events = c.events[1:]
- if del.RefID != 0 {
- // start recursive cleanup in a goroutine to not block the call originated from grpc.
- go func() {
- // need to acquire c.cm.mu lock to call the unlocked attemptCleanup func.
- c.cm.mu.Lock()
- c.cm.decrTraceRefCount(del.RefID)
- c.cm.mu.Unlock()
- }()
- }
- }
- e.Timestamp = time.Now()
- c.events = append(c.events, e)
- c.eventCount++
- c.mu.Unlock()
-}
-
-func (c *channelTrace) clear() {
- c.mu.Lock()
- for _, e := range c.events {
- if e.RefID != 0 {
- // caller should have already held the c.cm.mu lock.
- c.cm.decrTraceRefCount(e.RefID)
- }
- }
- c.mu.Unlock()
-}
-
-// Severity is the severity level of a trace event.
-// The canonical enumeration of all valid values is here:
-// https://github.com/grpc/grpc-proto/blob/9b13d199cc0d4703c7ea26c9c330ba695866eb23/grpc/channelz/v1/channelz.proto#L126.
-type Severity int
-
-const (
- // CtUNKNOWN indicates unknown severity of a trace event.
- CtUNKNOWN Severity = iota
- // CtINFO indicates info level severity of a trace event.
- CtINFO
- // CtWarning indicates warning level severity of a trace event.
- CtWarning
- // CtError indicates error level severity of a trace event.
- CtError
-)
-
-// RefChannelType is the type of the entity being referenced in a trace event.
-type RefChannelType int
-
-const (
- // RefChannel indicates the referenced entity is a Channel.
- RefChannel RefChannelType = iota
- // RefSubChannel indicates the referenced entity is a SubChannel.
- RefSubChannel
-)
-
-func (c *channelTrace) dumpData() *ChannelTrace {
- c.mu.Lock()
- ct := &ChannelTrace{EventNum: c.eventCount, CreationTime: c.createdTime}
- ct.Events = c.events[:len(c.events)]
- c.mu.Unlock()
- return ct
-}
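
channelTrace.append above keeps at most getMaxTraceEntry() events, dropping the oldest entry (and, in a goroutine, releasing its reference) once the cap is reached. A minimal sketch of the same bounded, mutex-guarded event log, with a hypothetical maxEntries constant and without the cross-entity reference counting:

package main

import (
	"fmt"
	"sync"
	"time"
)

// maxEntries is a hypothetical cap standing in for getMaxTraceEntry().
const maxEntries = 4

type event struct {
	Desc      string
	Timestamp time.Time
}

type trace struct {
	mu         sync.Mutex
	eventCount int64 // total events ever appended, including dropped ones
	events     []*event
}

func (t *trace) append(e *event) {
	t.mu.Lock()
	defer t.mu.Unlock()
	if len(t.events) == maxEntries {
		// Drop the oldest event to stay within the cap.
		t.events = t.events[1:]
	}
	e.Timestamp = time.Now()
	t.events = append(t.events, e)
	t.eventCount++
}

func main() {
	t := &trace{}
	for i := 0; i < 6; i++ {
		t.append(&event{Desc: fmt.Sprintf("event %d", i)})
	}
	for _, e := range t.events {
		fmt.Println(e.Desc) // event 2 .. event 5
	}
	fmt.Println("total:", t.eventCount) // total: 6
}
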
diff --git a/vendor/google.golang.org/grpc/internal/channelz/types_linux.go b/vendor/google.golang.org/grpc/internal/channelz/types_linux.go
deleted file mode 100644
index 07215396d..000000000
--- a/vendor/google.golang.org/grpc/internal/channelz/types_linux.go
+++ /dev/null
@@ -1,53 +0,0 @@
-// +build !appengine,go1.7
-
-/*
- *
- * Copyright 2018 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package channelz
-
-import (
- "syscall"
-
- "golang.org/x/sys/unix"
-)
-
-// SocketOptionData defines the struct to hold socket option data, and related
-// getter function to obtain info from fd.
-type SocketOptionData struct {
- Linger *unix.Linger
- RecvTimeout *unix.Timeval
- SendTimeout *unix.Timeval
- TCPInfo *unix.TCPInfo
-}
-
-// Getsockopt defines the function to get socket options requested by channelz.
-// It is to be passed to syscall.RawConn.Control().
-func (s *SocketOptionData) Getsockopt(fd uintptr) {
- if v, err := unix.GetsockoptLinger(int(fd), syscall.SOL_SOCKET, syscall.SO_LINGER); err == nil {
- s.Linger = v
- }
- if v, err := unix.GetsockoptTimeval(int(fd), syscall.SOL_SOCKET, syscall.SO_RCVTIMEO); err == nil {
- s.RecvTimeout = v
- }
- if v, err := unix.GetsockoptTimeval(int(fd), syscall.SOL_SOCKET, syscall.SO_SNDTIMEO); err == nil {
- s.SendTimeout = v
- }
- if v, err := unix.GetsockoptTCPInfo(int(fd), syscall.SOL_TCP, syscall.TCP_INFO); err == nil {
- s.TCPInfo = v
- }
-}
diff --git a/vendor/google.golang.org/grpc/internal/channelz/types_nonlinux.go b/vendor/google.golang.org/grpc/internal/channelz/types_nonlinux.go
deleted file mode 100644
index b24600480..000000000
--- a/vendor/google.golang.org/grpc/internal/channelz/types_nonlinux.go
+++ /dev/null
@@ -1,44 +0,0 @@
-// +build !linux appengine !go1.7
-
-/*
- *
- * Copyright 2018 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package channelz
-
-import (
- "sync"
-
- "google.golang.org/grpc/grpclog"
-)
-
-var once sync.Once
-
-// SocketOptionData defines the struct to hold socket option data, and related
-// getter function to obtain info from fd.
-// Socket options are not supported on non-linux or appengine environments.
-type SocketOptionData struct {
-}
-
-// Getsockopt defines the function to get socket options requested by channelz.
-// It is to be passed to syscall.RawConn.Control().
-// Socket options are not supported on non-linux or appengine environments.
-func (s *SocketOptionData) Getsockopt(fd uintptr) {
- once.Do(func() {
- grpclog.Warningln("Channelz: socket options are not supported on non-linux os and appengine.")
- })
-}
diff --git a/vendor/google.golang.org/grpc/internal/channelz/util_linux_go19.go b/vendor/google.golang.org/grpc/internal/channelz/util_linux_go19.go
deleted file mode 100644
index e1e9e32d7..000000000
--- a/vendor/google.golang.org/grpc/internal/channelz/util_linux_go19.go
+++ /dev/null
@@ -1,39 +0,0 @@
-// +build linux,go1.9,!appengine
-
-/*
- *
- * Copyright 2018 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package channelz
-
-import (
- "syscall"
-)
-
-// GetSocketOption gets the socket option info of the conn.
-func GetSocketOption(socket interface{}) *SocketOptionData {
- c, ok := socket.(syscall.Conn)
- if !ok {
- return nil
- }
- data := &SocketOptionData{}
- if rawConn, err := c.SyscallConn(); err == nil {
- rawConn.Control(data.Getsockopt)
- return data
- }
- return nil
-}
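
GetSocketOption above only requires that the connection implement syscall.Conn; *net.TCPConn does via SyscallConn, so the same pattern can be reproduced outside the internal package. A Linux-only sketch under that assumption, reading just SO_LINGER rather than the full option set:

// +build linux

package main

import (
	"fmt"
	"log"
	"net"
	"syscall"

	"golang.org/x/sys/unix"
)

func main() {
	// Listen and dial a loopback TCP connection so there is a real socket to inspect.
	ln, err := net.Listen("tcp", "127.0.0.1:0")
	if err != nil {
		log.Fatal(err)
	}
	defer ln.Close()
	go ln.Accept()

	conn, err := net.Dial("tcp", ln.Addr().String())
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	// *net.TCPConn satisfies syscall.Conn, which is all channelz requires.
	sc, ok := conn.(syscall.Conn)
	if !ok {
		log.Fatal("connection does not expose a raw socket")
	}
	raw, err := sc.SyscallConn()
	if err != nil {
		log.Fatal(err)
	}

	var linger *unix.Linger
	// Control runs the callback with the socket's file descriptor.
	if err := raw.Control(func(fd uintptr) {
		if v, gerr := unix.GetsockoptLinger(int(fd), syscall.SOL_SOCKET, syscall.SO_LINGER); gerr == nil {
			linger = v
		}
	}); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("SO_LINGER: %+v\n", linger)
}
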
diff --git a/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go b/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go
deleted file mode 100644
index 200b115ca..000000000
--- a/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go
+++ /dev/null
@@ -1,56 +0,0 @@
-/*
- *
- * Copyright 2018 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-// Package grpcrand implements math/rand functions in a concurrent-safe way
-// with a global random source, independent of math/rand's global source.
-package grpcrand
-
-import (
- "math/rand"
- "sync"
- "time"
-)
-
-var (
- r = rand.New(rand.NewSource(time.Now().UnixNano()))
- mu sync.Mutex
-)
-
-// Int63n implements rand.Int63n on the grpcrand global source.
-func Int63n(n int64) int64 {
- mu.Lock()
- res := r.Int63n(n)
- mu.Unlock()
- return res
-}
-
-// Intn implements rand.Intn on the grpcrand global source.
-func Intn(n int) int {
- mu.Lock()
- res := r.Intn(n)
- mu.Unlock()
- return res
-}
-
-// Float64 implements rand.Float64 on the grpcrand global source.
-func Float64() float64 {
- mu.Lock()
- res := r.Float64()
- mu.Unlock()
- return res
-}
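
A locked wrapper like this is typically used where many goroutines need random numbers without racing on a shared source, for example to add jitter to retry backoff. A small sketch under that assumption, using its own locked source rather than the internal grpcrand package:

package main

import (
	"fmt"
	"math/rand"
	"sync"
	"time"
)

var (
	mu  sync.Mutex
	rng = rand.New(rand.NewSource(time.Now().UnixNano()))
)

// randFloat returns a concurrency-safe random float in [0, 1),
// following the same lock-call-unlock pattern as grpcrand.
func randFloat() float64 {
	mu.Lock()
	defer mu.Unlock()
	return rng.Float64()
}

// jittered scales d by a random factor in [0.8, 1.2) so that many
// clients retrying at once do not all wake up at the same instant.
func jittered(d time.Duration) time.Duration {
	return time.Duration(float64(d) * (0.8 + 0.4*randFloat()))
}

func main() {
	base := time.Second
	for i := 0; i < 3; i++ {
		fmt.Println(jittered(base))
	}
}
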
diff --git a/vendor/google.golang.org/grpc/internal/grpcsync/event.go b/vendor/google.golang.org/grpc/internal/grpcsync/event.go
deleted file mode 100644
index 85dbea886..000000000
--- a/vendor/google.golang.org/grpc/internal/grpcsync/event.go
+++ /dev/null
@@ -1,61 +0,0 @@
-/*
- *
- * Copyright 2018 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-// Package grpcsync implements additional synchronization primitives built upon
-// the sync package.
-package grpcsync
-
-import "sync"
-
-// Event represents a one-time event that may occur in the future.
-type Event struct {
- c chan struct{}
- o sync.Once
-}
-
-// Fire causes e to complete. It is safe to call multiple times, and
-// concurrently. It returns true iff this call to Fire caused the signaling
-// channel returned by Done to close.
-func (e *Event) Fire() bool {
- ret := false
- e.o.Do(func() {
- close(e.c)
- ret = true
- })
- return ret
-}
-
-// Done returns a channel that will be closed when Fire is called.
-func (e *Event) Done() <-chan struct{} {
- return e.c
-}
-
-// HasFired returns true if Fire has been called.
-func (e *Event) HasFired() bool {
- select {
- case <-e.c:
- return true
- default:
- return false
- }
-}
-
-// NewEvent returns a new, ready-to-use Event.
-func NewEvent() *Event {
- return &Event{c: make(chan struct{})}
-}
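
Event is a one-shot signal: Fire closes the channel exactly once, Done exposes it for select, and HasFired polls it. A minimal sketch of how such an event can coordinate shutdown, with the type redefined locally since the grpcsync package is internal:

package main

import (
	"fmt"
	"sync"
	"time"
)

// Event mirrors the one-time event shown above: Fire is idempotent and
// closes the channel returned by Done exactly once.
type Event struct {
	c chan struct{}
	o sync.Once
}

func NewEvent() *Event { return &Event{c: make(chan struct{})} }

func (e *Event) Fire() bool {
	fired := false
	e.o.Do(func() { close(e.c); fired = true })
	return fired
}

func (e *Event) Done() <-chan struct{} { return e.c }

func main() {
	shutdown := NewEvent()
	var wg sync.WaitGroup

	wg.Add(1)
	go func() {
		defer wg.Done()
		for {
			select {
			case <-shutdown.Done():
				fmt.Println("worker: stopping")
				return
			case <-time.After(10 * time.Millisecond):
				fmt.Println("worker: tick")
			}
		}
	}()

	time.Sleep(35 * time.Millisecond)
	fmt.Println("fired first:", shutdown.Fire()) // true
	fmt.Println("fired again:", shutdown.Fire()) // false
	wg.Wait()
}
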
diff --git a/vendor/google.golang.org/grpc/internal/internal.go b/vendor/google.golang.org/grpc/internal/internal.go
index 41f8af678..53f177520 100644
--- a/vendor/google.golang.org/grpc/internal/internal.go
+++ b/vendor/google.golang.org/grpc/internal/internal.go
@@ -15,29 +15,13 @@
*
*/
-// Package internal contains gRPC-internal code, to avoid polluting
-// the godoc of the top-level grpc package. It must not import any grpc
-// symbols to avoid circular dependencies.
+// Package internal contains gRPC-internal code for testing, to avoid polluting
+// the godoc of the top-level grpc package.
package internal
-import "golang.org/x/net/context"
-
-var (
- // WithContextDialer is exported by clientconn.go
- WithContextDialer interface{} // func(context.Context, string) (net.Conn, error) grpc.DialOption
- // WithResolverBuilder is exported by clientconn.go
- WithResolverBuilder interface{} // func (resolver.Builder) grpc.DialOption
- // HealthCheckFunc is used to provide client-side LB channel health checking
- HealthCheckFunc func(ctx context.Context, newStream func() (interface{}, error), reportHealth func(bool), serviceName string) error
-)
-
-const (
- // CredsBundleModeFallback switches GoogleDefaultCreds to fallback mode.
- CredsBundleModeFallback = "fallback"
- // CredsBundleModeBalancer switches GoogleDefaultCreds to grpclb balancer
- // mode.
- CredsBundleModeBalancer = "balancer"
- // CredsBundleModeBackendFromBalancer switches GoogleDefaultCreds to mode
- // that supports backend returned by grpclb balancer.
- CredsBundleModeBackendFromBalancer = "backend-from-balancer"
-)
+// TestingUseHandlerImpl enables the http.Handler-based server implementation.
+// It must be called before Serve and requires TLS credentials.
+//
+// The provided grpcServer must be of type *grpc.Server. It is untyped
+// for circular dependency reasons.
+var TestingUseHandlerImpl func(grpcServer interface{})
diff --git a/vendor/google.golang.org/grpc/internal/syscall/syscall_linux.go b/vendor/google.golang.org/grpc/internal/syscall/syscall_linux.go
deleted file mode 100644
index 87bc65a19..000000000
--- a/vendor/google.golang.org/grpc/internal/syscall/syscall_linux.go
+++ /dev/null
@@ -1,67 +0,0 @@
-// +build !appengine,go1.7
-
-/*
- *
- * Copyright 2018 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-// Package syscall provides functionality that grpc uses to get low-level operating system
-// stats/info.
-package syscall
-
-import (
- "syscall"
-
- "golang.org/x/sys/unix"
- "google.golang.org/grpc/grpclog"
-)
-
-// GetCPUTime returns how much CPU time has passed since the start of this process.
-func GetCPUTime() int64 {
- var ts unix.Timespec
- if err := unix.ClockGettime(unix.CLOCK_PROCESS_CPUTIME_ID, &ts); err != nil {
- grpclog.Fatal(err)
- }
- return ts.Nano()
-}
-
-// Rusage is an alias for syscall.Rusage under linux non-appengine environment.
-type Rusage syscall.Rusage
-
-// GetRusage returns the resource usage of current process.
-func GetRusage() (rusage *Rusage) {
- rusage = new(Rusage)
- syscall.Getrusage(syscall.RUSAGE_SELF, (*syscall.Rusage)(rusage))
- return
-}
-
-// CPUTimeDiff returns the differences of user CPU time and system CPU time used
-// between two Rusage structs.
-func CPUTimeDiff(first *Rusage, latest *Rusage) (float64, float64) {
- f := (*syscall.Rusage)(first)
- l := (*syscall.Rusage)(latest)
- var (
- utimeDiffs = l.Utime.Sec - f.Utime.Sec
- utimeDiffus = l.Utime.Usec - f.Utime.Usec
- stimeDiffs = l.Stime.Sec - f.Stime.Sec
- stimeDiffus = l.Stime.Usec - f.Stime.Usec
- )
-
- uTimeElapsed := float64(utimeDiffs) + float64(utimeDiffus)*1.0e-6
- sTimeElapsed := float64(stimeDiffs) + float64(stimeDiffus)*1.0e-6
-
- return uTimeElapsed, sTimeElapsed
-}
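
CPUTimeDiff above turns two Rusage snapshots into elapsed user and system CPU seconds (whole seconds plus microseconds scaled by 1e-6). A Linux-only sketch of the same calculation using syscall.Getrusage directly:

// +build linux

package main

import (
	"fmt"
	"log"
	"syscall"
)

// cpuTimeDiff returns the user and system CPU seconds consumed between
// two resource-usage snapshots, mirroring the calculation above.
func cpuTimeDiff(first, latest *syscall.Rusage) (user, system float64) {
	user = float64(latest.Utime.Sec-first.Utime.Sec) +
		float64(latest.Utime.Usec-first.Utime.Usec)*1e-6
	system = float64(latest.Stime.Sec-first.Stime.Sec) +
		float64(latest.Stime.Usec-first.Stime.Usec)*1e-6
	return user, system
}

func main() {
	var before, after syscall.Rusage
	if err := syscall.Getrusage(syscall.RUSAGE_SELF, &before); err != nil {
		log.Fatal(err)
	}

	// Burn a little CPU so the difference is visible.
	x := 0
	for i := 0; i < 50000000; i++ {
		x += i
	}

	if err := syscall.Getrusage(syscall.RUSAGE_SELF, &after); err != nil {
		log.Fatal(err)
	}
	u, s := cpuTimeDiff(&before, &after)
	fmt.Printf("x=%d user=%.3fs system=%.3fs\n", x, u, s)
}
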
diff --git a/vendor/google.golang.org/grpc/internal/syscall/syscall_nonlinux.go b/vendor/google.golang.org/grpc/internal/syscall/syscall_nonlinux.go
deleted file mode 100644
index 16a5c3fe4..000000000
--- a/vendor/google.golang.org/grpc/internal/syscall/syscall_nonlinux.go
+++ /dev/null
@@ -1,47 +0,0 @@
-// +build !linux appengine !go1.7
-
-/*
- *
- * Copyright 2018 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package syscall
-
-import "google.golang.org/grpc/grpclog"
-
-func init() {
- grpclog.Info("CPU time info is unavailable on non-linux or appengine environment.")
-}
-
-// GetCPUTime returns how much CPU time has passed since the start of this process.
-// It always returns 0 under non-linux or appengine environment.
-func GetCPUTime() int64 {
- return 0
-}
-
-// Rusage is an empty struct under non-linux or appengine environment.
-type Rusage struct{}
-
-// GetRusage is a no-op function under non-linux or appengine environment.
-func GetRusage() (rusage *Rusage) {
- return nil
-}
-
-// CPUTimeDiff returns the differences of user CPU time and system CPU time used
-// between two Rusage structs. It is a no-op function in non-linux or appengine environments.
-func CPUTimeDiff(first *Rusage, latest *Rusage) (float64, float64) {
- return 0, 0
-}
diff --git a/vendor/google.golang.org/grpc/internal/transport/controlbuf.go b/vendor/google.golang.org/grpc/internal/transport/controlbuf.go
deleted file mode 100644
index 204ba1588..000000000
--- a/vendor/google.golang.org/grpc/internal/transport/controlbuf.go
+++ /dev/null
@@ -1,852 +0,0 @@
-/*
- *
- * Copyright 2014 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package transport
-
-import (
- "bytes"
- "fmt"
- "runtime"
- "sync"
-
- "golang.org/x/net/http2"
- "golang.org/x/net/http2/hpack"
-)
-
-var updateHeaderTblSize = func(e *hpack.Encoder, v uint32) {
- e.SetMaxDynamicTableSizeLimit(v)
-}
-
-type itemNode struct {
- it interface{}
- next *itemNode
-}
-
-type itemList struct {
- head *itemNode
- tail *itemNode
-}
-
-func (il *itemList) enqueue(i interface{}) {
- n := &itemNode{it: i}
- if il.tail == nil {
- il.head, il.tail = n, n
- return
- }
- il.tail.next = n
- il.tail = n
-}
-
-// peek returns the first item in the list without removing it from the
-// list.
-func (il *itemList) peek() interface{} {
- return il.head.it
-}
-
-func (il *itemList) dequeue() interface{} {
- if il.head == nil {
- return nil
- }
- i := il.head.it
- il.head = il.head.next
- if il.head == nil {
- il.tail = nil
- }
- return i
-}
-
-func (il *itemList) dequeueAll() *itemNode {
- h := il.head
- il.head, il.tail = nil, nil
- return h
-}
-
-func (il *itemList) isEmpty() bool {
- return il.head == nil
-}
-
-// The following defines various control items which could flow through
-// the control buffer of transport. They represent different aspects of
-// control tasks, e.g., flow control, settings, streaming resetting, etc.
-
-// registerStream is used to register an incoming stream with loopy writer.
-type registerStream struct {
- streamID uint32
- wq *writeQuota
-}
-
-// headerFrame is also used to register stream on the client-side.
-type headerFrame struct {
- streamID uint32
- hf []hpack.HeaderField
- endStream bool // Valid on server side.
- initStream func(uint32) (bool, error) // Used only on the client side.
- onWrite func()
- wq *writeQuota // write quota for the stream created.
- cleanup *cleanupStream // Valid on the server side.
- onOrphaned func(error) // Valid on client-side
-}
-
-type cleanupStream struct {
- streamID uint32
- rst bool
- rstCode http2.ErrCode
- onWrite func()
-}
-
-type dataFrame struct {
- streamID uint32
- endStream bool
- h []byte
- d []byte
- // onEachWrite is called every time
- // a part of d is written out.
- onEachWrite func()
-}
-
-type incomingWindowUpdate struct {
- streamID uint32
- increment uint32
-}
-
-type outgoingWindowUpdate struct {
- streamID uint32
- increment uint32
-}
-
-type incomingSettings struct {
- ss []http2.Setting
-}
-
-type outgoingSettings struct {
- ss []http2.Setting
-}
-
-type incomingGoAway struct {
-}
-
-type goAway struct {
- code http2.ErrCode
- debugData []byte
- headsUp bool
- closeConn bool
-}
-
-type ping struct {
- ack bool
- data [8]byte
-}
-
-type outFlowControlSizeRequest struct {
- resp chan uint32
-}
-
-type outStreamState int
-
-const (
- active outStreamState = iota
- empty
- waitingOnStreamQuota
-)
-
-type outStream struct {
- id uint32
- state outStreamState
- itl *itemList
- bytesOutStanding int
- wq *writeQuota
-
- next *outStream
- prev *outStream
-}
-
-func (s *outStream) deleteSelf() {
- if s.prev != nil {
- s.prev.next = s.next
- }
- if s.next != nil {
- s.next.prev = s.prev
- }
- s.next, s.prev = nil, nil
-}
-
-type outStreamList struct {
- // Following are sentinel objects that mark the
- // beginning and end of the list. They do not
- // contain any item lists. All valid objects are
- // inserted in between them.
- // This is needed so that an outStream object can
- // deleteSelf() in O(1) time without knowing which
- // list it belongs to.
- head *outStream
- tail *outStream
-}
-
-func newOutStreamList() *outStreamList {
- head, tail := new(outStream), new(outStream)
- head.next = tail
- tail.prev = head
- return &outStreamList{
- head: head,
- tail: tail,
- }
-}
-
-func (l *outStreamList) enqueue(s *outStream) {
- e := l.tail.prev
- e.next = s
- s.prev = e
- s.next = l.tail
- l.tail.prev = s
-}
-
-// remove from the beginning of the list.
-func (l *outStreamList) dequeue() *outStream {
- b := l.head.next
- if b == l.tail {
- return nil
- }
- b.deleteSelf()
- return b
-}
-
-// controlBuffer is a way to pass information to loopy.
-// Information is passed as specific struct types called control frames.
-// A control frame not only represents data, messages or headers to be sent out
-// but can also be used to instruct loopy to update its internal state.
-// It shouldn't be confused with an HTTP2 frame, although some of the control frames
-// like dataFrame and headerFrame do go out on wire as HTTP2 frames.
-type controlBuffer struct {
- ch chan struct{}
- done <-chan struct{}
- mu sync.Mutex
- consumerWaiting bool
- list *itemList
- err error
-}
-
-func newControlBuffer(done <-chan struct{}) *controlBuffer {
- return &controlBuffer{
- ch: make(chan struct{}, 1),
- list: &itemList{},
- done: done,
- }
-}
-
-func (c *controlBuffer) put(it interface{}) error {
- _, err := c.executeAndPut(nil, it)
- return err
-}
-
-func (c *controlBuffer) executeAndPut(f func(it interface{}) bool, it interface{}) (bool, error) {
- var wakeUp bool
- c.mu.Lock()
- if c.err != nil {
- c.mu.Unlock()
- return false, c.err
- }
- if f != nil {
- if !f(it) { // f wasn't successful
- c.mu.Unlock()
- return false, nil
- }
- }
- if c.consumerWaiting {
- wakeUp = true
- c.consumerWaiting = false
- }
- c.list.enqueue(it)
- c.mu.Unlock()
- if wakeUp {
- select {
- case c.ch <- struct{}{}:
- default:
- }
- }
- return true, nil
-}
-
-// Note argument f should never be nil.
-func (c *controlBuffer) execute(f func(it interface{}) bool, it interface{}) (bool, error) {
- c.mu.Lock()
- if c.err != nil {
- c.mu.Unlock()
- return false, c.err
- }
- if !f(it) { // f wasn't successful
- c.mu.Unlock()
- return false, nil
- }
- c.mu.Unlock()
- return true, nil
-}
-
-func (c *controlBuffer) get(block bool) (interface{}, error) {
- for {
- c.mu.Lock()
- if c.err != nil {
- c.mu.Unlock()
- return nil, c.err
- }
- if !c.list.isEmpty() {
- h := c.list.dequeue()
- c.mu.Unlock()
- return h, nil
- }
- if !block {
- c.mu.Unlock()
- return nil, nil
- }
- c.consumerWaiting = true
- c.mu.Unlock()
- select {
- case <-c.ch:
- case <-c.done:
- c.finish()
- return nil, ErrConnClosing
- }
- }
-}
-
-func (c *controlBuffer) finish() {
- c.mu.Lock()
- if c.err != nil {
- c.mu.Unlock()
- return
- }
- c.err = ErrConnClosing
- // There may be headers for streams in the control buffer.
- // These streams need to be cleaned out since the transport
- // is still not aware of these yet.
- for head := c.list.dequeueAll(); head != nil; head = head.next {
- hdr, ok := head.it.(*headerFrame)
- if !ok {
- continue
- }
- if hdr.onOrphaned != nil { // It will be nil on the server-side.
- hdr.onOrphaned(ErrConnClosing)
- }
- }
- c.mu.Unlock()
-}
-
-type side int
-
-const (
- clientSide side = iota
- serverSide
-)
-
-// Loopy receives frames from the control buffer.
-// Each frame is handled individually; most of the work done by loopy goes
-// into handling data frames. Loopy maintains a queue of active streams, and each
-// stream maintains a queue of data frames; as loopy receives data frames,
-// they are added to the queue of the relevant stream.
-// Loopy goes over this list of active streams by processing one node every iteration,
-// thereby closely resembling round-robin scheduling over all streams. While
-// processing a stream, loopy writes out data bytes from this stream capped by the min
-// of http2MaxFrameLen, connection-level flow control and stream-level flow control.
-type loopyWriter struct {
- side side
- cbuf *controlBuffer
- sendQuota uint32
- oiws uint32 // outbound initial window size.
- // estdStreams is map of all established streams that are not cleaned-up yet.
- // On client-side, this is all streams whose headers were sent out.
- // On server-side, this is all streams whose headers were received.
- estdStreams map[uint32]*outStream // Established streams.
- // activeStreams is a linked-list of all streams that have data to send and some
- // stream-level flow control quota.
- // Each of these streams internally have a list of data items(and perhaps trailers
- // on the server-side) to be sent out.
- activeStreams *outStreamList
- framer *framer
- hBuf *bytes.Buffer // The buffer for HPACK encoding.
- hEnc *hpack.Encoder // HPACK encoder.
- bdpEst *bdpEstimator
- draining bool
-
- // Side-specific handlers
- ssGoAwayHandler func(*goAway) (bool, error)
-}
-
-func newLoopyWriter(s side, fr *framer, cbuf *controlBuffer, bdpEst *bdpEstimator) *loopyWriter {
- var buf bytes.Buffer
- l := &loopyWriter{
- side: s,
- cbuf: cbuf,
- sendQuota: defaultWindowSize,
- oiws: defaultWindowSize,
- estdStreams: make(map[uint32]*outStream),
- activeStreams: newOutStreamList(),
- framer: fr,
- hBuf: &buf,
- hEnc: hpack.NewEncoder(&buf),
- bdpEst: bdpEst,
- }
- return l
-}
-
-const minBatchSize = 1000
-
-// run should be run in a separate goroutine.
-// It reads control frames from controlBuf and processes them by:
-// 1. Updating loopy's internal state, or/and
-// 2. Writing out HTTP2 frames on the wire.
-//
-// Loopy keeps all active streams with data to send in a linked-list.
-// All streams in the activeStreams linked-list must have both:
-// 1. Data to send, and
-// 2. Stream level flow control quota available.
-//
-// In each iteration of run loop, other than processing the incoming control
-// frame, loopy calls processData, which processes one node from the activeStreams linked-list.
-// This results in writing of HTTP2 frames into an underlying write buffer.
-// When there's no more control frames to read from controlBuf, loopy flushes the write buffer.
-// As an optimization, to increase the batch size for each flush, loopy yields the processor, once
-// if the batch size is too low to give stream goroutines a chance to fill it up.
-func (l *loopyWriter) run() (err error) {
- defer func() {
- if err == ErrConnClosing {
- // Don't log ErrConnClosing as error since it happens
- // 1. When the connection is closed by some other known issue.
- // 2. User closed the connection.
- // 3. A graceful close of connection.
- infof("transport: loopyWriter.run returning. %v", err)
- err = nil
- }
- }()
- for {
- it, err := l.cbuf.get(true)
- if err != nil {
- return err
- }
- if err = l.handle(it); err != nil {
- return err
- }
- if _, err = l.processData(); err != nil {
- return err
- }
- gosched := true
- hasdata:
- for {
- it, err := l.cbuf.get(false)
- if err != nil {
- return err
- }
- if it != nil {
- if err = l.handle(it); err != nil {
- return err
- }
- if _, err = l.processData(); err != nil {
- return err
- }
- continue hasdata
- }
- isEmpty, err := l.processData()
- if err != nil {
- return err
- }
- if !isEmpty {
- continue hasdata
- }
- if gosched {
- gosched = false
- if l.framer.writer.offset < minBatchSize {
- runtime.Gosched()
- continue hasdata
- }
- }
- l.framer.writer.Flush()
- break hasdata
-
- }
- }
-}
-
-func (l *loopyWriter) outgoingWindowUpdateHandler(w *outgoingWindowUpdate) error {
- return l.framer.fr.WriteWindowUpdate(w.streamID, w.increment)
-}
-
-func (l *loopyWriter) incomingWindowUpdateHandler(w *incomingWindowUpdate) error {
- // Otherwise update the quota.
- if w.streamID == 0 {
- l.sendQuota += w.increment
- return nil
- }
- // Find the stream and update it.
- if str, ok := l.estdStreams[w.streamID]; ok {
- str.bytesOutStanding -= int(w.increment)
- if strQuota := int(l.oiws) - str.bytesOutStanding; strQuota > 0 && str.state == waitingOnStreamQuota {
- str.state = active
- l.activeStreams.enqueue(str)
- return nil
- }
- }
- return nil
-}
-
-func (l *loopyWriter) outgoingSettingsHandler(s *outgoingSettings) error {
- return l.framer.fr.WriteSettings(s.ss...)
-}
-
-func (l *loopyWriter) incomingSettingsHandler(s *incomingSettings) error {
- if err := l.applySettings(s.ss); err != nil {
- return err
- }
- return l.framer.fr.WriteSettingsAck()
-}
-
-func (l *loopyWriter) registerStreamHandler(h *registerStream) error {
- str := &outStream{
- id: h.streamID,
- state: empty,
- itl: &itemList{},
- wq: h.wq,
- }
- l.estdStreams[h.streamID] = str
- return nil
-}
-
-func (l *loopyWriter) headerHandler(h *headerFrame) error {
- if l.side == serverSide {
- str, ok := l.estdStreams[h.streamID]
- if !ok {
- warningf("transport: loopy doesn't recognize the stream: %d", h.streamID)
- return nil
- }
- // Case 1.A: Server is responding back with headers.
- if !h.endStream {
- return l.writeHeader(h.streamID, h.endStream, h.hf, h.onWrite)
- }
- // else: Case 1.B: Server wants to close stream.
-
- if str.state != empty { // either active or waiting on stream quota.
-			// add it to str's list of items.
- str.itl.enqueue(h)
- return nil
- }
- if err := l.writeHeader(h.streamID, h.endStream, h.hf, h.onWrite); err != nil {
- return err
- }
- return l.cleanupStreamHandler(h.cleanup)
- }
- // Case 2: Client wants to originate stream.
- str := &outStream{
- id: h.streamID,
- state: empty,
- itl: &itemList{},
- wq: h.wq,
- }
- str.itl.enqueue(h)
- return l.originateStream(str)
-}
-
-func (l *loopyWriter) originateStream(str *outStream) error {
- hdr := str.itl.dequeue().(*headerFrame)
- sendPing, err := hdr.initStream(str.id)
- if err != nil {
- if err == ErrConnClosing {
- return err
- }
- // Other errors(errStreamDrain) need not close transport.
- return nil
- }
- if err = l.writeHeader(str.id, hdr.endStream, hdr.hf, hdr.onWrite); err != nil {
- return err
- }
- l.estdStreams[str.id] = str
- if sendPing {
- return l.pingHandler(&ping{data: [8]byte{}})
- }
- return nil
-}
-
-func (l *loopyWriter) writeHeader(streamID uint32, endStream bool, hf []hpack.HeaderField, onWrite func()) error {
- if onWrite != nil {
- onWrite()
- }
- l.hBuf.Reset()
- for _, f := range hf {
- if err := l.hEnc.WriteField(f); err != nil {
-			warningf("transport: loopyWriter.writeHeader encountered error while encoding headers: %v", err)
- }
- }
- var (
- err error
- endHeaders, first bool
- )
- first = true
- for !endHeaders {
- size := l.hBuf.Len()
- if size > http2MaxFrameLen {
- size = http2MaxFrameLen
- } else {
- endHeaders = true
- }
- if first {
- first = false
- err = l.framer.fr.WriteHeaders(http2.HeadersFrameParam{
- StreamID: streamID,
- BlockFragment: l.hBuf.Next(size),
- EndStream: endStream,
- EndHeaders: endHeaders,
- })
- } else {
- err = l.framer.fr.WriteContinuation(
- streamID,
- endHeaders,
- l.hBuf.Next(size),
- )
- }
- if err != nil {
- return err
- }
- }
- return nil
-}
-
-func (l *loopyWriter) preprocessData(df *dataFrame) error {
- str, ok := l.estdStreams[df.streamID]
- if !ok {
- return nil
- }
- // If we got data for a stream it means that
- // stream was originated and the headers were sent out.
- str.itl.enqueue(df)
- if str.state == empty {
- str.state = active
- l.activeStreams.enqueue(str)
- }
- return nil
-}
-
-func (l *loopyWriter) pingHandler(p *ping) error {
- if !p.ack {
- l.bdpEst.timesnap(p.data)
- }
- return l.framer.fr.WritePing(p.ack, p.data)
-
-}
-
-func (l *loopyWriter) outFlowControlSizeRequestHandler(o *outFlowControlSizeRequest) error {
- o.resp <- l.sendQuota
- return nil
-}
-
-func (l *loopyWriter) cleanupStreamHandler(c *cleanupStream) error {
- c.onWrite()
- if str, ok := l.estdStreams[c.streamID]; ok {
- // On the server side it could be a trailers-only response or
- // a RST_STREAM before stream initialization thus the stream might
- // not be established yet.
- delete(l.estdStreams, c.streamID)
- str.deleteSelf()
- }
- if c.rst { // If RST_STREAM needs to be sent.
- if err := l.framer.fr.WriteRSTStream(c.streamID, c.rstCode); err != nil {
- return err
- }
- }
- if l.side == clientSide && l.draining && len(l.estdStreams) == 0 {
- return ErrConnClosing
- }
- return nil
-}
-
-func (l *loopyWriter) incomingGoAwayHandler(*incomingGoAway) error {
- if l.side == clientSide {
- l.draining = true
- if len(l.estdStreams) == 0 {
- return ErrConnClosing
- }
- }
- return nil
-}
-
-func (l *loopyWriter) goAwayHandler(g *goAway) error {
- // Handling of outgoing GoAway is very specific to side.
- if l.ssGoAwayHandler != nil {
- draining, err := l.ssGoAwayHandler(g)
- if err != nil {
- return err
- }
- l.draining = draining
- }
- return nil
-}
-
-func (l *loopyWriter) handle(i interface{}) error {
- switch i := i.(type) {
- case *incomingWindowUpdate:
- return l.incomingWindowUpdateHandler(i)
- case *outgoingWindowUpdate:
- return l.outgoingWindowUpdateHandler(i)
- case *incomingSettings:
- return l.incomingSettingsHandler(i)
- case *outgoingSettings:
- return l.outgoingSettingsHandler(i)
- case *headerFrame:
- return l.headerHandler(i)
- case *registerStream:
- return l.registerStreamHandler(i)
- case *cleanupStream:
- return l.cleanupStreamHandler(i)
- case *incomingGoAway:
- return l.incomingGoAwayHandler(i)
- case *dataFrame:
- return l.preprocessData(i)
- case *ping:
- return l.pingHandler(i)
- case *goAway:
- return l.goAwayHandler(i)
- case *outFlowControlSizeRequest:
- return l.outFlowControlSizeRequestHandler(i)
- default:
- return fmt.Errorf("transport: unknown control message type %T", i)
- }
-}
-
-func (l *loopyWriter) applySettings(ss []http2.Setting) error {
- for _, s := range ss {
- switch s.ID {
- case http2.SettingInitialWindowSize:
- o := l.oiws
- l.oiws = s.Val
- if o < l.oiws {
- // If the new limit is greater make all depleted streams active.
- for _, stream := range l.estdStreams {
- if stream.state == waitingOnStreamQuota {
- stream.state = active
- l.activeStreams.enqueue(stream)
- }
- }
- }
- case http2.SettingHeaderTableSize:
- updateHeaderTblSize(l.hEnc, s.Val)
- }
- }
- return nil
-}
-
-// processData removes the first stream from active streams, writes out at most 16KB
-// of its data and then puts it at the end of activeStreams if there's still more data
-// to be sent and stream has some stream-level flow control.
-func (l *loopyWriter) processData() (bool, error) {
- if l.sendQuota == 0 {
- return true, nil
- }
- str := l.activeStreams.dequeue() // Remove the first stream.
- if str == nil {
- return true, nil
- }
-	dataItem := str.itl.peek().(*dataFrame) // Peek at the first data item of this stream.
- // A data item is represented by a dataFrame, since it later translates into
- // multiple HTTP2 data frames.
-	// Every dataFrame has two buffers: h, which holds the grpc-message header, and d, which holds the actual data.
-	// As an optimization to keep wire traffic low, data from d is copied into h to make the frame as big as the
-	// maximum possible HTTP2 frame size.
-
- if len(dataItem.h) == 0 && len(dataItem.d) == 0 { // Empty data frame
- // Client sends out empty data frame with endStream = true
- if err := l.framer.fr.WriteData(dataItem.streamID, dataItem.endStream, nil); err != nil {
- return false, err
- }
- str.itl.dequeue() // remove the empty data item from stream
- if str.itl.isEmpty() {
- str.state = empty
- } else if trailer, ok := str.itl.peek().(*headerFrame); ok { // the next item is trailers.
- if err := l.writeHeader(trailer.streamID, trailer.endStream, trailer.hf, trailer.onWrite); err != nil {
- return false, err
- }
- if err := l.cleanupStreamHandler(trailer.cleanup); err != nil {
-				return false, err
- }
- } else {
- l.activeStreams.enqueue(str)
- }
- return false, nil
- }
- var (
- idx int
- buf []byte
- )
- if len(dataItem.h) != 0 { // data header has not been written out yet.
- buf = dataItem.h
- } else {
- idx = 1
- buf = dataItem.d
- }
- size := http2MaxFrameLen
- if len(buf) < size {
- size = len(buf)
- }
- if strQuota := int(l.oiws) - str.bytesOutStanding; strQuota <= 0 { // stream-level flow control.
- str.state = waitingOnStreamQuota
- return false, nil
- } else if strQuota < size {
- size = strQuota
- }
-
- if l.sendQuota < uint32(size) { // connection-level flow control.
- size = int(l.sendQuota)
- }
-	// Now that outgoing flow controls are checked, we can replenish str's write quota.
- str.wq.replenish(size)
- var endStream bool
- // If this is the last data message on this stream and all of it can be written in this iteration.
- if dataItem.endStream && size == len(buf) {
-		// buf contains either data, or it contains the header while the data is empty.
- if idx == 1 || len(dataItem.d) == 0 {
- endStream = true
- }
- }
- if dataItem.onEachWrite != nil {
- dataItem.onEachWrite()
- }
- if err := l.framer.fr.WriteData(dataItem.streamID, endStream, buf[:size]); err != nil {
- return false, err
- }
- buf = buf[size:]
- str.bytesOutStanding += size
- l.sendQuota -= uint32(size)
- if idx == 0 {
- dataItem.h = buf
- } else {
- dataItem.d = buf
- }
-
- if len(dataItem.h) == 0 && len(dataItem.d) == 0 { // All the data from that message was written out.
- str.itl.dequeue()
- }
- if str.itl.isEmpty() {
- str.state = empty
- } else if trailer, ok := str.itl.peek().(*headerFrame); ok { // The next item is trailers.
- if err := l.writeHeader(trailer.streamID, trailer.endStream, trailer.hf, trailer.onWrite); err != nil {
- return false, err
- }
- if err := l.cleanupStreamHandler(trailer.cleanup); err != nil {
- return false, err
- }
- } else if int(l.oiws)-str.bytesOutStanding <= 0 { // Ran out of stream quota.
- str.state = waitingOnStreamQuota
- } else { // Otherwise add it back to the list of active streams.
- l.activeStreams.enqueue(str)
- }
- return false, nil
-}
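The processData function deleted above is, at its core, a round-robin scheduler: each turn it dequeues one active stream, writes out at most 16KB of that stream's data (subject to stream- and connection-level quota), and re-queues the stream if it still has data left. The following standalone sketch shows only that scheduling idea, with made-up stream IDs and byte counts and none of the HTTP/2 framing or flow-control bookkeeping from the real code:

package main

import "fmt"

// maxFrameLen mirrors the 16KB per-turn write cap used by the loopy writer.
const maxFrameLen = 16 * 1024

type stream struct {
	id      int
	pending int // bytes still waiting to be written
}

func main() {
	// Two hypothetical active streams with pending data.
	active := []*stream{{id: 1, pending: 40000}, {id: 3, pending: 10000}}
	for len(active) > 0 {
		str := active[0] // dequeue the first stream
		active = active[1:]
		n := maxFrameLen
		if str.pending < n {
			n = str.pending
		}
		str.pending -= n
		fmt.Printf("stream %d: wrote %d bytes, %d left\n", str.id, n, str.pending)
		if str.pending > 0 { // still more data: back to the end of the queue
			active = append(active, str)
		}
	}
}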
diff --git a/vendor/google.golang.org/grpc/internal/transport/defaults.go b/vendor/google.golang.org/grpc/internal/transport/defaults.go
deleted file mode 100644
index 9fa306b2e..000000000
--- a/vendor/google.golang.org/grpc/internal/transport/defaults.go
+++ /dev/null
@@ -1,49 +0,0 @@
-/*
- *
- * Copyright 2018 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package transport
-
-import (
- "math"
- "time"
-)
-
-const (
- // The default value of flow control window size in HTTP2 spec.
- defaultWindowSize = 65535
- // The initial window size for flow control.
- initialWindowSize = defaultWindowSize // for an RPC
- infinity = time.Duration(math.MaxInt64)
- defaultClientKeepaliveTime = infinity
- defaultClientKeepaliveTimeout = 20 * time.Second
- defaultMaxStreamsClient = 100
- defaultMaxConnectionIdle = infinity
- defaultMaxConnectionAge = infinity
- defaultMaxConnectionAgeGrace = infinity
- defaultServerKeepaliveTime = 2 * time.Hour
- defaultServerKeepaliveTimeout = 20 * time.Second
- defaultKeepalivePolicyMinTime = 5 * time.Minute
- // max window limit set by HTTP2 Specs.
- maxWindowSize = math.MaxInt32
-	// defaultWriteQuota is the default value for the number of data
-	// bytes that each stream can schedule before some of it is
-	// flushed out.
- defaultWriteQuota = 64 * 1024
- defaultClientMaxHeaderListSize = uint32(16 << 20)
- defaultServerMaxHeaderListSize = uint32(16 << 20)
-)
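The 64KB window defaults removed here are only starting points; callers of grpc-go can raise the per-stream and per-connection receive windows through public dial options. A minimal sketch, assuming the grpc.WithInitialWindowSize and grpc.WithInitialConnWindowSize options from the grpc-go package; the target address and the 1MB figure are illustrative, not taken from this diff:

package main

import (
	"log"

	"google.golang.org/grpc"
)

func main() {
	// Raise both flow-control windows from the 64KB default to 1MB,
	// e.g. for high-latency links where the default window limits throughput.
	conn, err := grpc.Dial("localhost:9999",
		grpc.WithInsecure(),
		grpc.WithInitialWindowSize(1<<20),     // per-stream receive window
		grpc.WithInitialConnWindowSize(1<<20), // per-connection receive window
	)
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	defer conn.Close()
}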
diff --git a/vendor/google.golang.org/grpc/internal/transport/flowcontrol.go b/vendor/google.golang.org/grpc/internal/transport/flowcontrol.go
deleted file mode 100644
index 5ea997a7e..000000000
--- a/vendor/google.golang.org/grpc/internal/transport/flowcontrol.go
+++ /dev/null
@@ -1,218 +0,0 @@
-/*
- *
- * Copyright 2014 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package transport
-
-import (
- "fmt"
- "math"
- "sync"
- "sync/atomic"
-)
-
-// writeQuota is a soft limit on the amount of data a stream can
-// schedule before some of it is written out.
-type writeQuota struct {
- quota int32
-	// get blocks, waiting for a read from ch, when quota is less than or equal to zero.
-	// replenish writes to ch when quota becomes positive again.
- ch chan struct{}
- // done is triggered in error case.
- done <-chan struct{}
-	// replenish is called by loopyWriter to give quota back to the stream.
- // It is implemented as a field so that it can be updated
- // by tests.
- replenish func(n int)
-}
-
-func newWriteQuota(sz int32, done <-chan struct{}) *writeQuota {
- w := &writeQuota{
- quota: sz,
- ch: make(chan struct{}, 1),
- done: done,
- }
- w.replenish = w.realReplenish
- return w
-}
-
-func (w *writeQuota) get(sz int32) error {
- for {
- if atomic.LoadInt32(&w.quota) > 0 {
- atomic.AddInt32(&w.quota, -sz)
- return nil
- }
- select {
- case <-w.ch:
- continue
- case <-w.done:
- return errStreamDone
- }
- }
-}
-
-func (w *writeQuota) realReplenish(n int) {
- sz := int32(n)
- a := atomic.AddInt32(&w.quota, sz)
- b := a - sz
- if b <= 0 && a > 0 {
- select {
- case w.ch <- struct{}{}:
- default:
- }
- }
-}
-
-type trInFlow struct {
- limit uint32
- unacked uint32
- effectiveWindowSize uint32
-}
-
-func (f *trInFlow) newLimit(n uint32) uint32 {
- d := n - f.limit
- f.limit = n
- f.updateEffectiveWindowSize()
- return d
-}
-
-func (f *trInFlow) onData(n uint32) uint32 {
- f.unacked += n
- if f.unacked >= f.limit/4 {
- w := f.unacked
- f.unacked = 0
- f.updateEffectiveWindowSize()
- return w
- }
- f.updateEffectiveWindowSize()
- return 0
-}
-
-func (f *trInFlow) reset() uint32 {
- w := f.unacked
- f.unacked = 0
- f.updateEffectiveWindowSize()
- return w
-}
-
-func (f *trInFlow) updateEffectiveWindowSize() {
- atomic.StoreUint32(&f.effectiveWindowSize, f.limit-f.unacked)
-}
-
-func (f *trInFlow) getSize() uint32 {
- return atomic.LoadUint32(&f.effectiveWindowSize)
-}
-
-// TODO(mmukhi): Simplify this code.
-// inFlow deals with inbound flow control
-type inFlow struct {
- mu sync.Mutex
- // The inbound flow control limit for pending data.
- limit uint32
-	// pendingData is the overall data which has been received but not yet
-	// consumed by the application.
- pendingData uint32
-	// pendingUpdate is the amount of data the application has consumed for which grpc
-	// has not yet sent a window update. Used to reduce window update frequency.
- pendingUpdate uint32
- // delta is the extra window update given by receiver when an application
- // is reading data bigger in size than the inFlow limit.
- delta uint32
-}
-
-// newLimit updates the inflow window to a new value n.
-// It assumes that n is always greater than the old limit.
-func (f *inFlow) newLimit(n uint32) uint32 {
- f.mu.Lock()
- d := n - f.limit
- f.limit = n
- f.mu.Unlock()
- return d
-}
-
-func (f *inFlow) maybeAdjust(n uint32) uint32 {
- if n > uint32(math.MaxInt32) {
- n = uint32(math.MaxInt32)
- }
- f.mu.Lock()
- // estSenderQuota is the receiver's view of the maximum number of bytes the sender
- // can send without a window update.
- estSenderQuota := int32(f.limit - (f.pendingData + f.pendingUpdate))
-	// estUntransmittedData is the maximum number of bytes the sender might not have put
- // on the wire yet. A value of 0 or less means that we have already received all or
- // more bytes than the application is requesting to read.
- estUntransmittedData := int32(n - f.pendingData) // Casting into int32 since it could be negative.
- // This implies that unless we send a window update, the sender won't be able to send all the bytes
- // for this message. Therefore we must send an update over the limit since there's an active read
- // request from the application.
- if estUntransmittedData > estSenderQuota {
-		// Sender's window shouldn't exceed 2^31 - 1, as specified in the HTTP/2 spec.
- if f.limit+n > maxWindowSize {
- f.delta = maxWindowSize - f.limit
- } else {
- // Send a window update for the whole message and not just the difference between
- // estUntransmittedData and estSenderQuota. This will be helpful in case the message
-			// is padded; we will fall back on the currently available window (at least a 1/4th of the limit).
- f.delta = n
- }
- f.mu.Unlock()
- return f.delta
- }
- f.mu.Unlock()
- return 0
-}
-
-// onData is invoked when some data frame is received. It updates pendingData.
-func (f *inFlow) onData(n uint32) error {
- f.mu.Lock()
- f.pendingData += n
- if f.pendingData+f.pendingUpdate > f.limit+f.delta {
- limit := f.limit
- rcvd := f.pendingData + f.pendingUpdate
- f.mu.Unlock()
- return fmt.Errorf("received %d-bytes data exceeding the limit %d bytes", rcvd, limit)
- }
- f.mu.Unlock()
- return nil
-}
-
-// onRead is invoked when the application reads the data. It returns the window size
-// to be sent to the peer.
-func (f *inFlow) onRead(n uint32) uint32 {
- f.mu.Lock()
- if f.pendingData == 0 {
- f.mu.Unlock()
- return 0
- }
- f.pendingData -= n
- if n > f.delta {
- n -= f.delta
- f.delta = 0
- } else {
- f.delta -= n
- n = 0
- }
- f.pendingUpdate += n
- if f.pendingUpdate >= f.limit/4 {
- wu := f.pendingUpdate
- f.pendingUpdate = 0
- f.mu.Unlock()
- return wu
- }
- f.mu.Unlock()
- return 0
-}
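A detail worth calling out in the inFlow code above: a window update is only reported back to the peer once the amount read (or unacknowledged) crosses a quarter of the window limit, which keeps WINDOW_UPDATE frames infrequent. Below is a stripped-down sketch of just that threshold rule, with no locking and no delta handling; the byte counts are made up:

package main

import "fmt"

// window is a simplified model of the inbound flow-control bookkeeping:
// updates are batched until they are worth at least a quarter of the limit.
type window struct {
	limit         uint32
	pendingData   uint32 // received but not yet read by the application
	pendingUpdate uint32 // read by the application but not yet acknowledged to the peer
}

func (w *window) onData(n uint32) { w.pendingData += n }

// onRead returns the window update to send, or 0 if it is not yet worth sending.
func (w *window) onRead(n uint32) uint32 {
	w.pendingData -= n
	w.pendingUpdate += n
	if w.pendingUpdate >= w.limit/4 {
		wu := w.pendingUpdate
		w.pendingUpdate = 0
		return wu
	}
	return 0
}

func main() {
	w := &window{limit: 65535}
	w.onData(20000)
	fmt.Println(w.onRead(10000)) // 0: still below the 16383-byte threshold
	fmt.Println(w.onRead(10000)) // 20000: threshold crossed, send a window update
}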
diff --git a/vendor/google.golang.org/grpc/keepalive/keepalive.go b/vendor/google.golang.org/grpc/keepalive/keepalive.go
index 78eea1fc9..f8adc7e6d 100644
--- a/vendor/google.golang.org/grpc/keepalive/keepalive.go
+++ b/vendor/google.golang.org/grpc/keepalive/keepalive.go
@@ -16,8 +16,7 @@
*
*/
-// Package keepalive defines configurable parameters for point-to-point
-// healthcheck.
+// Package keepalive defines configurable parameters for point-to-point healthcheck.
package keepalive
import (
@@ -25,59 +24,42 @@ import (
)
// ClientParameters is used to set keepalive parameters on the client-side.
-// These configure how the client will actively probe to notice when a
-// connection is broken and send pings so intermediaries will be aware of the
-// liveness of the connection. Make sure these parameters are set in
-// coordination with the keepalive policy on the server, as incompatible
-// settings can result in closing of connection.
+// These configure how the client will actively probe to notice when a connection is broken
+// and send pings so intermediaries will be aware of the liveness of the connection.
+// Make sure these parameters are set in coordination with the keepalive policy on the server,
+// as incompatible settings can result in closing of connection.
type ClientParameters struct {
- // After a duration of this time if the client doesn't see any activity it
- // pings the server to see if the transport is still alive.
+	// After a duration of this time, if the client doesn't see any activity, it pings the server to see if the transport is still alive.
Time time.Duration // The current default value is infinity.
- // After having pinged for keepalive check, the client waits for a duration
- // of Timeout and if no activity is seen even after that the connection is
- // closed.
+	// After having pinged for keepalive check, the client waits for a duration of Timeout, and if no activity is seen even after that,
+	// the connection is closed.
Timeout time.Duration // The current default value is 20 seconds.
- // If true, client sends keepalive pings even with no active RPCs. If false,
- // when there are no active RPCs, Time and Timeout will be ignored and no
- // keepalive pings will be sent.
+ // If true, client runs keepalive checks even with no active RPCs.
PermitWithoutStream bool // false by default.
}
-// ServerParameters is used to set keepalive and max-age parameters on the
-// server-side.
+// ServerParameters is used to set keepalive and max-age parameters on the server-side.
type ServerParameters struct {
- // MaxConnectionIdle is a duration for the amount of time after which an
- // idle connection would be closed by sending a GoAway. Idleness duration is
- // defined since the most recent time the number of outstanding RPCs became
- // zero or the connection establishment.
+ // MaxConnectionIdle is a duration for the amount of time after which an idle connection would be closed by sending a GoAway.
+ // Idleness duration is defined since the most recent time the number of outstanding RPCs became zero or the connection establishment.
MaxConnectionIdle time.Duration // The current default value is infinity.
- // MaxConnectionAge is a duration for the maximum amount of time a
- // connection may exist before it will be closed by sending a GoAway. A
- // random jitter of +/-10% will be added to MaxConnectionAge to spread out
- // connection storms.
+ // MaxConnectionAge is a duration for the maximum amount of time a connection may exist before it will be closed by sending a GoAway.
+ // A random jitter of +/-10% will be added to MaxConnectionAge to spread out connection storms.
MaxConnectionAge time.Duration // The current default value is infinity.
-	// MaxConnectionAgeGrace is an additive period after MaxConnectionAge after
-	// which the connection will be forcibly closed.
+	// MaxConnectionAgeGrace is an additive period after MaxConnectionAge after which the connection will be forcibly closed.
MaxConnectionAgeGrace time.Duration // The current default value is infinity.
- // After a duration of this time if the server doesn't see any activity it
- // pings the client to see if the transport is still alive.
+	// After a duration of this time, if the server doesn't see any activity, it pings the client to see if the transport is still alive.
Time time.Duration // The current default value is 2 hours.
- // After having pinged for keepalive check, the server waits for a duration
- // of Timeout and if no activity is seen even after that the connection is
- // closed.
+	// After having pinged for keepalive check, the server waits for a duration of Timeout, and if no activity is seen even after that,
+	// the connection is closed.
Timeout time.Duration // The current default value is 20 seconds.
}
-// EnforcementPolicy is used to set keepalive enforcement policy on the
-// server-side. Server will close connection with a client that violates this
-// policy.
+// EnforcementPolicy is used to set keepalive enforcement policy on the server-side.
+// Server will close connection with a client that violates this policy.
type EnforcementPolicy struct {
- // MinTime is the minimum amount of time a client should wait before sending
- // a keepalive ping.
+ // MinTime is the minimum amount of time a client should wait before sending a keepalive ping.
MinTime time.Duration // The current default value is 5 minutes.
- // If true, server allows keepalive pings even when there are no active
- // streams(RPCs). If false, and client sends ping when there are no active
- // streams, server will send GOAWAY and close the connection.
+	// If true, server expects keepalive pings even when there are no active streams (RPCs).
PermitWithoutStream bool // false by default.
}
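The structs in this file are the knobs applications hand to grpc-go when dialing or building a server; the reverted comment re-wrapping above does not change their behavior. A minimal client-side sketch, assuming the grpc.WithKeepaliveParams dial option; the address and durations are illustrative only and, as the comments above stress, must stay compatible with the server's EnforcementPolicy (MinTime defaults to 5 minutes):

package main

import (
	"log"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/keepalive"
)

func main() {
	conn, err := grpc.Dial("localhost:9999",
		grpc.WithInsecure(),
		grpc.WithKeepaliveParams(keepalive.ClientParameters{
			Time:                10 * time.Minute, // ping after 10 minutes of inactivity
			Timeout:             20 * time.Second, // give up if no reply within 20 seconds
			PermitWithoutStream: false,            // only ping while RPCs are in flight
		}),
	)
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	defer conn.Close()
}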
diff --git a/vendor/google.golang.org/grpc/metadata/metadata.go b/vendor/google.golang.org/grpc/metadata/metadata.go
index bd2eaf408..ccfea5d45 100644
--- a/vendor/google.golang.org/grpc/metadata/metadata.go
+++ b/vendor/google.golang.org/grpc/metadata/metadata.go
@@ -17,8 +17,7 @@
*/
 // Package metadata defines the structure of the metadata supported by the gRPC library.
-// Please refer to https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md
-// for more information about custom-metadata.
+// Please refer to https://grpc.io/docs/guides/wire.html for more information about custom-metadata.
package metadata // import "google.golang.org/grpc/metadata"
import (
@@ -28,9 +27,7 @@ import (
"golang.org/x/net/context"
)
-// DecodeKeyValue returns k, v, nil.
-//
-// Deprecated: use k and v directly instead.
+// DecodeKeyValue returns k, v, nil. It is deprecated and should not be used.
func DecodeKeyValue(k, v string) (string, string, error) {
return k, v, nil
}
@@ -97,30 +94,6 @@ func (md MD) Copy() MD {
return Join(md)
}
-// Get obtains the values for a given key.
-func (md MD) Get(k string) []string {
- k = strings.ToLower(k)
- return md[k]
-}
-
-// Set sets the value of a given key with a slice of values.
-func (md MD) Set(k string, vals ...string) {
- if len(vals) == 0 {
- return
- }
- k = strings.ToLower(k)
- md[k] = vals
-}
-
-// Append adds the values to key k, not overwriting what was already stored at that key.
-func (md MD) Append(k string, vals ...string) {
- if len(vals) == 0 {
- return
- }
- k = strings.ToLower(k)
- md[k] = append(md[k], vals...)
-}
-
// Join joins any number of mds into a single MD.
// The order of values for each key is determined by the order in which
// the mds containing those values are presented to Join.
@@ -142,26 +115,9 @@ func NewIncomingContext(ctx context.Context, md MD) context.Context {
return context.WithValue(ctx, mdIncomingKey{}, md)
}
-// NewOutgoingContext creates a new context with outgoing md attached. If used
-// in conjunction with AppendToOutgoingContext, NewOutgoingContext will
-// overwrite any previously-appended metadata.
+// NewOutgoingContext creates a new context with outgoing md attached.
func NewOutgoingContext(ctx context.Context, md MD) context.Context {
- return context.WithValue(ctx, mdOutgoingKey{}, rawMD{md: md})
-}
-
-// AppendToOutgoingContext returns a new context with the provided kv merged
-// with any existing metadata in the context. Please refer to the
-// documentation of Pairs for a description of kv.
-func AppendToOutgoingContext(ctx context.Context, kv ...string) context.Context {
- if len(kv)%2 == 1 {
- panic(fmt.Sprintf("metadata: AppendToOutgoingContext got an odd number of input pairs for metadata: %d", len(kv)))
- }
- md, _ := ctx.Value(mdOutgoingKey{}).(rawMD)
- added := make([][]string, len(md.added)+1)
- copy(added, md.added)
- added[len(added)-1] = make([]string, len(kv))
- copy(added[len(added)-1], kv)
- return context.WithValue(ctx, mdOutgoingKey{}, rawMD{md: md.md, added: added})
+ return context.WithValue(ctx, mdOutgoingKey{}, md)
}
// FromIncomingContext returns the incoming metadata in ctx if it exists. The
@@ -172,39 +128,10 @@ func FromIncomingContext(ctx context.Context) (md MD, ok bool) {
return
}
-// FromOutgoingContextRaw returns the un-merged, intermediary contents
-// of rawMD. Remember to perform strings.ToLower on the keys. The returned
-// MD should not be modified. Writing to it may cause races. Modification
-// should be made to copies of the returned MD.
-//
-// This is intended for gRPC-internal use ONLY.
-func FromOutgoingContextRaw(ctx context.Context) (MD, [][]string, bool) {
- raw, ok := ctx.Value(mdOutgoingKey{}).(rawMD)
- if !ok {
- return nil, nil, false
- }
-
- return raw.md, raw.added, true
-}
-
// FromOutgoingContext returns the outgoing metadata in ctx if it exists. The
// returned MD should not be modified. Writing to it may cause races.
-// Modification should be made to copies of the returned MD.
-func FromOutgoingContext(ctx context.Context) (MD, bool) {
- raw, ok := ctx.Value(mdOutgoingKey{}).(rawMD)
- if !ok {
- return nil, false
- }
-
- mds := make([]MD, 0, len(raw.added)+1)
- mds = append(mds, raw.md)
- for _, vv := range raw.added {
- mds = append(mds, Pairs(vv...))
- }
- return Join(mds...), ok
-}
-
-type rawMD struct {
- md MD
- added [][]string
+// Modification should be made to the copies of the returned MD.
+func FromOutgoingContext(ctx context.Context) (md MD, ok bool) {
+ md, ok = ctx.Value(mdOutgoingKey{}).(MD)
+ return
}
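The restored metadata API stores the MD value directly in the context and drops the rawMD/AppendToOutgoingContext merging, so callers are limited to Pairs, New, NewOutgoingContext/NewIncomingContext and the From*Context accessors. A minimal sketch using only those functions; the keys and values are invented for illustration:

package main

import (
	"context"
	"fmt"

	"google.golang.org/grpc/metadata"
)

func main() {
	// Client side: attach metadata to the outgoing context. Pairs lower-cases keys.
	md := metadata.Pairs("authorization", "Bearer example-token", "client-name", "gitaly")
	ctx := metadata.NewOutgoingContext(context.Background(), md)

	// Server side: gRPC delivers the same keys via the incoming context.
	// Simulated here by building an incoming context directly.
	in := metadata.NewIncomingContext(ctx, md)
	if got, ok := metadata.FromIncomingContext(in); ok {
		fmt.Println(got["client-name"]) // [gitaly]
	}
}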
diff --git a/vendor/google.golang.org/grpc/naming/dns_resolver.go b/vendor/google.golang.org/grpc/naming/dns_resolver.go
index 0f8a908ea..7e69a2ca0 100644
--- a/vendor/google.golang.org/grpc/naming/dns_resolver.go
+++ b/vendor/google.golang.org/grpc/naming/dns_resolver.go
@@ -153,10 +153,10 @@ type ipWatcher struct {
updateChan chan *Update
}
-// Next returns the address resolution Update for the target. For IP address,
-// the resolution is itself, thus polling name server is unnecessary. Therefore,
+// Next returns the address resolution Update for the target. For IP address,
+// the resolution is itself, thus polling name server is unnecessary. Therefore,
// Next() will return an Update the first time it is called, and will be blocked
-// for all following calls as no Update exists until watcher is closed.
+// for all following calls as no Update exists until watcher is closed.
func (i *ipWatcher) Next() ([]*Update, error) {
u, ok := <-i.updateChan
if !ok {
diff --git a/vendor/google.golang.org/grpc/naming/naming.go b/vendor/google.golang.org/grpc/naming/naming.go
index 8cc39e937..1af7e32f8 100644
--- a/vendor/google.golang.org/grpc/naming/naming.go
+++ b/vendor/google.golang.org/grpc/naming/naming.go
@@ -18,26 +18,20 @@
// Package naming defines the naming API and related data structures for gRPC.
 // The interface is EXPERIMENTAL and may be subject to change.
-//
-// Deprecated: please use package resolver.
package naming
// Operation defines the corresponding operations for a name resolution change.
-//
-// Deprecated: please use package resolver.
type Operation uint8
const (
// Add indicates a new address is added.
Add Operation = iota
- // Delete indicates an existing address is deleted.
+	// Delete indicates an existing address is deleted.
Delete
)
// Update defines a name resolution update. Notice that it is not valid having both
// empty string Addr and nil Metadata in an Update.
-//
-// Deprecated: please use package resolver.
type Update struct {
// Op indicates the operation of the update.
Op Operation
@@ -49,16 +43,12 @@ type Update struct {
}
// Resolver creates a Watcher for a target to track its resolution changes.
-//
-// Deprecated: please use package resolver.
type Resolver interface {
// Resolve creates a Watcher for target.
Resolve(target string) (Watcher, error)
}
// Watcher watches for the updates on the specified target.
-//
-// Deprecated: please use package resolver.
type Watcher interface {
// Next blocks until an update or error happens. It may return one or more
// updates. The first call should get the full set of the results. It should
diff --git a/vendor/google.golang.org/grpc/picker_wrapper.go b/vendor/google.golang.org/grpc/picker_wrapper.go
index 76cc456aa..db82bfb3a 100644
--- a/vendor/google.golang.org/grpc/picker_wrapper.go
+++ b/vendor/google.golang.org/grpc/picker_wrapper.go
@@ -19,16 +19,14 @@
package grpc
import (
- "io"
"sync"
"golang.org/x/net/context"
"google.golang.org/grpc/balancer"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/grpclog"
- "google.golang.org/grpc/internal/channelz"
- "google.golang.org/grpc/internal/transport"
"google.golang.org/grpc/status"
+ "google.golang.org/grpc/transport"
)
// pickerWrapper is a wrapper of balancer.Picker. It blocks on certain pick
@@ -38,10 +36,6 @@ type pickerWrapper struct {
done bool
blockingCh chan struct{}
picker balancer.Picker
-
-	// The latest connection error that happened.
- connErrMu sync.Mutex
- connErr error
}
func newPickerWrapper() *pickerWrapper {
@@ -49,19 +43,6 @@ func newPickerWrapper() *pickerWrapper {
return bp
}
-func (bp *pickerWrapper) updateConnectionError(err error) {
- bp.connErrMu.Lock()
- bp.connErr = err
- bp.connErrMu.Unlock()
-}
-
-func (bp *pickerWrapper) connectionError() error {
- bp.connErrMu.Lock()
- err := bp.connErr
- bp.connErrMu.Unlock()
- return err
-}
-
 // updatePicker is called by UpdateBalancerState. It unblocks all blocked picks.
func (bp *pickerWrapper) updatePicker(p balancer.Picker) {
bp.mu.Lock()
@@ -76,23 +57,6 @@ func (bp *pickerWrapper) updatePicker(p balancer.Picker) {
bp.mu.Unlock()
}
-func doneChannelzWrapper(acw *acBalancerWrapper, done func(balancer.DoneInfo)) func(balancer.DoneInfo) {
- acw.mu.Lock()
- ac := acw.ac
- acw.mu.Unlock()
- ac.incrCallsStarted()
- return func(b balancer.DoneInfo) {
- if b.Err != nil && b.Err != io.EOF {
- ac.incrCallsFailed()
- } else {
- ac.incrCallsSucceeded()
- }
- if done != nil {
- done(b)
- }
- }
-}
-
// pick returns the transport that will be used for the RPC.
// It may block in the following cases:
// - there's no picker
@@ -143,7 +107,7 @@ func (bp *pickerWrapper) pick(ctx context.Context, failfast bool, opts balancer.
if !failfast {
continue
}
- return nil, nil, status.Errorf(codes.Unavailable, "%v, latest connection error: %v", err, bp.connectionError())
+ return nil, nil, status.Errorf(codes.Unavailable, "%v", err)
default:
// err is some other error.
return nil, nil, toRPCErr(err)
@@ -156,9 +120,6 @@ func (bp *pickerWrapper) pick(ctx context.Context, failfast bool, opts balancer.
continue
}
if t, ok := acw.getAddrConn().getReadyTransport(); ok {
- if channelz.IsOn() {
- return t, doneChannelzWrapper(acw, done), nil
- }
return t, done, nil
}
grpclog.Infof("blockingPicker: the picked transport is not ready, loop back to repick")
diff --git a/vendor/google.golang.org/grpc/pickfirst.go b/vendor/google.golang.org/grpc/pickfirst.go
index bda4309c0..bf659d49d 100644
--- a/vendor/google.golang.org/grpc/pickfirst.go
+++ b/vendor/google.golang.org/grpc/pickfirst.go
@@ -56,7 +56,6 @@ func (b *pickfirstBalancer) HandleResolvedAddrs(addrs []resolver.Address, err er
if b.sc == nil {
b.sc, err = b.cc.NewSubConn(addrs, balancer.NewSubConnOptions{})
if err != nil {
- //TODO(yuxuanli): why not change the cc state to Idle?
grpclog.Errorf("pickfirstBalancer: failed to NewSubConn: %v", err)
return
}
diff --git a/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection.pb.go b/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection.pb.go
index ae5aa7dbe..b40725327 100644
--- a/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection.pb.go
+++ b/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection.pb.go
@@ -1,6 +1,22 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: grpc_reflection_v1alpha/reflection.proto
+/*
+Package grpc_reflection_v1alpha is a generated protocol buffer package.
+
+It is generated from these files:
+ grpc_reflection_v1alpha/reflection.proto
+
+It has these top-level messages:
+ ServerReflectionRequest
+ ExtensionRequest
+ ServerReflectionResponse
+ FileDescriptorResponse
+ ExtensionNumberResponse
+ ListServiceResponse
+ ServiceResponse
+ ErrorResponse
+*/
package grpc_reflection_v1alpha
import proto "github.com/golang/protobuf/proto"
@@ -25,7 +41,7 @@ const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
// The message sent by the client when calling ServerReflectionInfo method.
type ServerReflectionRequest struct {
- Host string `protobuf:"bytes,1,opt,name=host,proto3" json:"host,omitempty"`
+ Host string `protobuf:"bytes,1,opt,name=host" json:"host,omitempty"`
// To use reflection service, the client should set one of the following
// fields in message_request. The server distinguishes requests by their
// defined field and then handles them using corresponding methods.
@@ -36,76 +52,39 @@ type ServerReflectionRequest struct {
// *ServerReflectionRequest_FileContainingExtension
// *ServerReflectionRequest_AllExtensionNumbersOfType
// *ServerReflectionRequest_ListServices
- MessageRequest isServerReflectionRequest_MessageRequest `protobuf_oneof:"message_request"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
+ MessageRequest isServerReflectionRequest_MessageRequest `protobuf_oneof:"message_request"`
}
-func (m *ServerReflectionRequest) Reset() { *m = ServerReflectionRequest{} }
-func (m *ServerReflectionRequest) String() string { return proto.CompactTextString(m) }
-func (*ServerReflectionRequest) ProtoMessage() {}
-func (*ServerReflectionRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor_reflection_178bd1e101bf8b63, []int{0}
-}
-func (m *ServerReflectionRequest) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_ServerReflectionRequest.Unmarshal(m, b)
-}
-func (m *ServerReflectionRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_ServerReflectionRequest.Marshal(b, m, deterministic)
-}
-func (dst *ServerReflectionRequest) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ServerReflectionRequest.Merge(dst, src)
-}
-func (m *ServerReflectionRequest) XXX_Size() int {
- return xxx_messageInfo_ServerReflectionRequest.Size(m)
-}
-func (m *ServerReflectionRequest) XXX_DiscardUnknown() {
- xxx_messageInfo_ServerReflectionRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ServerReflectionRequest proto.InternalMessageInfo
-
-func (m *ServerReflectionRequest) GetHost() string {
- if m != nil {
- return m.Host
- }
- return ""
-}
+func (m *ServerReflectionRequest) Reset() { *m = ServerReflectionRequest{} }
+func (m *ServerReflectionRequest) String() string { return proto.CompactTextString(m) }
+func (*ServerReflectionRequest) ProtoMessage() {}
+func (*ServerReflectionRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
type isServerReflectionRequest_MessageRequest interface {
isServerReflectionRequest_MessageRequest()
}
type ServerReflectionRequest_FileByFilename struct {
- FileByFilename string `protobuf:"bytes,3,opt,name=file_by_filename,json=fileByFilename,proto3,oneof"`
+ FileByFilename string `protobuf:"bytes,3,opt,name=file_by_filename,json=fileByFilename,oneof"`
}
-
type ServerReflectionRequest_FileContainingSymbol struct {
- FileContainingSymbol string `protobuf:"bytes,4,opt,name=file_containing_symbol,json=fileContainingSymbol,proto3,oneof"`
+ FileContainingSymbol string `protobuf:"bytes,4,opt,name=file_containing_symbol,json=fileContainingSymbol,oneof"`
}
-
type ServerReflectionRequest_FileContainingExtension struct {
- FileContainingExtension *ExtensionRequest `protobuf:"bytes,5,opt,name=file_containing_extension,json=fileContainingExtension,proto3,oneof"`
+ FileContainingExtension *ExtensionRequest `protobuf:"bytes,5,opt,name=file_containing_extension,json=fileContainingExtension,oneof"`
}
-
type ServerReflectionRequest_AllExtensionNumbersOfType struct {
- AllExtensionNumbersOfType string `protobuf:"bytes,6,opt,name=all_extension_numbers_of_type,json=allExtensionNumbersOfType,proto3,oneof"`
+ AllExtensionNumbersOfType string `protobuf:"bytes,6,opt,name=all_extension_numbers_of_type,json=allExtensionNumbersOfType,oneof"`
}
-
type ServerReflectionRequest_ListServices struct {
- ListServices string `protobuf:"bytes,7,opt,name=list_services,json=listServices,proto3,oneof"`
+ ListServices string `protobuf:"bytes,7,opt,name=list_services,json=listServices,oneof"`
}
-func (*ServerReflectionRequest_FileByFilename) isServerReflectionRequest_MessageRequest() {}
-
-func (*ServerReflectionRequest_FileContainingSymbol) isServerReflectionRequest_MessageRequest() {}
-
-func (*ServerReflectionRequest_FileContainingExtension) isServerReflectionRequest_MessageRequest() {}
-
+func (*ServerReflectionRequest_FileByFilename) isServerReflectionRequest_MessageRequest() {}
+func (*ServerReflectionRequest_FileContainingSymbol) isServerReflectionRequest_MessageRequest() {}
+func (*ServerReflectionRequest_FileContainingExtension) isServerReflectionRequest_MessageRequest() {}
func (*ServerReflectionRequest_AllExtensionNumbersOfType) isServerReflectionRequest_MessageRequest() {}
-
-func (*ServerReflectionRequest_ListServices) isServerReflectionRequest_MessageRequest() {}
+func (*ServerReflectionRequest_ListServices) isServerReflectionRequest_MessageRequest() {}
func (m *ServerReflectionRequest) GetMessageRequest() isServerReflectionRequest_MessageRequest {
if m != nil {
@@ -114,6 +93,13 @@ func (m *ServerReflectionRequest) GetMessageRequest() isServerReflectionRequest_
return nil
}
+func (m *ServerReflectionRequest) GetHost() string {
+ if m != nil {
+ return m.Host
+ }
+ return ""
+}
+
func (m *ServerReflectionRequest) GetFileByFilename() string {
if x, ok := m.GetMessageRequest().(*ServerReflectionRequest_FileByFilename); ok {
return x.FileByFilename
@@ -237,24 +223,24 @@ func _ServerReflectionRequest_OneofSizer(msg proto.Message) (n int) {
// message_request
switch x := m.MessageRequest.(type) {
case *ServerReflectionRequest_FileByFilename:
- n += 1 // tag and wire
+ n += proto.SizeVarint(3<<3 | proto.WireBytes)
n += proto.SizeVarint(uint64(len(x.FileByFilename)))
n += len(x.FileByFilename)
case *ServerReflectionRequest_FileContainingSymbol:
- n += 1 // tag and wire
+ n += proto.SizeVarint(4<<3 | proto.WireBytes)
n += proto.SizeVarint(uint64(len(x.FileContainingSymbol)))
n += len(x.FileContainingSymbol)
case *ServerReflectionRequest_FileContainingExtension:
s := proto.Size(x.FileContainingExtension)
- n += 1 // tag and wire
+ n += proto.SizeVarint(5<<3 | proto.WireBytes)
n += proto.SizeVarint(uint64(s))
n += s
case *ServerReflectionRequest_AllExtensionNumbersOfType:
- n += 1 // tag and wire
+ n += proto.SizeVarint(6<<3 | proto.WireBytes)
n += proto.SizeVarint(uint64(len(x.AllExtensionNumbersOfType)))
n += len(x.AllExtensionNumbersOfType)
case *ServerReflectionRequest_ListServices:
- n += 1 // tag and wire
+ n += proto.SizeVarint(7<<3 | proto.WireBytes)
n += proto.SizeVarint(uint64(len(x.ListServices)))
n += len(x.ListServices)
case nil:
@@ -268,36 +254,14 @@ func _ServerReflectionRequest_OneofSizer(msg proto.Message) (n int) {
// file_containing_extension.
type ExtensionRequest struct {
// Fully-qualified type name. The format should be <package>.<type>
- ContainingType string `protobuf:"bytes,1,opt,name=containing_type,json=containingType,proto3" json:"containing_type,omitempty"`
- ExtensionNumber int32 `protobuf:"varint,2,opt,name=extension_number,json=extensionNumber,proto3" json:"extension_number,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
+ ContainingType string `protobuf:"bytes,1,opt,name=containing_type,json=containingType" json:"containing_type,omitempty"`
+ ExtensionNumber int32 `protobuf:"varint,2,opt,name=extension_number,json=extensionNumber" json:"extension_number,omitempty"`
}
-func (m *ExtensionRequest) Reset() { *m = ExtensionRequest{} }
-func (m *ExtensionRequest) String() string { return proto.CompactTextString(m) }
-func (*ExtensionRequest) ProtoMessage() {}
-func (*ExtensionRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor_reflection_178bd1e101bf8b63, []int{1}
-}
-func (m *ExtensionRequest) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_ExtensionRequest.Unmarshal(m, b)
-}
-func (m *ExtensionRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_ExtensionRequest.Marshal(b, m, deterministic)
-}
-func (dst *ExtensionRequest) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ExtensionRequest.Merge(dst, src)
-}
-func (m *ExtensionRequest) XXX_Size() int {
- return xxx_messageInfo_ExtensionRequest.Size(m)
-}
-func (m *ExtensionRequest) XXX_DiscardUnknown() {
- xxx_messageInfo_ExtensionRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ExtensionRequest proto.InternalMessageInfo
+func (m *ExtensionRequest) Reset() { *m = ExtensionRequest{} }
+func (m *ExtensionRequest) String() string { return proto.CompactTextString(m) }
+func (*ExtensionRequest) ProtoMessage() {}
+func (*ExtensionRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} }
func (m *ExtensionRequest) GetContainingType() string {
if m != nil {
@@ -315,88 +279,46 @@ func (m *ExtensionRequest) GetExtensionNumber() int32 {
// The message sent by the server to answer ServerReflectionInfo method.
type ServerReflectionResponse struct {
- ValidHost string `protobuf:"bytes,1,opt,name=valid_host,json=validHost,proto3" json:"valid_host,omitempty"`
- OriginalRequest *ServerReflectionRequest `protobuf:"bytes,2,opt,name=original_request,json=originalRequest,proto3" json:"original_request,omitempty"`
- // The server sets one of the following fields according to the
- // message_request in the request.
+ ValidHost string `protobuf:"bytes,1,opt,name=valid_host,json=validHost" json:"valid_host,omitempty"`
+ OriginalRequest *ServerReflectionRequest `protobuf:"bytes,2,opt,name=original_request,json=originalRequest" json:"original_request,omitempty"`
+	// The server sets one of the following fields according to the message_request
+ // in the request.
//
// Types that are valid to be assigned to MessageResponse:
// *ServerReflectionResponse_FileDescriptorResponse
// *ServerReflectionResponse_AllExtensionNumbersResponse
// *ServerReflectionResponse_ListServicesResponse
// *ServerReflectionResponse_ErrorResponse
- MessageResponse isServerReflectionResponse_MessageResponse `protobuf_oneof:"message_response"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
+ MessageResponse isServerReflectionResponse_MessageResponse `protobuf_oneof:"message_response"`
}
-func (m *ServerReflectionResponse) Reset() { *m = ServerReflectionResponse{} }
-func (m *ServerReflectionResponse) String() string { return proto.CompactTextString(m) }
-func (*ServerReflectionResponse) ProtoMessage() {}
-func (*ServerReflectionResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_reflection_178bd1e101bf8b63, []int{2}
-}
-func (m *ServerReflectionResponse) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_ServerReflectionResponse.Unmarshal(m, b)
-}
-func (m *ServerReflectionResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_ServerReflectionResponse.Marshal(b, m, deterministic)
-}
-func (dst *ServerReflectionResponse) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ServerReflectionResponse.Merge(dst, src)
-}
-func (m *ServerReflectionResponse) XXX_Size() int {
- return xxx_messageInfo_ServerReflectionResponse.Size(m)
-}
-func (m *ServerReflectionResponse) XXX_DiscardUnknown() {
- xxx_messageInfo_ServerReflectionResponse.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ServerReflectionResponse proto.InternalMessageInfo
-
-func (m *ServerReflectionResponse) GetValidHost() string {
- if m != nil {
- return m.ValidHost
- }
- return ""
-}
-
-func (m *ServerReflectionResponse) GetOriginalRequest() *ServerReflectionRequest {
- if m != nil {
- return m.OriginalRequest
- }
- return nil
-}
+func (m *ServerReflectionResponse) Reset() { *m = ServerReflectionResponse{} }
+func (m *ServerReflectionResponse) String() string { return proto.CompactTextString(m) }
+func (*ServerReflectionResponse) ProtoMessage() {}
+func (*ServerReflectionResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} }
type isServerReflectionResponse_MessageResponse interface {
isServerReflectionResponse_MessageResponse()
}
type ServerReflectionResponse_FileDescriptorResponse struct {
- FileDescriptorResponse *FileDescriptorResponse `protobuf:"bytes,4,opt,name=file_descriptor_response,json=fileDescriptorResponse,proto3,oneof"`
+ FileDescriptorResponse *FileDescriptorResponse `protobuf:"bytes,4,opt,name=file_descriptor_response,json=fileDescriptorResponse,oneof"`
}
-
type ServerReflectionResponse_AllExtensionNumbersResponse struct {
- AllExtensionNumbersResponse *ExtensionNumberResponse `protobuf:"bytes,5,opt,name=all_extension_numbers_response,json=allExtensionNumbersResponse,proto3,oneof"`
+ AllExtensionNumbersResponse *ExtensionNumberResponse `protobuf:"bytes,5,opt,name=all_extension_numbers_response,json=allExtensionNumbersResponse,oneof"`
}
-
type ServerReflectionResponse_ListServicesResponse struct {
- ListServicesResponse *ListServiceResponse `protobuf:"bytes,6,opt,name=list_services_response,json=listServicesResponse,proto3,oneof"`
+ ListServicesResponse *ListServiceResponse `protobuf:"bytes,6,opt,name=list_services_response,json=listServicesResponse,oneof"`
}
-
type ServerReflectionResponse_ErrorResponse struct {
- ErrorResponse *ErrorResponse `protobuf:"bytes,7,opt,name=error_response,json=errorResponse,proto3,oneof"`
+ ErrorResponse *ErrorResponse `protobuf:"bytes,7,opt,name=error_response,json=errorResponse,oneof"`
}
func (*ServerReflectionResponse_FileDescriptorResponse) isServerReflectionResponse_MessageResponse() {}
-
func (*ServerReflectionResponse_AllExtensionNumbersResponse) isServerReflectionResponse_MessageResponse() {
}
-
func (*ServerReflectionResponse_ListServicesResponse) isServerReflectionResponse_MessageResponse() {}
-
-func (*ServerReflectionResponse_ErrorResponse) isServerReflectionResponse_MessageResponse() {}
+func (*ServerReflectionResponse_ErrorResponse) isServerReflectionResponse_MessageResponse() {}
func (m *ServerReflectionResponse) GetMessageResponse() isServerReflectionResponse_MessageResponse {
if m != nil {
@@ -405,6 +327,20 @@ func (m *ServerReflectionResponse) GetMessageResponse() isServerReflectionRespon
return nil
}
+func (m *ServerReflectionResponse) GetValidHost() string {
+ if m != nil {
+ return m.ValidHost
+ }
+ return ""
+}
+
+func (m *ServerReflectionResponse) GetOriginalRequest() *ServerReflectionRequest {
+ if m != nil {
+ return m.OriginalRequest
+ }
+ return nil
+}
+
func (m *ServerReflectionResponse) GetFileDescriptorResponse() *FileDescriptorResponse {
if x, ok := m.GetMessageResponse().(*ServerReflectionResponse_FileDescriptorResponse); ok {
return x.FileDescriptorResponse
@@ -520,22 +456,22 @@ func _ServerReflectionResponse_OneofSizer(msg proto.Message) (n int) {
switch x := m.MessageResponse.(type) {
case *ServerReflectionResponse_FileDescriptorResponse:
s := proto.Size(x.FileDescriptorResponse)
- n += 1 // tag and wire
+ n += proto.SizeVarint(4<<3 | proto.WireBytes)
n += proto.SizeVarint(uint64(s))
n += s
case *ServerReflectionResponse_AllExtensionNumbersResponse:
s := proto.Size(x.AllExtensionNumbersResponse)
- n += 1 // tag and wire
+ n += proto.SizeVarint(5<<3 | proto.WireBytes)
n += proto.SizeVarint(uint64(s))
n += s
case *ServerReflectionResponse_ListServicesResponse:
s := proto.Size(x.ListServicesResponse)
- n += 1 // tag and wire
+ n += proto.SizeVarint(6<<3 | proto.WireBytes)
n += proto.SizeVarint(uint64(s))
n += s
case *ServerReflectionResponse_ErrorResponse:
s := proto.Size(x.ErrorResponse)
- n += 1 // tag and wire
+ n += proto.SizeVarint(7<<3 | proto.WireBytes)
n += proto.SizeVarint(uint64(s))
n += s
case nil:
@@ -552,35 +488,13 @@ type FileDescriptorResponse struct {
// Serialized FileDescriptorProto messages. We avoid taking a dependency on
// descriptor.proto, which uses proto2 only features, by making them opaque
// bytes instead.
- FileDescriptorProto [][]byte `protobuf:"bytes,1,rep,name=file_descriptor_proto,json=fileDescriptorProto,proto3" json:"file_descriptor_proto,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *FileDescriptorResponse) Reset() { *m = FileDescriptorResponse{} }
-func (m *FileDescriptorResponse) String() string { return proto.CompactTextString(m) }
-func (*FileDescriptorResponse) ProtoMessage() {}
-func (*FileDescriptorResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_reflection_178bd1e101bf8b63, []int{3}
-}
-func (m *FileDescriptorResponse) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_FileDescriptorResponse.Unmarshal(m, b)
-}
-func (m *FileDescriptorResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_FileDescriptorResponse.Marshal(b, m, deterministic)
-}
-func (dst *FileDescriptorResponse) XXX_Merge(src proto.Message) {
- xxx_messageInfo_FileDescriptorResponse.Merge(dst, src)
-}
-func (m *FileDescriptorResponse) XXX_Size() int {
- return xxx_messageInfo_FileDescriptorResponse.Size(m)
-}
-func (m *FileDescriptorResponse) XXX_DiscardUnknown() {
- xxx_messageInfo_FileDescriptorResponse.DiscardUnknown(m)
+ FileDescriptorProto [][]byte `protobuf:"bytes,1,rep,name=file_descriptor_proto,json=fileDescriptorProto,proto3" json:"file_descriptor_proto,omitempty"`
}
-var xxx_messageInfo_FileDescriptorResponse proto.InternalMessageInfo
+func (m *FileDescriptorResponse) Reset() { *m = FileDescriptorResponse{} }
+func (m *FileDescriptorResponse) String() string { return proto.CompactTextString(m) }
+func (*FileDescriptorResponse) ProtoMessage() {}
+func (*FileDescriptorResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} }
func (m *FileDescriptorResponse) GetFileDescriptorProto() [][]byte {
if m != nil {
@@ -594,36 +508,14 @@ func (m *FileDescriptorResponse) GetFileDescriptorProto() [][]byte {
type ExtensionNumberResponse struct {
// Full name of the base type, including the package name. The format
// is <package>.<type>
- BaseTypeName string `protobuf:"bytes,1,opt,name=base_type_name,json=baseTypeName,proto3" json:"base_type_name,omitempty"`
- ExtensionNumber []int32 `protobuf:"varint,2,rep,packed,name=extension_number,json=extensionNumber,proto3" json:"extension_number,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *ExtensionNumberResponse) Reset() { *m = ExtensionNumberResponse{} }
-func (m *ExtensionNumberResponse) String() string { return proto.CompactTextString(m) }
-func (*ExtensionNumberResponse) ProtoMessage() {}
-func (*ExtensionNumberResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_reflection_178bd1e101bf8b63, []int{4}
-}
-func (m *ExtensionNumberResponse) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_ExtensionNumberResponse.Unmarshal(m, b)
-}
-func (m *ExtensionNumberResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_ExtensionNumberResponse.Marshal(b, m, deterministic)
-}
-func (dst *ExtensionNumberResponse) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ExtensionNumberResponse.Merge(dst, src)
-}
-func (m *ExtensionNumberResponse) XXX_Size() int {
- return xxx_messageInfo_ExtensionNumberResponse.Size(m)
-}
-func (m *ExtensionNumberResponse) XXX_DiscardUnknown() {
- xxx_messageInfo_ExtensionNumberResponse.DiscardUnknown(m)
+ BaseTypeName string `protobuf:"bytes,1,opt,name=base_type_name,json=baseTypeName" json:"base_type_name,omitempty"`
+ ExtensionNumber []int32 `protobuf:"varint,2,rep,packed,name=extension_number,json=extensionNumber" json:"extension_number,omitempty"`
}
-var xxx_messageInfo_ExtensionNumberResponse proto.InternalMessageInfo
+func (m *ExtensionNumberResponse) Reset() { *m = ExtensionNumberResponse{} }
+func (m *ExtensionNumberResponse) String() string { return proto.CompactTextString(m) }
+func (*ExtensionNumberResponse) ProtoMessage() {}
+func (*ExtensionNumberResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} }
func (m *ExtensionNumberResponse) GetBaseTypeName() string {
if m != nil {
@@ -643,35 +535,13 @@ func (m *ExtensionNumberResponse) GetExtensionNumber() []int32 {
type ListServiceResponse struct {
// The information of each service may be expanded in the future, so we use
// ServiceResponse message to encapsulate it.
- Service []*ServiceResponse `protobuf:"bytes,1,rep,name=service,proto3" json:"service,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
+ Service []*ServiceResponse `protobuf:"bytes,1,rep,name=service" json:"service,omitempty"`
}
-func (m *ListServiceResponse) Reset() { *m = ListServiceResponse{} }
-func (m *ListServiceResponse) String() string { return proto.CompactTextString(m) }
-func (*ListServiceResponse) ProtoMessage() {}
-func (*ListServiceResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_reflection_178bd1e101bf8b63, []int{5}
-}
-func (m *ListServiceResponse) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_ListServiceResponse.Unmarshal(m, b)
-}
-func (m *ListServiceResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_ListServiceResponse.Marshal(b, m, deterministic)
-}
-func (dst *ListServiceResponse) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ListServiceResponse.Merge(dst, src)
-}
-func (m *ListServiceResponse) XXX_Size() int {
- return xxx_messageInfo_ListServiceResponse.Size(m)
-}
-func (m *ListServiceResponse) XXX_DiscardUnknown() {
- xxx_messageInfo_ListServiceResponse.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ListServiceResponse proto.InternalMessageInfo
+func (m *ListServiceResponse) Reset() { *m = ListServiceResponse{} }
+func (m *ListServiceResponse) String() string { return proto.CompactTextString(m) }
+func (*ListServiceResponse) ProtoMessage() {}
+func (*ListServiceResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} }
func (m *ListServiceResponse) GetService() []*ServiceResponse {
if m != nil {
@@ -685,35 +555,13 @@ func (m *ListServiceResponse) GetService() []*ServiceResponse {
type ServiceResponse struct {
// Full name of a registered service, including its package name. The format
// is <package>.<service>
- Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
+ Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
}
-func (m *ServiceResponse) Reset() { *m = ServiceResponse{} }
-func (m *ServiceResponse) String() string { return proto.CompactTextString(m) }
-func (*ServiceResponse) ProtoMessage() {}
-func (*ServiceResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_reflection_178bd1e101bf8b63, []int{6}
-}
-func (m *ServiceResponse) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_ServiceResponse.Unmarshal(m, b)
-}
-func (m *ServiceResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_ServiceResponse.Marshal(b, m, deterministic)
-}
-func (dst *ServiceResponse) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ServiceResponse.Merge(dst, src)
-}
-func (m *ServiceResponse) XXX_Size() int {
- return xxx_messageInfo_ServiceResponse.Size(m)
-}
-func (m *ServiceResponse) XXX_DiscardUnknown() {
- xxx_messageInfo_ServiceResponse.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ServiceResponse proto.InternalMessageInfo
+func (m *ServiceResponse) Reset() { *m = ServiceResponse{} }
+func (m *ServiceResponse) String() string { return proto.CompactTextString(m) }
+func (*ServiceResponse) ProtoMessage() {}
+func (*ServiceResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} }
func (m *ServiceResponse) GetName() string {
if m != nil {
@@ -725,36 +573,14 @@ func (m *ServiceResponse) GetName() string {
// The error code and error message sent by the server when an error occurs.
type ErrorResponse struct {
// This field uses the error codes defined in grpc::StatusCode.
- ErrorCode int32 `protobuf:"varint,1,opt,name=error_code,json=errorCode,proto3" json:"error_code,omitempty"`
- ErrorMessage string `protobuf:"bytes,2,opt,name=error_message,json=errorMessage,proto3" json:"error_message,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *ErrorResponse) Reset() { *m = ErrorResponse{} }
-func (m *ErrorResponse) String() string { return proto.CompactTextString(m) }
-func (*ErrorResponse) ProtoMessage() {}
-func (*ErrorResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_reflection_178bd1e101bf8b63, []int{7}
-}
-func (m *ErrorResponse) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_ErrorResponse.Unmarshal(m, b)
-}
-func (m *ErrorResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_ErrorResponse.Marshal(b, m, deterministic)
-}
-func (dst *ErrorResponse) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ErrorResponse.Merge(dst, src)
-}
-func (m *ErrorResponse) XXX_Size() int {
- return xxx_messageInfo_ErrorResponse.Size(m)
-}
-func (m *ErrorResponse) XXX_DiscardUnknown() {
- xxx_messageInfo_ErrorResponse.DiscardUnknown(m)
+ ErrorCode int32 `protobuf:"varint,1,opt,name=error_code,json=errorCode" json:"error_code,omitempty"`
+ ErrorMessage string `protobuf:"bytes,2,opt,name=error_message,json=errorMessage" json:"error_message,omitempty"`
}
-var xxx_messageInfo_ErrorResponse proto.InternalMessageInfo
+func (m *ErrorResponse) Reset() { *m = ErrorResponse{} }
+func (m *ErrorResponse) String() string { return proto.CompactTextString(m) }
+func (*ErrorResponse) ProtoMessage() {}
+func (*ErrorResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} }
func (m *ErrorResponse) GetErrorCode() int32 {
if m != nil {
@@ -789,9 +615,8 @@ var _ grpc.ClientConn
// is compatible with the grpc package it is being compiled against.
const _ = grpc.SupportPackageIsVersion4
-// ServerReflectionClient is the client API for ServerReflection service.
-//
-// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
+// Client API for ServerReflection service
+
type ServerReflectionClient interface {
// The reflection service is structured as a bidirectional stream, ensuring
// all related requests go to a single server.
@@ -807,7 +632,7 @@ func NewServerReflectionClient(cc *grpc.ClientConn) ServerReflectionClient {
}
func (c *serverReflectionClient) ServerReflectionInfo(ctx context.Context, opts ...grpc.CallOption) (ServerReflection_ServerReflectionInfoClient, error) {
- stream, err := c.cc.NewStream(ctx, &_ServerReflection_serviceDesc.Streams[0], "/grpc.reflection.v1alpha.ServerReflection/ServerReflectionInfo", opts...)
+ stream, err := grpc.NewClientStream(ctx, &_ServerReflection_serviceDesc.Streams[0], c.cc, "/grpc.reflection.v1alpha.ServerReflection/ServerReflectionInfo", opts...)
if err != nil {
return nil, err
}
@@ -837,7 +662,8 @@ func (x *serverReflectionServerReflectionInfoClient) Recv() (*ServerReflectionRe
return m, nil
}
-// ServerReflectionServer is the server API for ServerReflection service.
+// Server API for ServerReflection service
+
type ServerReflectionServer interface {
// The reflection service is structured as a bidirectional stream, ensuring
// all related requests go to a single server.
@@ -889,11 +715,9 @@ var _ServerReflection_serviceDesc = grpc.ServiceDesc{
Metadata: "grpc_reflection_v1alpha/reflection.proto",
}
-func init() {
- proto.RegisterFile("grpc_reflection_v1alpha/reflection.proto", fileDescriptor_reflection_178bd1e101bf8b63)
-}
+func init() { proto.RegisterFile("grpc_reflection_v1alpha/reflection.proto", fileDescriptor0) }
-var fileDescriptor_reflection_178bd1e101bf8b63 = []byte{
+var fileDescriptor0 = []byte{
// 656 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x54, 0x51, 0x73, 0xd2, 0x40,
0x10, 0x6e, 0x5a, 0x68, 0x87, 0x85, 0x02, 0x5e, 0x2b, 0xa4, 0x3a, 0x75, 0x98, 0x68, 0x35, 0x75,
diff --git a/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection.proto b/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection.proto
index 99b00df0a..c52ccc6ab 100644
--- a/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection.proto
+++ b/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection.proto
@@ -72,21 +72,21 @@ message ExtensionRequest {
message ServerReflectionResponse {
string valid_host = 1;
ServerReflectionRequest original_request = 2;
- // The server sets one of the following fields according to the
- // message_request in the request.
+ // The server sets one of the following fields according to the message_request
+ // in the request.
oneof message_response {
// This message is used to answer file_by_filename, file_containing_symbol,
- // file_containing_extension requests with transitive dependencies.
- // As the repeated label is not allowed in oneof fields, we use a
+ // file_containing_extension requests with transitive dependencies. As
+ // the repeated label is not allowed in oneof fields, we use a
// FileDescriptorResponse message to encapsulate the repeated fields.
// The reflection service is allowed to avoid sending FileDescriptorProtos
// that were previously sent in response to earlier requests in the stream.
FileDescriptorResponse file_descriptor_response = 4;
- // This message is used to answer all_extension_numbers_of_type requests.
+ // This message is used to answer all_extension_numbers_of_type request.
ExtensionNumberResponse all_extension_numbers_response = 5;
- // This message is used to answer list_services requests.
+ // This message is used to answer list_services request.
ListServiceResponse list_services_response = 6;
// This message is used when an error occurs.
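The oneof above means each reply carries exactly one of these response messages. A minimal sketch (not part of the vendored sources) of how a client can branch on the Go wrapper types that protoc generates for message_response, assuming the grpc_reflection_v1alpha package produced from this file:

package example

import (
	"fmt"

	rpb "google.golang.org/grpc/reflection/grpc_reflection_v1alpha"
)

// handleResponse switches on the generated oneof wrapper types for message_response.
func handleResponse(resp *rpb.ServerReflectionResponse) {
	switch r := resp.MessageResponse.(type) {
	case *rpb.ServerReflectionResponse_FileDescriptorResponse:
		fmt.Println("file descriptors:", len(r.FileDescriptorResponse.FileDescriptorProto))
	case *rpb.ServerReflectionResponse_AllExtensionNumbersResponse:
		fmt.Println("extension numbers:", r.AllExtensionNumbersResponse.ExtensionNumber)
	case *rpb.ServerReflectionResponse_ListServicesResponse:
		for _, svc := range r.ListServicesResponse.Service {
			fmt.Println("service:", svc.Name)
		}
	case *rpb.ServerReflectionResponse_ErrorResponse:
		fmt.Println("error:", r.ErrorResponse.ErrorCode, r.ErrorResponse.ErrorMessage)
	}
}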
diff --git a/vendor/google.golang.org/grpc/reflection/serverreflection.go b/vendor/google.golang.org/grpc/reflection/serverreflection.go
index dd22a2da7..1bfbf3e78 100644
--- a/vendor/google.golang.org/grpc/reflection/serverreflection.go
+++ b/vendor/google.golang.org/grpc/reflection/serverreflection.go
@@ -45,8 +45,7 @@ import (
"io"
"io/ioutil"
"reflect"
- "sort"
- "sync"
+ "strings"
"github.com/golang/protobuf/proto"
dpb "github.com/golang/protobuf/protoc-gen-go/descriptor"
@@ -58,10 +57,8 @@ import (
type serverReflectionServer struct {
s *grpc.Server
-
- initSymbols sync.Once
- serviceNames []string
- symbols map[string]*dpb.FileDescriptorProto // map of fully-qualified names to files
+ // TODO add more cache if necessary
+ serviceInfo map[string]grpc.ServiceInfo // cache for s.GetServiceInfo()
}
// Register registers the server reflection service on the given gRPC server.
@@ -79,112 +76,6 @@ type protoMessage interface {
Descriptor() ([]byte, []int)
}
-func (s *serverReflectionServer) getSymbols() (svcNames []string, symbolIndex map[string]*dpb.FileDescriptorProto) {
- s.initSymbols.Do(func() {
- serviceInfo := s.s.GetServiceInfo()
-
- s.symbols = map[string]*dpb.FileDescriptorProto{}
- s.serviceNames = make([]string, 0, len(serviceInfo))
- processed := map[string]struct{}{}
- for svc, info := range serviceInfo {
- s.serviceNames = append(s.serviceNames, svc)
- fdenc, ok := parseMetadata(info.Metadata)
- if !ok {
- continue
- }
- fd, err := decodeFileDesc(fdenc)
- if err != nil {
- continue
- }
- s.processFile(fd, processed)
- }
- sort.Strings(s.serviceNames)
- })
-
- return s.serviceNames, s.symbols
-}
-
-func (s *serverReflectionServer) processFile(fd *dpb.FileDescriptorProto, processed map[string]struct{}) {
- filename := fd.GetName()
- if _, ok := processed[filename]; ok {
- return
- }
- processed[filename] = struct{}{}
-
- prefix := fd.GetPackage()
-
- for _, msg := range fd.MessageType {
- s.processMessage(fd, prefix, msg)
- }
- for _, en := range fd.EnumType {
- s.processEnum(fd, prefix, en)
- }
- for _, ext := range fd.Extension {
- s.processField(fd, prefix, ext)
- }
- for _, svc := range fd.Service {
- svcName := fqn(prefix, svc.GetName())
- s.symbols[svcName] = fd
- for _, meth := range svc.Method {
- name := fqn(svcName, meth.GetName())
- s.symbols[name] = fd
- }
- }
-
- for _, dep := range fd.Dependency {
- fdenc := proto.FileDescriptor(dep)
- fdDep, err := decodeFileDesc(fdenc)
- if err != nil {
- continue
- }
- s.processFile(fdDep, processed)
- }
-}
-
-func (s *serverReflectionServer) processMessage(fd *dpb.FileDescriptorProto, prefix string, msg *dpb.DescriptorProto) {
- msgName := fqn(prefix, msg.GetName())
- s.symbols[msgName] = fd
-
- for _, nested := range msg.NestedType {
- s.processMessage(fd, msgName, nested)
- }
- for _, en := range msg.EnumType {
- s.processEnum(fd, msgName, en)
- }
- for _, ext := range msg.Extension {
- s.processField(fd, msgName, ext)
- }
- for _, fld := range msg.Field {
- s.processField(fd, msgName, fld)
- }
- for _, oneof := range msg.OneofDecl {
- oneofName := fqn(msgName, oneof.GetName())
- s.symbols[oneofName] = fd
- }
-}
-
-func (s *serverReflectionServer) processEnum(fd *dpb.FileDescriptorProto, prefix string, en *dpb.EnumDescriptorProto) {
- enName := fqn(prefix, en.GetName())
- s.symbols[enName] = fd
-
- for _, val := range en.Value {
- valName := fqn(enName, val.GetName())
- s.symbols[valName] = fd
- }
-}
-
-func (s *serverReflectionServer) processField(fd *dpb.FileDescriptorProto, prefix string, fld *dpb.FieldDescriptorProto) {
- fldName := fqn(prefix, fld.GetName())
- s.symbols[fldName] = fd
-}
-
-func fqn(prefix, name string) string {
- if prefix == "" {
- return name
- }
- return prefix + "." + name
-}
-
// fileDescForType gets the file descriptor for the given type.
// The given type should be a proto message.
func (s *serverReflectionServer) fileDescForType(st reflect.Type) (*dpb.FileDescriptorProto, error) {
@@ -194,12 +85,12 @@ func (s *serverReflectionServer) fileDescForType(st reflect.Type) (*dpb.FileDesc
}
enc, _ := m.Descriptor()
- return decodeFileDesc(enc)
+ return s.decodeFileDesc(enc)
}
// decodeFileDesc does decompression and unmarshalling on the given
// file descriptor byte slice.
-func decodeFileDesc(enc []byte) (*dpb.FileDescriptorProto, error) {
+func (s *serverReflectionServer) decodeFileDesc(enc []byte) (*dpb.FileDescriptorProto, error) {
raw, err := decompress(enc)
if err != nil {
return nil, fmt.Errorf("failed to decompress enc: %v", err)
@@ -225,7 +116,7 @@ func decompress(b []byte) ([]byte, error) {
return out, nil
}
-func typeForName(name string) (reflect.Type, error) {
+func (s *serverReflectionServer) typeForName(name string) (reflect.Type, error) {
pt := proto.MessageType(name)
if pt == nil {
return nil, fmt.Errorf("unknown type: %q", name)
@@ -235,7 +126,7 @@ func typeForName(name string) (reflect.Type, error) {
return st, nil
}
-func fileDescContainingExtension(st reflect.Type, ext int32) (*dpb.FileDescriptorProto, error) {
+func (s *serverReflectionServer) fileDescContainingExtension(st reflect.Type, ext int32) (*dpb.FileDescriptorProto, error) {
m, ok := reflect.Zero(reflect.PtrTo(st)).Interface().(proto.Message)
if !ok {
return nil, fmt.Errorf("failed to create message from type: %v", st)
@@ -253,7 +144,7 @@ func fileDescContainingExtension(st reflect.Type, ext int32) (*dpb.FileDescripto
return nil, fmt.Errorf("failed to find registered extension for extension number %v", ext)
}
- return decodeFileDesc(proto.FileDescriptor(extDesc.Filename))
+ return s.decodeFileDesc(proto.FileDescriptor(extDesc.Filename))
}
func (s *serverReflectionServer) allExtensionNumbersForType(st reflect.Type) ([]int32, error) {
@@ -277,13 +168,53 @@ func (s *serverReflectionServer) fileDescEncodingByFilename(name string) ([]byte
if enc == nil {
return nil, fmt.Errorf("unknown file: %v", name)
}
- fd, err := decodeFileDesc(enc)
+ fd, err := s.decodeFileDesc(enc)
if err != nil {
return nil, err
}
return proto.Marshal(fd)
}
+// serviceMetadataForSymbol finds the metadata for name in s.serviceInfo.
+// name should be a service name or a method name.
+func (s *serverReflectionServer) serviceMetadataForSymbol(name string) (interface{}, error) {
+ if s.serviceInfo == nil {
+ s.serviceInfo = s.s.GetServiceInfo()
+ }
+
+ // Check if it's a service name.
+ if info, ok := s.serviceInfo[name]; ok {
+ return info.Metadata, nil
+ }
+
+ // Check if it's a method name.
+ pos := strings.LastIndex(name, ".")
+ // Not a valid method name.
+ if pos == -1 {
+ return nil, fmt.Errorf("unknown symbol: %v", name)
+ }
+
+ info, ok := s.serviceInfo[name[:pos]]
+ // Substring before last "." is not a service name.
+ if !ok {
+ return nil, fmt.Errorf("unknown symbol: %v", name)
+ }
+
+ // Search the method name in info.Methods.
+ var found bool
+ for _, m := range info.Methods {
+ if m.Name == name[pos+1:] {
+ found = true
+ break
+ }
+ }
+ if found {
+ return info.Metadata, nil
+ }
+
+ return nil, fmt.Errorf("unknown symbol: %v", name)
+}
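The helper above resolves a fully-qualified method name by treating everything before the last dot as the service name. A tiny standalone sketch of that split; the health-service symbol is illustrative and not taken from this repository:

package main

import (
	"fmt"
	"strings"
)

// splitSymbol mirrors the last-dot split used by serviceMetadataForSymbol:
// "grpc.health.v1.Health.Check" -> service "grpc.health.v1.Health", method "Check".
func splitSymbol(name string) (service, method string, ok bool) {
	pos := strings.LastIndex(name, ".")
	if pos == -1 {
		return "", "", false
	}
	return name[:pos], name[pos+1:], true
}

func main() {
	svc, m, ok := splitSymbol("grpc.health.v1.Health.Check")
	fmt.Println(svc, m, ok) // grpc.health.v1.Health Check true
}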
+
// parseMetadata finds the file descriptor bytes specified by meta.
// For SupportPackageIsVersion4, m is the name of the proto file; we
// call proto.FileDescriptor to get the byte slice.
@@ -306,21 +237,33 @@ func parseMetadata(meta interface{}) ([]byte, bool) {
// does marshalling on it and returns the marshalled result.
// The given symbol can be a type, a service or a method.
func (s *serverReflectionServer) fileDescEncodingContainingSymbol(name string) ([]byte, error) {
- _, symbols := s.getSymbols()
- fd := symbols[name]
- if fd == nil {
- // Check if it's a type name that was not present in the
- // transitive dependencies of the registered services.
- if st, err := typeForName(name); err == nil {
- fd, err = s.fileDescForType(st)
- if err != nil {
- return nil, err
- }
+ var (
+ fd *dpb.FileDescriptorProto
+ )
+ // Check if it's a type name.
+ if st, err := s.typeForName(name); err == nil {
+ fd, err = s.fileDescForType(st)
+ if err != nil {
+ return nil, err
}
- }
+ } else { // Check if it's a service name or a method name.
+ meta, err := s.serviceMetadataForSymbol(name)
- if fd == nil {
- return nil, fmt.Errorf("unknown symbol: %v", name)
+ // Metadata not found.
+ if err != nil {
+ return nil, err
+ }
+
+ // Metadata not valid.
+ enc, ok := parseMetadata(meta)
+ if !ok {
+ return nil, fmt.Errorf("invalid file descriptor for symbol: %v", name)
+ }
+
+ fd, err = s.decodeFileDesc(enc)
+ if err != nil {
+ return nil, err
+ }
}
return proto.Marshal(fd)
@@ -329,11 +272,11 @@ func (s *serverReflectionServer) fileDescEncodingContainingSymbol(name string) (
// fileDescEncodingContainingExtension finds the file descriptor containing given extension,
// does marshalling on it and returns the marshalled result.
func (s *serverReflectionServer) fileDescEncodingContainingExtension(typeName string, extNum int32) ([]byte, error) {
- st, err := typeForName(typeName)
+ st, err := s.typeForName(typeName)
if err != nil {
return nil, err
}
- fd, err := fileDescContainingExtension(st, extNum)
+ fd, err := s.fileDescContainingExtension(st, extNum)
if err != nil {
return nil, err
}
@@ -342,7 +285,7 @@ func (s *serverReflectionServer) fileDescEncodingContainingExtension(typeName st
// allExtensionNumbersForTypeName returns all extension numbers for the given type.
func (s *serverReflectionServer) allExtensionNumbersForTypeName(name string) ([]int32, error) {
- st, err := typeForName(name)
+ st, err := s.typeForName(name)
if err != nil {
return nil, err
}
@@ -431,12 +374,14 @@ func (s *serverReflectionServer) ServerReflectionInfo(stream rpb.ServerReflectio
}
}
case *rpb.ServerReflectionRequest_ListServices:
- svcNames, _ := s.getSymbols()
- serviceResponses := make([]*rpb.ServiceResponse, len(svcNames))
- for i, n := range svcNames {
- serviceResponses[i] = &rpb.ServiceResponse{
+ if s.serviceInfo == nil {
+ s.serviceInfo = s.s.GetServiceInfo()
+ }
+ serviceResponses := make([]*rpb.ServiceResponse, 0, len(s.serviceInfo))
+ for n := range s.serviceInfo {
+ serviceResponses = append(serviceResponses, &rpb.ServiceResponse{
Name: n,
- }
+ })
}
out.MessageResponse = &rpb.ServerReflectionResponse_ListServicesResponse{
ListServicesResponse: &rpb.ListServiceResponse{
diff --git a/vendor/google.golang.org/grpc/resolver/dns/dns_resolver.go b/vendor/google.golang.org/grpc/resolver/dns/dns_resolver.go
index 4af67422c..a543a709a 100644
--- a/vendor/google.golang.org/grpc/resolver/dns/dns_resolver.go
+++ b/vendor/google.golang.org/grpc/resolver/dns/dns_resolver.go
@@ -1,6 +1,6 @@
/*
*
- * Copyright 2018 gRPC authors.
+ * Copyright 2017 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -24,6 +24,7 @@ import (
"encoding/json"
"errors"
"fmt"
+ "math/rand"
"net"
"os"
"strconv"
@@ -33,8 +34,6 @@ import (
"golang.org/x/net/context"
"google.golang.org/grpc/grpclog"
- "google.golang.org/grpc/internal/backoff"
- "google.golang.org/grpc/internal/grpcrand"
"google.golang.org/grpc/resolver"
)
@@ -51,29 +50,21 @@ const (
txtAttribute = "grpc_config="
)
-var (
- errMissingAddr = errors.New("dns resolver: missing address")
-
- // Addresses ending with a colon that is supposed to be the separator
- // between host and port is not allowed. E.g. "::" is a valid address as
- // it is an IPv6 address (host only) and "[::]:" is invalid as it ends with
- // a colon as the host and port separator
- errEndsWithColon = errors.New("dns resolver: missing port after port-separator colon")
-)
+var errMissingAddr = errors.New("missing address")
// NewBuilder creates a dnsBuilder which is used to factory DNS resolvers.
func NewBuilder() resolver.Builder {
- return &dnsBuilder{minFreq: defaultFreq}
+ return &dnsBuilder{freq: defaultFreq}
}
type dnsBuilder struct {
- // minimum frequency of polling the DNS server.
- minFreq time.Duration
+ // frequency of polling the DNS server.
+ freq time.Duration
}
// Build creates and starts a DNS resolver that watches the name resolution of the target.
func (b *dnsBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOption) (resolver.Resolver, error) {
- host, port, err := parseTarget(target.Endpoint, defaultPort)
+ host, port, err := parseTarget(target.Endpoint)
if err != nil {
return nil, err
}
@@ -96,25 +87,14 @@ func (b *dnsBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts
// DNS address (non-IP).
ctx, cancel := context.WithCancel(context.Background())
d := &dnsResolver{
- freq: b.minFreq,
- backoff: backoff.Exponential{MaxDelay: b.minFreq},
- host: host,
- port: port,
- ctx: ctx,
- cancel: cancel,
- cc: cc,
- t: time.NewTimer(0),
- rn: make(chan struct{}, 1),
- disableServiceConfig: opts.DisableServiceConfig,
- }
-
- if target.Authority == "" {
- d.resolver = defaultResolver
- } else {
- d.resolver, err = customAuthorityResolver(target.Authority)
- if err != nil {
- return nil, err
- }
+ freq: b.freq,
+ host: host,
+ port: port,
+ ctx: ctx,
+ cancel: cancel,
+ cc: cc,
+ t: time.NewTimer(0),
+ rn: make(chan struct{}, 1),
}
d.wg.Add(1)
@@ -127,12 +107,6 @@ func (b *dnsBuilder) Scheme() string {
return "dns"
}
-type netResolver interface {
- LookupHost(ctx context.Context, host string) (addrs []string, err error)
- LookupSRV(ctx context.Context, service, proto, name string) (cname string, addrs []*net.SRV, err error)
- LookupTXT(ctx context.Context, name string) (txts []string, err error)
-}
-
// ipResolver watches for the name resolution update for an IP address.
type ipResolver struct {
cc resolver.ClientConn
@@ -168,15 +142,12 @@ func (i *ipResolver) watcher() {
// dnsResolver watches for the name resolution update for a non-IP target.
type dnsResolver struct {
- freq time.Duration
- backoff backoff.Exponential
- retryCount int
- host string
- port string
- resolver netResolver
- ctx context.Context
- cancel context.CancelFunc
- cc resolver.ClientConn
+ freq time.Duration
+ host string
+ port string
+ ctx context.Context
+ cancel context.CancelFunc
+ cc resolver.ClientConn
// rn channel is used by ResolveNow() to force an immediate resolution of the target.
rn chan struct{}
t *time.Timer
@@ -186,8 +157,7 @@ type dnsResolver struct {
// If Close() doesn't wait for the watcher() goroutine to finish, the race detector sometimes
// warns that the lookup (READ of the lookup function pointers) inside the watcher() goroutine
// has a data race with replaceNetFunc (WRITE of the lookup function pointers).
- wg sync.WaitGroup
- disableServiceConfig bool
+ wg sync.WaitGroup
}
// ResolveNow invokes an immediate resolution of the target that this dnsResolver watches.
@@ -215,31 +185,24 @@ func (d *dnsResolver) watcher() {
case <-d.rn:
}
result, sc := d.lookup()
- // Next lookup should happen within an interval defined by d.freq. It may be
- // more often due to exponential retry on empty address list.
- if len(result) == 0 {
- d.retryCount++
- d.t.Reset(d.backoff.Backoff(d.retryCount))
- } else {
- d.retryCount = 0
- d.t.Reset(d.freq)
- }
- d.cc.NewServiceConfig(sc)
+ // Next lookup should happen after an interval defined by d.freq.
+ d.t.Reset(d.freq)
+ d.cc.NewServiceConfig(string(sc))
d.cc.NewAddress(result)
}
}
func (d *dnsResolver) lookupSRV() []resolver.Address {
var newAddrs []resolver.Address
- _, srvs, err := d.resolver.LookupSRV(d.ctx, "grpclb", "tcp", d.host)
+ _, srvs, err := lookupSRV(d.ctx, "grpclb", "tcp", d.host)
if err != nil {
grpclog.Infof("grpc: failed dns SRV record lookup due to %v.\n", err)
return nil
}
for _, s := range srvs {
- lbAddrs, err := d.resolver.LookupHost(d.ctx, s.Target)
+ lbAddrs, err := lookupHost(d.ctx, s.Target)
if err != nil {
- grpclog.Infof("grpc: failed load balancer address dns lookup due to %v.\n", err)
+ grpclog.Warningf("grpc: failed load balancer address dns lookup due to %v.\n", err)
continue
}
for _, a := range lbAddrs {
@@ -256,9 +219,9 @@ func (d *dnsResolver) lookupSRV() []resolver.Address {
}
func (d *dnsResolver) lookupTXT() string {
- ss, err := d.resolver.LookupTXT(d.ctx, d.host)
+ ss, err := lookupTXT(d.ctx, d.host)
if err != nil {
- grpclog.Infof("grpc: failed dns TXT record lookup due to %v.\n", err)
+ grpclog.Warningf("grpc: failed dns TXT record lookup due to %v.\n", err)
return ""
}
var res string
@@ -276,7 +239,7 @@ func (d *dnsResolver) lookupTXT() string {
func (d *dnsResolver) lookupHost() []resolver.Address {
var newAddrs []resolver.Address
- addrs, err := d.resolver.LookupHost(d.ctx, d.host)
+ addrs, err := lookupHost(d.ctx, d.host)
if err != nil {
grpclog.Warningf("grpc: failed dns A record lookup due to %v.\n", err)
return nil
@@ -294,12 +257,10 @@ func (d *dnsResolver) lookupHost() []resolver.Address {
}
func (d *dnsResolver) lookup() ([]resolver.Address, string) {
- newAddrs := d.lookupSRV()
+ var newAddrs []resolver.Address
+ newAddrs = d.lookupSRV()
// Support fallback to non-balancer address.
newAddrs = append(newAddrs, d.lookupHost()...)
- if d.disableServiceConfig {
- return newAddrs, ""
- }
sc := d.lookupTXT()
return newAddrs, canaryingSC(sc)
}
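The restored lookup path combines three DNS queries: SRV records for the "grpclb" service, A/AAAA records for the host, and TXT records that may carry a service config. A minimal standalone sketch of the same queries against the standard library (the host name is illustrative, and these are the plain net helpers rather than the vendored lookup functions):

package main

import (
	"fmt"
	"net"
)

func main() {
	const host = "example.com" // illustrative target host

	// Balancer addresses: SRV lookup for the "grpclb" service over TCP.
	if _, srvs, err := net.LookupSRV("grpclb", "tcp", host); err == nil {
		for _, s := range srvs {
			fmt.Printf("grpclb backend: %s:%d\n", s.Target, s.Port)
		}
	}

	// Fallback addresses: plain A/AAAA lookup for the host itself.
	if addrs, err := net.LookupHost(host); err == nil {
		fmt.Println("addresses:", addrs)
	}

	// Service config: TXT records, filtered by the grpc_config= prefix in lookupTXT.
	if txts, err := net.LookupTXT(host); err == nil {
		fmt.Println("txt records:", txts)
	}
}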
@@ -318,16 +279,17 @@ func formatIP(addr string) (addrIP string, ok bool) {
return "[" + addr + "]", true
}
-// parseTarget takes the user input target string and default port, returns formatted host and port info.
+// parseTarget takes the user input target string and returns formatted host and port info.
// If target doesn't specify a port, set the port to be the defaultPort.
// If target is in IPv6 format and host-name is enclosed in square brackets, brackets
// are stripped when setting the host.
// examples:
-// target: "www.google.com" defaultPort: "443" returns host: "www.google.com", port: "443"
-// target: "ipv4-host:80" defaultPort: "443" returns host: "ipv4-host", port: "80"
-// target: "[ipv6-host]" defaultPort: "443" returns host: "ipv6-host", port: "443"
-// target: ":80" defaultPort: "443" returns host: "localhost", port: "80"
-func parseTarget(target, defaultPort string) (host, port string, err error) {
+// target: "www.google.com" returns host: "www.google.com", port: "443"
+// target: "ipv4-host:80" returns host: "ipv4-host", port: "80"
+// target: "[ipv6-host]" returns host: "ipv6-host", port: "443"
+// target: ":80" returns host: "localhost", port: "80"
+// target: ":" returns host: "localhost", port: "443"
+func parseTarget(target string) (host, port string, err error) {
if target == "" {
return "", "", errMissingAddr
}
@@ -336,15 +298,15 @@ func parseTarget(target, defaultPort string) (host, port string, err error) {
return target, defaultPort, nil
}
if host, port, err = net.SplitHostPort(target); err == nil {
- if port == "" {
- // If the port field is empty (target ends with colon), e.g. "[::1]:", this is an error.
- return "", "", errEndsWithColon
- }
// target has port, i.e. ipv4-host:port, [ipv6-host]:port, host-name:port
if host == "" {
// Keep consistent with net.Dial(): If the host is empty, as in ":80", the local system is assumed.
host = "localhost"
}
+ if port == "" {
+ // If the port field is empty (target ends with colon), e.g. "[::1]:", defaultPort is used.
+ port = defaultPort
+ }
return host, port, nil
}
if host, port, err = net.SplitHostPort(target + ":" + defaultPort); err == nil {
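The fallback documented in the examples above can be reproduced with net.SplitHostPort alone. A rough sketch, assuming defaultPort is "443" as in the comment; the full parseTarget also handles bare IP literals via formatIP and the empty-port case, both omitted here:

package main

import (
	"fmt"
	"net"
)

const defaultPort = "443" // assumed, matching the examples in the comment above

// splitWithDefault appends the default port when the target has none, which is
// essentially the fallback parseTarget uses.
func splitWithDefault(target string) (host, port string, err error) {
	if host, port, err = net.SplitHostPort(target); err == nil {
		if host == "" {
			// Keep consistent with net.Dial(): an empty host means the local system.
			host = "localhost"
		}
		return host, port, nil
	}
	return net.SplitHostPort(target + ":" + defaultPort)
}

func main() {
	for _, t := range []string{"www.google.com", "ipv4-host:80", "[ipv6-host]", ":80"} {
		h, p, err := splitWithDefault(t)
		fmt.Printf("%-18q -> host=%q port=%q err=%v\n", t, h, p, err)
	}
}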
@@ -377,7 +339,12 @@ func chosenByPercentage(a *int) bool {
if a == nil {
return true
}
- return grpcrand.Intn(100)+1 <= *a
+ s := rand.NewSource(time.Now().UnixNano())
+ r := rand.New(s)
+ if r.Intn(100)+1 > *a {
+ return false
+ }
+ return true
}
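The reverted helper seeds math/rand on every call rather than using the shared grpcrand source. A standalone sketch of the same percentage gate, for illustration only:

package main

import (
	"fmt"
	"math/rand"
	"time"
)

// chosenByPercentage returns true with probability *a percent; a nil pointer
// means no percentage was set, which always matches.
func chosenByPercentage(a *int) bool {
	if a == nil {
		return true
	}
	// Re-seeding per call is what the reverted code does; newer code shares one source.
	r := rand.New(rand.NewSource(time.Now().UnixNano()))
	return r.Intn(100)+1 <= *a
}

func main() {
	p := 30
	fmt.Println("chosen:", chosenByPercentage(&p)) // true roughly 30% of the time
	fmt.Println("nil percentage always matches:", chosenByPercentage(nil))
}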
func canaryingSC(js string) string {
diff --git a/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go b/vendor/google.golang.org/grpc/resolver/dns/go17.go
index 3ee8740f1..b466bc8f6 100644
--- a/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go
+++ b/vendor/google.golang.org/grpc/resolver/dns/go17.go
@@ -1,6 +1,8 @@
+// +build go1.6, !go1.8
+
/*
*
- * Copyright 2018 gRPC authors.
+ * Copyright 2017 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -16,20 +18,18 @@
*
*/
-// Package envconfig contains grpc settings configured by environment variables.
-package envconfig
+package dns
import (
- "os"
- "strings"
-)
+ "net"
-const (
- prefix = "GRPC_GO_"
- retryStr = prefix + "RETRY"
+ "golang.org/x/net/context"
)
var (
- // Retry is set if retry is explicitly enabled via "GRPC_GO_RETRY=on".
- Retry = strings.EqualFold(os.Getenv(retryStr), "on")
+ lookupHost = func(ctx context.Context, host string) ([]string, error) { return net.LookupHost(host) }
+ lookupSRV = func(ctx context.Context, service, proto, name string) (string, []*net.SRV, error) {
+ return net.LookupSRV(service, proto, name)
+ }
+ lookupTXT = func(ctx context.Context, name string) ([]string, error) { return net.LookupTXT(name) }
)
diff --git a/vendor/google.golang.org/grpc/internal/channelz/util_nonlinux_pre_go19.go b/vendor/google.golang.org/grpc/resolver/dns/go18.go
index 1d4da952d..fa34f14ca 100644
--- a/vendor/google.golang.org/grpc/internal/channelz/util_nonlinux_pre_go19.go
+++ b/vendor/google.golang.org/grpc/resolver/dns/go18.go
@@ -1,8 +1,8 @@
-// +build !linux !go1.9 appengine
+// +build go1.8
/*
*
- * Copyright 2018 gRPC authors.
+ * Copyright 2017 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -18,9 +18,12 @@
*
*/
-package channelz
+package dns
-// GetSocketOption gets the socket option info of the conn.
-func GetSocketOption(c interface{}) *SocketOptionData {
- return nil
-}
+import "net"
+
+var (
+ lookupHost = net.DefaultResolver.LookupHost
+ lookupSRV = net.DefaultResolver.LookupSRV
+ lookupTXT = net.DefaultResolver.LookupTXT
+)
diff --git a/vendor/google.golang.org/grpc/resolver/dns/go19.go b/vendor/google.golang.org/grpc/resolver/dns/go19.go
deleted file mode 100644
index 9886de275..000000000
--- a/vendor/google.golang.org/grpc/resolver/dns/go19.go
+++ /dev/null
@@ -1,54 +0,0 @@
-// +build go1.9
-
-/*
- *
- * Copyright 2018 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package dns
-
-import (
- "net"
-
- "golang.org/x/net/context"
-)
-
-var (
- defaultResolver netResolver = net.DefaultResolver
-)
-
-const defaultDNSSvrPort = "53"
-
-var customAuthorityDialler = func(authority string) func(ctx context.Context, network, address string) (net.Conn, error) {
- return func(ctx context.Context, network, address string) (net.Conn, error) {
- var dialer net.Dialer
- return dialer.DialContext(ctx, network, authority)
- }
-}
-
-var customAuthorityResolver = func(authority string) (netResolver, error) {
- host, port, err := parseTarget(authority, defaultDNSSvrPort)
- if err != nil {
- return nil, err
- }
-
- authorityWithPort := net.JoinHostPort(host, port)
-
- return &net.Resolver{
- PreferGo: true,
- Dial: customAuthorityDialler(authorityWithPort),
- }, nil
-}
diff --git a/vendor/google.golang.org/grpc/resolver/dns/pre_go19.go b/vendor/google.golang.org/grpc/resolver/dns/pre_go19.go
deleted file mode 100644
index 70428113b..000000000
--- a/vendor/google.golang.org/grpc/resolver/dns/pre_go19.go
+++ /dev/null
@@ -1,51 +0,0 @@
-// +build go1.6, !go1.9
-
-/*
- *
- * Copyright 2018 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package dns
-
-import (
- "fmt"
- "net"
-
- "golang.org/x/net/context"
-)
-
-var (
- defaultResolver netResolver = &preGo19Resolver{}
-)
-
-type preGo19Resolver struct {
-}
-
-func (*preGo19Resolver) LookupHost(ctx context.Context, host string) ([]string, error) {
- return net.LookupHost(host)
-}
-
-func (*preGo19Resolver) LookupSRV(ctx context.Context, service, proto, name string) (string, []*net.SRV, error) {
- return net.LookupSRV(service, proto, name)
-}
-
-func (*preGo19Resolver) LookupTXT(ctx context.Context, name string) ([]string, error) {
- return net.LookupTXT(name)
-}
-
-var customAuthorityResolver = func(authority string) (netResolver, error) {
- return nil, fmt.Errorf("Default DNS resolver does not support custom DNS server with go < 1.9")
-}
diff --git a/vendor/google.golang.org/grpc/resolver/resolver.go b/vendor/google.golang.org/grpc/resolver/resolver.go
index 145cf477e..df097eedf 100644
--- a/vendor/google.golang.org/grpc/resolver/resolver.go
+++ b/vendor/google.golang.org/grpc/resolver/resolver.go
@@ -29,41 +29,37 @@ var (
// TODO(bar) install dns resolver in init(){}.
-// Register registers the resolver builder to the resolver map. b.Scheme will be
-// used as the scheme registered with this builder.
-//
-// NOTE: this function must only be called during initialization time (i.e. in
-// an init() function), and is not thread-safe. If multiple Resolvers are
-// registered with the same name, the one registered last will take effect.
+// Register registers the resolver builder to the resolver map.
+// b.Scheme will be used as the scheme registered with this builder.
func Register(b Builder) {
m[b.Scheme()] = b
}
// Get returns the resolver builder registered with the given scheme.
-//
-// If no builder is register with the scheme, nil will be returned.
+// If no builder is registered with the scheme, the default scheme will
+// be used.
+// If the default scheme is not modified, "passthrough" will be the default
+// scheme, and the preinstalled dns resolver will be used.
+// If the default scheme is modified, and a resolver is registered with
+// the scheme, that resolver will be returned.
+// If the default scheme is modified, and no resolver is registered with
+// the scheme, nil will be returned.
func Get(scheme string) Builder {
if b, ok := m[scheme]; ok {
return b
}
+ if b, ok := m[defaultScheme]; ok {
+ return b
+ }
return nil
}
-// SetDefaultScheme sets the default scheme that will be used. The default
-// default scheme is "passthrough".
-//
-// NOTE: this function must only be called during initialization time (i.e. in
-// an init() function), and is not thread-safe. The scheme set last overrides
-// previously set values.
+// SetDefaultScheme sets the default scheme that will be used.
+// The default default scheme is "passthrough".
func SetDefaultScheme(scheme string) {
defaultScheme = scheme
}
-// GetDefaultScheme gets the default scheme that will be used.
-func GetDefaultScheme() string {
- return defaultScheme
-}
-
// AddressType indicates the address type returned by name resolution.
type AddressType uint8
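With this version, Get falls back to the builder registered for the default scheme when the requested scheme is unknown. A minimal sketch of plugging a custom builder into that registry, assuming the pre-upgrade resolver API restored here (NewAddress, BuildOption, ResolveNowOption); the staticBuilder type and its addresses are purely illustrative:

package example

import (
	"google.golang.org/grpc/resolver"
)

// staticBuilder is a hypothetical builder that reports a fixed address list;
// it exists only to illustrate the registry calls above.
type staticBuilder struct{}

func (*staticBuilder) Scheme() string { return "static" }

func (*staticBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOption) (resolver.Resolver, error) {
	cc.NewAddress([]resolver.Address{{Addr: "10.0.0.1:50051"}, {Addr: "10.0.0.2:50051"}})
	return &staticResolver{}, nil
}

type staticResolver struct{}

func (*staticResolver) ResolveNow(resolver.ResolveNowOption) {}
func (*staticResolver) Close()                               {}

func init() {
	// Registration is keyed by Scheme(); Get("static") now returns this builder,
	// while unknown schemes fall back to the default ("passthrough") builder.
	resolver.Register(&staticBuilder{})
}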
@@ -94,8 +90,9 @@ type Address struct {
// BuildOption includes additional information for the builder to create
// the resolver.
type BuildOption struct {
- // DisableServiceConfig indicates whether resolver should fetch service config data.
- DisableServiceConfig bool
+ // UserOptions can be used to pass configuration between DialOptions and the
+ // resolver.
+ UserOptions interface{}
}
// ClientConn contains the callbacks for resolver to notify any updates
diff --git a/vendor/google.golang.org/grpc/resolver_conn_wrapper.go b/vendor/google.golang.org/grpc/resolver_conn_wrapper.go
index a6c02ac9e..ef5d4c286 100644
--- a/vendor/google.golang.org/grpc/resolver_conn_wrapper.go
+++ b/vendor/google.golang.org/grpc/resolver_conn_wrapper.go
@@ -23,19 +23,17 @@ import (
"strings"
"google.golang.org/grpc/grpclog"
- "google.golang.org/grpc/internal/channelz"
"google.golang.org/grpc/resolver"
)
// ccResolverWrapper is a wrapper on top of cc for resolvers.
// It implements the resolver.ClientConn interface.
type ccResolverWrapper struct {
- cc *ClientConn
- resolver resolver.Resolver
- addrCh chan []resolver.Address
- scCh chan string
- done chan struct{}
- lastAddressesCount int
+ cc *ClientConn
+ resolver resolver.Resolver
+ addrCh chan []resolver.Address
+ scCh chan string
+ done chan struct{}
}
// split2 returns the values from strings.SplitN(s, sep, 2).
@@ -50,32 +48,31 @@ func split2(s, sep string) (string, string, bool) {
// parseTarget splits target into a struct containing scheme, authority and
// endpoint.
-//
-// If target is not a valid scheme://authority/endpoint, it returns {Endpoint:
-// target}.
func parseTarget(target string) (ret resolver.Target) {
var ok bool
ret.Scheme, ret.Endpoint, ok = split2(target, "://")
if !ok {
return resolver.Target{Endpoint: target}
}
- ret.Authority, ret.Endpoint, ok = split2(ret.Endpoint, "/")
- if !ok {
- return resolver.Target{Endpoint: target}
- }
+ ret.Authority, ret.Endpoint, _ = split2(ret.Endpoint, "/")
return ret
}
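The restored parseTarget keeps whatever follows "scheme://" as the endpoint even when no authority separator is present. A small self-contained sketch of the same splitting; the Gitaly-style addresses are illustrative, and the split2 body is reconstructed from its doc comment above:

package main

import (
	"fmt"
	"strings"
)

// Target mirrors resolver.Target: scheme://authority/endpoint.
type Target struct{ Scheme, Authority, Endpoint string }

func split2(s, sep string) (string, string, bool) {
	spl := strings.SplitN(s, sep, 2)
	if len(spl) < 2 {
		return "", "", false
	}
	return spl[0], spl[1], true
}

// parseTarget has the same shape as the restored helper above.
func parseTarget(target string) (ret Target) {
	var ok bool
	ret.Scheme, ret.Endpoint, ok = split2(target, "://")
	if !ok {
		return Target{Endpoint: target}
	}
	ret.Authority, ret.Endpoint, _ = split2(ret.Endpoint, "/")
	return ret
}

func main() {
	for _, t := range []string{"dns:///gitaly.internal:9999", "unix:///var/run/gitaly.sock", "localhost:9999"} {
		fmt.Printf("%-35s -> %+v\n", t, parseTarget(t))
	}
}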
// newCCResolverWrapper parses cc.target for scheme and gets the resolver
-// builder for this scheme and builds the resolver. The monitoring goroutine
-// for it is not started yet and can be created by calling start().
+// builder for this scheme. It then builds the resolver and starts the
+// monitoring goroutine for it.
//
// If withResolverBuilder dial option is set, the specified resolver will be
// used instead.
func newCCResolverWrapper(cc *ClientConn) (*ccResolverWrapper, error) {
+ grpclog.Infof("dialing to target with scheme: %q", cc.parsedTarget.Scheme)
+
rb := cc.dopts.resolverBuilder
if rb == nil {
- return nil, fmt.Errorf("could not get resolver for scheme: %q", cc.parsedTarget.Scheme)
+ rb = resolver.Get(cc.parsedTarget.Scheme)
+ if rb == nil {
+ return nil, fmt.Errorf("could not get resolver for scheme: %q", cc.parsedTarget.Scheme)
+ }
}
ccr := &ccResolverWrapper{
@@ -86,7 +83,9 @@ func newCCResolverWrapper(cc *ClientConn) (*ccResolverWrapper, error) {
}
var err error
- ccr.resolver, err = rb.Build(cc.parsedTarget, ccr, resolver.BuildOption{DisableServiceConfig: cc.dopts.disableServiceConfig})
+ ccr.resolver, err = rb.Build(cc.parsedTarget, ccr, resolver.BuildOption{
+ UserOptions: cc.dopts.resolverBuildUserOptions,
+ })
if err != nil {
return nil, err
}
@@ -97,7 +96,7 @@ func (ccr *ccResolverWrapper) start() {
go ccr.watcher()
}
-// watcher processes address updates and service config updates sequentially.
+// watcher processes address updates and service config updates sequentially.
// Otherwise, we need to resolve possible races between address and service
// config (e.g. they specify different balancer types).
func (ccr *ccResolverWrapper) watcher() {
@@ -116,9 +115,6 @@ func (ccr *ccResolverWrapper) watcher() {
default:
}
grpclog.Infof("ccResolverWrapper: sending new addresses to cc: %v", addrs)
- if channelz.IsOn() {
- ccr.addChannelzTraceEvent(addrs)
- }
ccr.cc.handleResolvedAddrs(addrs, nil)
case sc := <-ccr.scCh:
select {
@@ -153,7 +149,7 @@ func (ccr *ccResolverWrapper) NewAddress(addrs []resolver.Address) {
}
// NewServiceConfig is called by the resolver implementation to send service
-// configs to gRPC.
+// configs to gRPC.
func (ccr *ccResolverWrapper) NewServiceConfig(sc string) {
select {
case <-ccr.scCh:
@@ -161,29 +157,3 @@ func (ccr *ccResolverWrapper) NewServiceConfig(sc string) {
}
ccr.scCh <- sc
}
-
-func (ccr *ccResolverWrapper) addChannelzTraceEvent(addrs []resolver.Address) {
- if len(addrs) == 0 && ccr.lastAddressesCount != 0 {
- channelz.AddTraceEvent(ccr.cc.channelzID, &channelz.TraceEventDesc{
- Desc: "Resolver returns an empty address list",
- Severity: channelz.CtWarning,
- })
- } else if len(addrs) != 0 && ccr.lastAddressesCount == 0 {
- var s string
- for i, a := range addrs {
- if a.ServerName != "" {
- s += a.Addr + "(" + a.ServerName + ")"
- } else {
- s += a.Addr
- }
- if i != len(addrs)-1 {
- s += " "
- }
- }
- channelz.AddTraceEvent(ccr.cc.channelzID, &channelz.TraceEventDesc{
- Desc: fmt.Sprintf("Resolver returns a non-empty address list (previous one was empty) %q", s),
- Severity: channelz.CtINFO,
- })
- }
- ccr.lastAddressesCount = len(addrs)
-}
diff --git a/vendor/google.golang.org/grpc/rpc_util.go b/vendor/google.golang.org/grpc/rpc_util.go
index 6849e37a5..bf384b644 100644
--- a/vendor/google.golang.org/grpc/rpc_util.go
+++ b/vendor/google.golang.org/grpc/rpc_util.go
@@ -22,12 +22,9 @@ import (
"bytes"
"compress/gzip"
"encoding/binary"
- "fmt"
"io"
"io/ioutil"
"math"
- "net/url"
- "strings"
"sync"
"time"
@@ -35,17 +32,14 @@ import (
"google.golang.org/grpc/codes"
"google.golang.org/grpc/credentials"
"google.golang.org/grpc/encoding"
- "google.golang.org/grpc/encoding/proto"
- "google.golang.org/grpc/internal/transport"
"google.golang.org/grpc/metadata"
"google.golang.org/grpc/peer"
"google.golang.org/grpc/stats"
"google.golang.org/grpc/status"
+ "google.golang.org/grpc/transport"
)
// Compressor defines the interface gRPC uses to compress a message.
-//
-// Deprecated: use package encoding.
type Compressor interface {
// Do compresses p into w.
Do(w io.Writer, p []byte) error
@@ -58,34 +52,14 @@ type gzipCompressor struct {
}
// NewGZIPCompressor creates a Compressor based on GZIP.
-//
-// Deprecated: use package encoding/gzip.
func NewGZIPCompressor() Compressor {
- c, _ := NewGZIPCompressorWithLevel(gzip.DefaultCompression)
- return c
-}
-
-// NewGZIPCompressorWithLevel is like NewGZIPCompressor but specifies the gzip compression level instead
-// of assuming DefaultCompression.
-//
-// The error returned will be nil if the level is valid.
-//
-// Deprecated: use package encoding/gzip.
-func NewGZIPCompressorWithLevel(level int) (Compressor, error) {
- if level < gzip.DefaultCompression || level > gzip.BestCompression {
- return nil, fmt.Errorf("grpc: invalid compression level: %d", level)
- }
return &gzipCompressor{
pool: sync.Pool{
New: func() interface{} {
- w, err := gzip.NewWriterLevel(ioutil.Discard, level)
- if err != nil {
- panic(err)
- }
- return w
+ return gzip.NewWriter(ioutil.Discard)
},
},
- }, nil
+ }
}
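In this grpc version the Compressor/Decompressor hooks are installed per connection through dial options. A minimal sketch; the address is illustrative and error handling is kept to a bare minimum:

package main

import (
	"log"

	"google.golang.org/grpc"
)

func main() {
	// WithCompressor/WithDecompressor install the gzip codec restored above
	// for all RPCs on this connection.
	conn, err := grpc.Dial("localhost:9999",
		grpc.WithInsecure(),
		grpc.WithCompressor(grpc.NewGZIPCompressor()),
		grpc.WithDecompressor(grpc.NewGZIPDecompressor()),
	)
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	defer conn.Close()
}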
func (c *gzipCompressor) Do(w io.Writer, p []byte) error {
@@ -103,8 +77,6 @@ func (c *gzipCompressor) Type() string {
}
// Decompressor defines the interface gRPC uses to decompress a message.
-//
-// Deprecated: use package encoding.
type Decompressor interface {
// Do reads the data from r and uncompresses it.
Do(r io.Reader) ([]byte, error)
@@ -117,8 +89,6 @@ type gzipDecompressor struct {
}
// NewGZIPDecompressor creates a Decompressor based on GZIP.
-//
-// Deprecated: use package encoding/gzip.
func NewGZIPDecompressor() Decompressor {
return &gzipDecompressor{}
}
@@ -155,20 +125,17 @@ func (d *gzipDecompressor) Type() string {
type callInfo struct {
compressorType string
failFast bool
- stream *clientStream
+ headerMD metadata.MD
+ trailerMD metadata.MD
+ peer *peer.Peer
+ traceInfo traceInfo // in trace.go
maxReceiveMessageSize *int
maxSendMessageSize *int
creds credentials.PerRPCCredentials
- contentSubtype string
- codec baseCodec
- maxRetryRPCBufferSize int
}
func defaultCallInfo() *callInfo {
- return &callInfo{
- failFast: true,
- maxRetryRPCBufferSize: 256 * 1024, // 256KB
- }
+ return &callInfo{failFast: true}
}
// CallOption configures a Call before it starts or extracts information from
@@ -191,66 +158,40 @@ type EmptyCallOption struct{}
func (EmptyCallOption) before(*callInfo) error { return nil }
func (EmptyCallOption) after(*callInfo) {}
+type beforeCall func(c *callInfo) error
+
+func (o beforeCall) before(c *callInfo) error { return o(c) }
+func (o beforeCall) after(c *callInfo) {}
+
+type afterCall func(c *callInfo)
+
+func (o afterCall) before(c *callInfo) error { return nil }
+func (o afterCall) after(c *callInfo) { o(c) }
+
// Header returns a CallOptions that retrieves the header metadata
// for a unary RPC.
func Header(md *metadata.MD) CallOption {
- return HeaderCallOption{HeaderAddr: md}
-}
-
-// HeaderCallOption is a CallOption for collecting response header metadata.
-// The metadata field will be populated *after* the RPC completes.
-// This is an EXPERIMENTAL API.
-type HeaderCallOption struct {
- HeaderAddr *metadata.MD
-}
-
-func (o HeaderCallOption) before(c *callInfo) error { return nil }
-func (o HeaderCallOption) after(c *callInfo) {
- if c.stream != nil {
- *o.HeaderAddr, _ = c.stream.Header()
- }
+ return afterCall(func(c *callInfo) {
+ *md = c.headerMD
+ })
}
// Trailer returns a CallOptions that retrieves the trailer metadata
// for a unary RPC.
func Trailer(md *metadata.MD) CallOption {
- return TrailerCallOption{TrailerAddr: md}
-}
-
-// TrailerCallOption is a CallOption for collecting response trailer metadata.
-// The metadata field will be populated *after* the RPC completes.
-// This is an EXPERIMENTAL API.
-type TrailerCallOption struct {
- TrailerAddr *metadata.MD
-}
-
-func (o TrailerCallOption) before(c *callInfo) error { return nil }
-func (o TrailerCallOption) after(c *callInfo) {
- if c.stream != nil {
- *o.TrailerAddr = c.stream.Trailer()
- }
-}
-
-// Peer returns a CallOption that retrieves peer information for a unary RPC.
-// The peer field will be populated *after* the RPC completes.
-func Peer(p *peer.Peer) CallOption {
- return PeerCallOption{PeerAddr: p}
+ return afterCall(func(c *callInfo) {
+ *md = c.trailerMD
+ })
}
-// PeerCallOption is a CallOption for collecting the identity of the remote
-// peer. The peer field will be populated *after* the RPC completes.
-// This is an EXPERIMENTAL API.
-type PeerCallOption struct {
- PeerAddr *peer.Peer
-}
-
-func (o PeerCallOption) before(c *callInfo) error { return nil }
-func (o PeerCallOption) after(c *callInfo) {
- if c.stream != nil {
- if x, ok := peer.FromContext(c.stream.Context()); ok {
- *o.PeerAddr = *x
+// Peer returns a CallOption that retrieves peer information for a
+// unary RPC.
+func Peer(peer *peer.Peer) CallOption {
+ return afterCall(func(c *callInfo) {
+ if c.peer != nil {
+ *peer = *c.peer
}
- }
+ })
}
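Header, Trailer and Peer only populate their arguments once the RPC has finished, via the afterCall hooks above. A sketch of collecting all three on a unary call, assuming the vendored grpc_health_v1 client is available; the target address is illustrative:

package main

import (
	"log"

	"golang.org/x/net/context"
	"google.golang.org/grpc"
	healthpb "google.golang.org/grpc/health/grpc_health_v1"
	"google.golang.org/grpc/metadata"
	"google.golang.org/grpc/peer"
)

func main() {
	conn, err := grpc.Dial("localhost:9999", grpc.WithInsecure())
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	defer conn.Close()

	var (
		hdr, trl metadata.MD
		pr       peer.Peer
	)
	// The options below are filled in by their after() hooks once the RPC completes.
	_, err = healthpb.NewHealthClient(conn).Check(context.Background(),
		&healthpb.HealthCheckRequest{},
		grpc.Header(&hdr), grpc.Trailer(&trl), grpc.Peer(&pr),
	)
	log.Printf("err=%v header=%v trailer=%v peer=%v", err, hdr, trl, pr.Addr)
}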
// FailFast configures the action to take when an RPC is attempted on broken
@@ -264,76 +205,36 @@ func (o PeerCallOption) after(c *callInfo) {
//
// By default, RPCs are "Fail Fast".
func FailFast(failFast bool) CallOption {
- return FailFastCallOption{FailFast: failFast}
-}
-
-// FailFastCallOption is a CallOption for indicating whether an RPC should fail
-// fast or not.
-// This is an EXPERIMENTAL API.
-type FailFastCallOption struct {
- FailFast bool
-}
-
-func (o FailFastCallOption) before(c *callInfo) error {
- c.failFast = o.FailFast
- return nil
+ return beforeCall(func(c *callInfo) error {
+ c.failFast = failFast
+ return nil
+ })
}
-func (o FailFastCallOption) after(c *callInfo) {}
// MaxCallRecvMsgSize returns a CallOption which sets the maximum message size the client can receive.
func MaxCallRecvMsgSize(s int) CallOption {
- return MaxRecvMsgSizeCallOption{MaxRecvMsgSize: s}
-}
-
-// MaxRecvMsgSizeCallOption is a CallOption that indicates the maximum message
-// size the client can receive.
-// This is an EXPERIMENTAL API.
-type MaxRecvMsgSizeCallOption struct {
- MaxRecvMsgSize int
-}
-
-func (o MaxRecvMsgSizeCallOption) before(c *callInfo) error {
- c.maxReceiveMessageSize = &o.MaxRecvMsgSize
- return nil
+ return beforeCall(func(o *callInfo) error {
+ o.maxReceiveMessageSize = &s
+ return nil
+ })
}
-func (o MaxRecvMsgSizeCallOption) after(c *callInfo) {}
// MaxCallSendMsgSize returns a CallOption which sets the maximum message size the client can send.
func MaxCallSendMsgSize(s int) CallOption {
- return MaxSendMsgSizeCallOption{MaxSendMsgSize: s}
-}
-
-// MaxSendMsgSizeCallOption is a CallOption that indicates the maximum message
-// size the client can send.
-// This is an EXPERIMENTAL API.
-type MaxSendMsgSizeCallOption struct {
- MaxSendMsgSize int
-}
-
-func (o MaxSendMsgSizeCallOption) before(c *callInfo) error {
- c.maxSendMessageSize = &o.MaxSendMsgSize
- return nil
+ return beforeCall(func(o *callInfo) error {
+ o.maxSendMessageSize = &s
+ return nil
+ })
}
-func (o MaxSendMsgSizeCallOption) after(c *callInfo) {}
// PerRPCCredentials returns a CallOption that sets credentials.PerRPCCredentials
// for a call.
func PerRPCCredentials(creds credentials.PerRPCCredentials) CallOption {
- return PerRPCCredsCallOption{Creds: creds}
-}
-
-// PerRPCCredsCallOption is a CallOption that indicates the per-RPC
-// credentials to use for the call.
-// This is an EXPERIMENTAL API.
-type PerRPCCredsCallOption struct {
- Creds credentials.PerRPCCredentials
-}
-
-func (o PerRPCCredsCallOption) before(c *callInfo) error {
- c.creds = o.Creds
- return nil
+ return beforeCall(func(c *callInfo) error {
+ c.creds = creds
+ return nil
+ })
}
-func (o PerRPCCredsCallOption) after(c *callInfo) {}
// UseCompressor returns a CallOption which sets the compressor used when
// sending the request. If WithCompressor is also set, UseCompressor has
@@ -341,110 +242,18 @@ func (o PerRPCCredsCallOption) after(c *callInfo) {}
//
// This API is EXPERIMENTAL.
func UseCompressor(name string) CallOption {
- return CompressorCallOption{CompressorType: name}
-}
-
-// CompressorCallOption is a CallOption that indicates the compressor to use.
-// This is an EXPERIMENTAL API.
-type CompressorCallOption struct {
- CompressorType string
-}
-
-func (o CompressorCallOption) before(c *callInfo) error {
- c.compressorType = o.CompressorType
- return nil
-}
-func (o CompressorCallOption) after(c *callInfo) {}
-
-// CallContentSubtype returns a CallOption that will set the content-subtype
-// for a call. For example, if content-subtype is "json", the Content-Type over
-// the wire will be "application/grpc+json". The content-subtype is converted
-// to lowercase before being included in Content-Type. See Content-Type on
-// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for
-// more details.
-//
-// If CallCustomCodec is not also used, the content-subtype will be used to
-// look up the Codec to use in the registry controlled by RegisterCodec. See
-// the documentation on RegisterCodec for details on registration. The lookup
-// of content-subtype is case-insensitive. If no such Codec is found, the call
-// will result in an error with code codes.Internal.
-//
-// If CallCustomCodec is also used, that Codec will be used for all request and
-// response messages, with the content-subtype set to the given contentSubtype
-// here for requests.
-func CallContentSubtype(contentSubtype string) CallOption {
- return ContentSubtypeCallOption{ContentSubtype: strings.ToLower(contentSubtype)}
-}
-
-// ContentSubtypeCallOption is a CallOption that indicates the content-subtype
-// used for marshaling messages.
-// This is an EXPERIMENTAL API.
-type ContentSubtypeCallOption struct {
- ContentSubtype string
-}
-
-func (o ContentSubtypeCallOption) before(c *callInfo) error {
- c.contentSubtype = o.ContentSubtype
- return nil
-}
-func (o ContentSubtypeCallOption) after(c *callInfo) {}
-
-// CallCustomCodec returns a CallOption that will set the given Codec to be
-// used for all request and response messages for a call. The result of calling
-// String() will be used as the content-subtype in a case-insensitive manner.
-//
-// See Content-Type on
-// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for
-// more details. Also see the documentation on RegisterCodec and
-// CallContentSubtype for more details on the interaction between Codec and
-// content-subtype.
-//
-// This function is provided for advanced users; prefer to use only
-// CallContentSubtype to select a registered codec instead.
-func CallCustomCodec(codec Codec) CallOption {
- return CustomCodecCallOption{Codec: codec}
-}
-
-// CustomCodecCallOption is a CallOption that indicates the codec used for
-// marshaling messages.
-// This is an EXPERIMENTAL API.
-type CustomCodecCallOption struct {
- Codec Codec
-}
-
-func (o CustomCodecCallOption) before(c *callInfo) error {
- c.codec = o.Codec
- return nil
-}
-func (o CustomCodecCallOption) after(c *callInfo) {}
-
-// MaxRetryRPCBufferSize returns a CallOption that limits the amount of memory
-// used for buffering this RPC's requests for retry purposes.
-//
-// This API is EXPERIMENTAL.
-func MaxRetryRPCBufferSize(bytes int) CallOption {
- return MaxRetryRPCBufferSizeCallOption{bytes}
-}
-
-// MaxRetryRPCBufferSizeCallOption is a CallOption indicating the amount of
-// memory to be used for caching this RPC for retry purposes.
-// This is an EXPERIMENTAL API.
-type MaxRetryRPCBufferSizeCallOption struct {
- MaxRetryRPCBufferSize int
-}
-
-func (o MaxRetryRPCBufferSizeCallOption) before(c *callInfo) error {
- c.maxRetryRPCBufferSize = o.MaxRetryRPCBufferSize
- return nil
+ return beforeCall(func(c *callInfo) error {
+ c.compressorType = name
+ return nil
+ })
}
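FailFast, MaxCallRecvMsgSize, MaxCallSendMsgSize, PerRPCCredentials and UseCompressor are all beforeCall hooks that adjust callInfo before the RPC starts, so they can be combined on a single call. A sketch, assuming the encoding/gzip package that registers the "gzip" codec is present in this vendored tree, and reusing an illustrative health client:

package example

import (
	"golang.org/x/net/context"
	"google.golang.org/grpc"
	_ "google.golang.org/grpc/encoding/gzip" // assumed present; registers the "gzip" codec used by UseCompressor
	healthpb "google.golang.org/grpc/health/grpc_health_v1"
)

// checkWithOptions applies several of the per-call options defined above to one RPC.
func checkWithOptions(ctx context.Context, client healthpb.HealthClient) error {
	_, err := client.Check(ctx, &healthpb.HealthCheckRequest{},
		grpc.FailFast(false),           // wait for a ready transport instead of failing fast
		grpc.MaxCallRecvMsgSize(4<<20), // 4 MiB receive limit for this call
		grpc.MaxCallSendMsgSize(1<<20), // 1 MiB send limit for this call
		grpc.UseCompressor("gzip"),     // compress requests with the registered gzip codec
	)
	return err
}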
-func (o MaxRetryRPCBufferSizeCallOption) after(c *callInfo) {}
// The format of the payload: compressed or not?
type payloadFormat uint8
const (
- compressionNone payloadFormat = 0 // no compression
- compressionMade payloadFormat = 1 // compressed
+ compressionNone payloadFormat = iota // no compression
+ compressionMade
)
// parser reads complete gRPC messages from the underlying reader.
@@ -454,8 +263,8 @@ type parser struct {
// error types.
r io.Reader
- // The header of a gRPC message. Find more detail at
- // https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md
+ // The header of a gRPC message. Find more detail
+ // at https://grpc.io/docs/guides/wire.html.
header [5]byte
}
@@ -468,7 +277,7 @@ type parser struct {
// * io.EOF, when no messages remain
// * io.ErrUnexpectedEOF
// * of type transport.ConnectionError
-// * an error from the status package
+// * of type transport.StreamError
// No other error values or types must be returned, which also means
// that the underlying io.Reader must not return an incompatible
// error.
@@ -501,85 +310,65 @@ func (p *parser) recvMsg(maxReceiveMessageSize int) (pf payloadFormat, msg []byt
return pf, msg, nil
}
-// encode serializes msg and returns a buffer containing the message, or an
-// error if it is too large to be transmitted by grpc. If msg is nil, it
-// generates an empty message.
-func encode(c baseCodec, msg interface{}) ([]byte, error) {
- if msg == nil { // NOTE: typed nils will not be caught by this check
- return nil, nil
- }
- b, err := c.Marshal(msg)
- if err != nil {
- return nil, status.Errorf(codes.Internal, "grpc: error while marshaling: %v", err.Error())
- }
- if uint(len(b)) > math.MaxUint32 {
- return nil, status.Errorf(codes.ResourceExhausted, "grpc: message too large (%d bytes)", len(b))
- }
- return b, nil
-}
-
-// compress returns the input bytes compressed by compressor or cp. If both
-// compressors are nil, returns nil.
-//
-// TODO(dfawley): eliminate cp parameter by wrapping Compressor in an encoding.Compressor.
-func compress(in []byte, cp Compressor, compressor encoding.Compressor) ([]byte, error) {
- if compressor == nil && cp == nil {
- return nil, nil
- }
- wrapErr := func(err error) error {
- return status.Errorf(codes.Internal, "grpc: error while compressing: %v", err.Error())
- }
- cbuf := &bytes.Buffer{}
- if compressor != nil {
- z, err := compressor.Compress(cbuf)
+// encode serializes msg and returns a buffer of message header and a buffer of msg.
+// If msg is nil, it generates the message header and an empty msg buffer.
+// TODO(ddyihai): eliminate extra Compressor parameter.
+func encode(c Codec, msg interface{}, cp Compressor, outPayload *stats.OutPayload, compressor encoding.Compressor) ([]byte, []byte, error) {
+ var (
+ b []byte
+ cbuf *bytes.Buffer
+ )
+ const (
+ payloadLen = 1
+ sizeLen = 4
+ )
+ if msg != nil {
+ var err error
+ b, err = c.Marshal(msg)
if err != nil {
- return nil, wrapErr(err)
+ return nil, nil, status.Errorf(codes.Internal, "grpc: error while marshaling: %v", err.Error())
}
- if _, err := z.Write(in); err != nil {
- return nil, wrapErr(err)
+ if outPayload != nil {
+ outPayload.Payload = msg
+ // TODO truncate large payload.
+ outPayload.Data = b
+ outPayload.Length = len(b)
}
- if err := z.Close(); err != nil {
- return nil, wrapErr(err)
- }
- } else {
- if err := cp.Do(cbuf, in); err != nil {
- return nil, wrapErr(err)
+ if compressor != nil || cp != nil {
+ cbuf = new(bytes.Buffer)
+ // Has compressor, check Compressor is set by UseCompressor first.
+ if compressor != nil {
+ z, _ := compressor.Compress(cbuf)
+ if _, err := z.Write(b); err != nil {
+ return nil, nil, status.Errorf(codes.Internal, "grpc: error while compressing: %v", err.Error())
+ }
+ z.Close()
+ } else {
+ // If Compressor is not set by UseCompressor, use default Compressor
+ if err := cp.Do(cbuf, b); err != nil {
+ return nil, nil, status.Errorf(codes.Internal, "grpc: error while compressing: %v", err.Error())
+ }
+ }
+ b = cbuf.Bytes()
}
}
- return cbuf.Bytes(), nil
-}
-
-const (
- payloadLen = 1
- sizeLen = 4
- headerLen = payloadLen + sizeLen
-)
+ if uint(len(b)) > math.MaxUint32 {
+ return nil, nil, status.Errorf(codes.ResourceExhausted, "grpc: message too large (%d bytes)", len(b))
+ }
-// msgHeader returns a 5-byte header for the message being transmitted and the
-// payload, which is compData if non-nil or data otherwise.
-func msgHeader(data, compData []byte) (hdr []byte, payload []byte) {
- hdr = make([]byte, headerLen)
- if compData != nil {
- hdr[0] = byte(compressionMade)
- data = compData
+ bufHeader := make([]byte, payloadLen+sizeLen)
+ if compressor != nil || cp != nil {
+ bufHeader[0] = byte(compressionMade)
} else {
- hdr[0] = byte(compressionNone)
+ bufHeader[0] = byte(compressionNone)
}
- // Write length of payload into buf
- binary.BigEndian.PutUint32(hdr[payloadLen:], uint32(len(data)))
- return hdr, data
-}
-
-func outPayload(client bool, msg interface{}, data, payload []byte, t time.Time) *stats.OutPayload {
- return &stats.OutPayload{
- Client: client,
- Payload: msg,
- Data: data,
- Length: len(data),
- WireLength: len(payload) + headerLen,
- SentTime: t,
+ // Write length of b into buf
+ binary.BigEndian.PutUint32(bufHeader[payloadLen:], uint32(len(b)))
+ if outPayload != nil {
+ outPayload.WireLength = payloadLen + sizeLen + len(b)
}
+ return bufHeader, b, nil
}
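Both the old and new encode produce the same wire framing: one byte for the compression flag followed by a big-endian uint32 payload length, then the payload itself. A standalone sketch of that 5-byte header:

package main

import (
	"encoding/binary"
	"fmt"
)

// frame prepends the gRPC message header: 1 compressed-flag byte plus a 4-byte
// big-endian length, matching what encode above writes into bufHeader.
func frame(payload []byte, compressed bool) []byte {
	hdr := make([]byte, 5, 5+len(payload))
	if compressed {
		hdr[0] = 1 // compressionMade
	}
	binary.BigEndian.PutUint32(hdr[1:], uint32(len(payload)))
	return append(hdr, payload...)
}

func main() {
	msg := []byte("hello")
	fmt.Printf("% x\n", frame(msg, false)) // 00 00 00 00 05 68 65 6c 6c 6f
}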
func checkRecvPayload(pf payloadFormat, recvCompress string, haveCompressor bool) *status.Status {
@@ -598,17 +387,20 @@ func checkRecvPayload(pf payloadFormat, recvCompress string, haveCompressor bool
return nil
}
-func recvAndDecompress(p *parser, s *transport.Stream, dc Decompressor, maxReceiveMessageSize int, inPayload *stats.InPayload, compressor encoding.Compressor) ([]byte, error) {
+// For the two compressor parameters, both should not be set, but if they are,
+// dc takes precedence over compressor.
+// TODO(dfawley): wrap the old compressor/decompressor using the new API?
+func recv(p *parser, c Codec, s *transport.Stream, dc Decompressor, m interface{}, maxReceiveMessageSize int, inPayload *stats.InPayload, compressor encoding.Compressor) error {
pf, d, err := p.recvMsg(maxReceiveMessageSize)
if err != nil {
- return nil, err
+ return err
}
if inPayload != nil {
inPayload.WireLength = len(d)
}
if st := checkRecvPayload(pf, s.RecvCompress(), compressor != nil || dc != nil); st != nil {
- return nil, st.Err()
+ return st.Err()
}
if pf == compressionMade {
@@ -617,34 +409,23 @@ func recvAndDecompress(p *parser, s *transport.Stream, dc Decompressor, maxRecei
if dc != nil {
d, err = dc.Do(bytes.NewReader(d))
if err != nil {
- return nil, status.Errorf(codes.Internal, "grpc: failed to decompress the received message %v", err)
+ return status.Errorf(codes.Internal, "grpc: failed to decompress the received message %v", err)
}
} else {
dcReader, err := compressor.Decompress(bytes.NewReader(d))
if err != nil {
- return nil, status.Errorf(codes.Internal, "grpc: failed to decompress the received message %v", err)
+ return status.Errorf(codes.Internal, "grpc: failed to decompress the received message %v", err)
}
d, err = ioutil.ReadAll(dcReader)
if err != nil {
- return nil, status.Errorf(codes.Internal, "grpc: failed to decompress the received message %v", err)
+ return status.Errorf(codes.Internal, "grpc: failed to decompress the received message %v", err)
}
}
}
if len(d) > maxReceiveMessageSize {
// TODO: Revisit the error code. Currently keep it consistent with java
// implementation.
- return nil, status.Errorf(codes.ResourceExhausted, "grpc: received message larger than max (%d vs. %d)", len(d), maxReceiveMessageSize)
- }
- return d, nil
-}
-
-// For the two compressor parameters, both should not be set, but if they are,
-// dc takes precedence over compressor.
-// TODO(dfawley): wrap the old compressor/decompressor using the new API?
-func recv(p *parser, c baseCodec, s *transport.Stream, dc Decompressor, m interface{}, maxReceiveMessageSize int, inPayload *stats.InPayload, compressor encoding.Compressor) error {
- d, err := recvAndDecompress(p, s, dc, maxReceiveMessageSize, inPayload, compressor)
- if err != nil {
- return err
+ return status.Errorf(codes.ResourceExhausted, "grpc: received message larger than max (%d vs. %d)", len(d), maxReceiveMessageSize)
}
if err := c.Unmarshal(d, m); err != nil {
return status.Errorf(codes.Internal, "grpc: failed to unmarshal the received message %v", err)
@@ -704,74 +485,6 @@ func Errorf(c codes.Code, format string, a ...interface{}) error {
return status.Errorf(c, format, a...)
}
-// setCallInfoCodec should only be called after CallOptions have been applied.
-func setCallInfoCodec(c *callInfo) error {
- if c.codec != nil {
- // codec was already set by a CallOption; use it.
- return nil
- }
-
- if c.contentSubtype == "" {
- // No codec specified in CallOptions; use proto by default.
- c.codec = encoding.GetCodec(proto.Name)
- return nil
- }
-
- // c.contentSubtype is already lowercased in CallContentSubtype
- c.codec = encoding.GetCodec(c.contentSubtype)
- if c.codec == nil {
- return status.Errorf(codes.Internal, "no codec registered for content-subtype %s", c.contentSubtype)
- }
- return nil
-}
-
-// parseDialTarget returns the network and address to pass to dialer
-func parseDialTarget(target string) (net string, addr string) {
- net = "tcp"
-
- m1 := strings.Index(target, ":")
- m2 := strings.Index(target, ":/")
-
- // handle unix:addr which will fail with url.Parse
- if m1 >= 0 && m2 < 0 {
- if n := target[0:m1]; n == "unix" {
- net = n
- addr = target[m1+1:]
- return net, addr
- }
- }
- if m2 >= 0 {
- t, err := url.Parse(target)
- if err != nil {
- return net, target
- }
- scheme := t.Scheme
- addr = t.Path
- if scheme == "unix" {
- net = scheme
- if addr == "" {
- addr = t.Host
- }
- return net, addr
- }
- }
-
- return net, target
-}
-
-// channelzData is used to store channelz related data for ClientConn, addrConn and Server.
-// These fields cannot be embedded in the original structs (e.g. ClientConn), since to do atomic
-// operation on int64 variable on 32-bit machine, user is responsible to enforce memory alignment.
-// Here, by grouping those int64 fields inside a struct, we are enforcing the alignment.
-type channelzData struct {
- callsStarted int64
- callsFailed int64
- callsSucceeded int64
- // lastCallStartedTime stores the timestamp that last call starts. It is of int64 type instead of
- // time.Time since it's more costly to atomically update time.Time variable than int64 variable.
- lastCallStartedTime int64
-}
-
// The SupportPackageIsVersion variables are referenced from generated protocol
// buffer files to ensure compatibility with the gRPC version used. The latest
// support package version is 5.
@@ -786,4 +499,7 @@ const (
SupportPackageIsVersion5 = true
)
+// Version is the current grpc version.
+const Version = "1.9.1"
+
const grpcUA = "grpc-go/" + Version
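The channelzData comment removed in the hunk above leans on a Go guarantee worth spelling out: 64-bit atomic operations need 8-byte alignment on 32-bit platforms, and the sync/atomic documentation only guarantees that alignment for the first word of a separately allocated struct, array, or slice. A minimal sketch of the pattern, not gRPC's code (the counters type name is illustrative; the field names come from the removed struct):

package main

import (
	"fmt"
	"sync/atomic"
)

// counters groups all int64 fields at the start of a separately allocated
// struct so the first 64-bit word is 8-byte aligned and AddInt64/LoadInt64
// are safe even on 32-bit targets.
type counters struct {
	callsStarted   int64
	callsSucceeded int64
	callsFailed    int64
}

func main() {
	c := new(counters) // heap allocation guarantees alignment of the first word
	atomic.AddInt64(&c.callsStarted, 1)
	atomic.AddInt64(&c.callsSucceeded, 1)
	fmt.Println(atomic.LoadInt64(&c.callsStarted), atomic.LoadInt64(&c.callsFailed))
}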
diff --git a/vendor/google.golang.org/grpc/server.go b/vendor/google.golang.org/grpc/server.go
index 920da5e01..f65162168 100644
--- a/vendor/google.golang.org/grpc/server.go
+++ b/vendor/google.golang.org/grpc/server.go
@@ -19,6 +19,7 @@
package grpc
import (
+ "bytes"
"errors"
"fmt"
"io"
@@ -29,24 +30,24 @@ import (
"runtime"
"strings"
"sync"
- "sync/atomic"
"time"
+ "io/ioutil"
+
"golang.org/x/net/context"
+ "golang.org/x/net/http2"
"golang.org/x/net/trace"
-
"google.golang.org/grpc/codes"
"google.golang.org/grpc/credentials"
"google.golang.org/grpc/encoding"
- "google.golang.org/grpc/encoding/proto"
"google.golang.org/grpc/grpclog"
- "google.golang.org/grpc/internal/channelz"
- "google.golang.org/grpc/internal/transport"
+ "google.golang.org/grpc/internal"
"google.golang.org/grpc/keepalive"
"google.golang.org/grpc/metadata"
"google.golang.org/grpc/stats"
"google.golang.org/grpc/status"
"google.golang.org/grpc/tap"
+ "google.golang.org/grpc/transport"
)
const (
@@ -95,20 +96,16 @@ type Server struct {
m map[string]*service // service name -> service info
events trace.EventLog
- quit chan struct{}
- done chan struct{}
- quitOnce sync.Once
- doneOnce sync.Once
- channelzRemoveOnce sync.Once
- serveWG sync.WaitGroup // counts active Serve goroutines for GracefulStop
-
- channelzID int64 // channelz unique identification number
- czData *channelzData
+ quit chan struct{}
+ done chan struct{}
+ quitOnce sync.Once
+ doneOnce sync.Once
+ serveWG sync.WaitGroup // counts active Serve goroutines for GracefulStop
}
type options struct {
creds credentials.TransportCredentials
- codec baseCodec
+ codec Codec
cp Compressor
dc Decompressor
unaryInt UnaryServerInterceptor
@@ -118,6 +115,7 @@ type options struct {
maxConcurrentStreams uint32
maxReceiveMessageSize int
maxSendMessageSize int
+ useHandlerImpl bool // use http.Handler-based server
unknownStreamDesc *StreamDesc
keepaliveParams keepalive.ServerParameters
keepalivePolicy keepalive.EnforcementPolicy
@@ -126,25 +124,19 @@ type options struct {
writeBufferSize int
readBufferSize int
connectionTimeout time.Duration
- maxHeaderListSize *uint32
}
var defaultServerOptions = options{
maxReceiveMessageSize: defaultServerMaxReceiveMessageSize,
maxSendMessageSize: defaultServerMaxSendMessageSize,
connectionTimeout: 120 * time.Second,
- writeBufferSize: defaultWriteBufSize,
- readBufferSize: defaultReadBufSize,
}
// A ServerOption sets options such as credentials, codec and keepalive parameters, etc.
type ServerOption func(*options)
-// WriteBufferSize determines how much data can be batched before doing a write on the wire.
-// The corresponding memory allocation for this buffer will be twice the size to keep syscalls low.
-// The default value for this buffer is 32KB.
-// Zero will disable the write buffer such that each write will be on underlying connection.
-// Note: A Send call may not directly translate to a write.
+// WriteBufferSize lets you set the size of write buffer, this determines how much data can be batched
+// before doing a write on the wire.
func WriteBufferSize(s int) ServerOption {
return func(o *options) {
o.writeBufferSize = s
@@ -153,9 +145,6 @@ func WriteBufferSize(s int) ServerOption {
// ReadBufferSize lets you set the size of read buffer, this determines how much data can be read at most
// for one read syscall.
-// The default value for this buffer is 32KB.
-// Zero will disable read buffer for a connection so data framer can access the underlying
-// conn directly.
func ReadBufferSize(s int) ServerOption {
return func(o *options) {
o.readBufferSize = s
@@ -193,8 +182,6 @@ func KeepaliveEnforcementPolicy(kep keepalive.EnforcementPolicy) ServerOption {
}
// CustomCodec returns a ServerOption that sets a codec for message marshaling and unmarshaling.
-//
-// This will override any lookups by content-subtype for Codecs registered with RegisterCodec.
func CustomCodec(codec Codec) ServerOption {
return func(o *options) {
o.codec = codec
@@ -226,9 +213,7 @@ func RPCDecompressor(dc Decompressor) ServerOption {
}
// MaxMsgSize returns a ServerOption to set the max message size in bytes the server can receive.
-// If this is not set, gRPC uses the default limit.
-//
-// Deprecated: use MaxRecvMsgSize instead.
+// If this is not set, gRPC uses the default limit. Deprecated: use MaxRecvMsgSize instead.
func MaxMsgSize(m int) ServerOption {
return MaxRecvMsgSize(m)
}
@@ -335,14 +320,6 @@ func ConnectionTimeout(d time.Duration) ServerOption {
}
}
-// MaxHeaderListSize returns a ServerOption that sets the max (uncompressed) size
-// of header list that the server is prepared to accept.
-func MaxHeaderListSize(s uint32) ServerOption {
- return func(o *options) {
- o.maxHeaderListSize = &s
- }
-}
-
// NewServer creates a gRPC server which has no service registered and has not
// started to accept requests yet.
func NewServer(opt ...ServerOption) *Server {
@@ -350,24 +327,23 @@ func NewServer(opt ...ServerOption) *Server {
for _, o := range opt {
o(&opts)
}
+ if opts.codec == nil {
+ // Set the default codec.
+ opts.codec = protoCodec{}
+ }
s := &Server{
- lis: make(map[net.Listener]bool),
- opts: opts,
- conns: make(map[io.Closer]bool),
- m: make(map[string]*service),
- quit: make(chan struct{}),
- done: make(chan struct{}),
- czData: new(channelzData),
+ lis: make(map[net.Listener]bool),
+ opts: opts,
+ conns: make(map[io.Closer]bool),
+ m: make(map[string]*service),
+ quit: make(chan struct{}),
+ done: make(chan struct{}),
}
s.cv = sync.NewCond(&s.mu)
if EnableTracing {
_, file, line, _ := runtime.Caller(1)
s.events = trace.NewEventLog("grpc.Server", fmt.Sprintf("%s:%d", file, line))
}
-
- if channelz.IsOn() {
- s.channelzID = channelz.RegisterServer(&channelzServer{s}, "")
- }
return s
}
@@ -483,26 +459,6 @@ func (s *Server) useTransportAuthenticator(rawConn net.Conn) (net.Conn, credenti
return s.opts.creds.ServerHandshake(rawConn)
}
-type listenSocket struct {
- net.Listener
- channelzID int64
-}
-
-func (l *listenSocket) ChannelzMetric() *channelz.SocketInternalMetric {
- return &channelz.SocketInternalMetric{
- SocketOptions: channelz.GetSocketOption(l.Listener),
- LocalAddr: l.Listener.Addr(),
- }
-}
-
-func (l *listenSocket) Close() error {
- err := l.Listener.Close()
- if channelz.IsOn() {
- channelz.RemoveEntry(l.channelzID)
- }
- return err
-}
-
// Serve accepts incoming connections on the listener lis, creating a new
// ServerTransport and service goroutine for each. The service goroutines
// read gRPC requests and then call the registered handlers to reply to them.
@@ -531,19 +487,13 @@ func (s *Server) Serve(lis net.Listener) error {
}
}()
- ls := &listenSocket{Listener: lis}
- s.lis[ls] = true
-
- if channelz.IsOn() {
- ls.channelzID = channelz.RegisterListenSocket(ls, s.channelzID, "")
- }
+ s.lis[lis] = true
s.mu.Unlock()
-
defer func() {
s.mu.Lock()
- if s.lis != nil && s.lis[ls] {
- ls.Close()
- delete(s.lis, ls)
+ if s.lis != nil && s.lis[lis] {
+ lis.Close()
+ delete(s.lis, lis)
}
s.mu.Unlock()
}()
@@ -627,19 +577,27 @@ func (s *Server) handleRawConn(rawConn net.Conn) {
}
s.mu.Unlock()
- // Finish handshaking (HTTP2)
- st := s.newHTTP2Transport(conn, authInfo)
- if st == nil {
- return
+ var serve func()
+ c := conn.(io.Closer)
+ if s.opts.useHandlerImpl {
+ serve = func() { s.serveUsingHandler(conn) }
+ } else {
+ // Finish handshaking (HTTP2)
+ st := s.newHTTP2Transport(conn, authInfo)
+ if st == nil {
+ return
+ }
+ c = st
+ serve = func() { s.serveStreams(st) }
}
rawConn.SetDeadline(time.Time{})
- if !s.addConn(st) {
+ if !s.addConn(c) {
return
}
go func() {
- s.serveStreams(st)
- s.removeConn(st)
+ serve()
+ s.removeConn(c)
}()
}
@@ -657,8 +615,6 @@ func (s *Server) newHTTP2Transport(c net.Conn, authInfo credentials.AuthInfo) tr
InitialConnWindowSize: s.opts.initialConnWindowSize,
WriteBufferSize: s.opts.writeBufferSize,
ReadBufferSize: s.opts.readBufferSize,
- ChannelzParentID: s.channelzID,
- MaxHeaderListSize: s.opts.maxHeaderListSize,
}
st, err := transport.NewServerTransport("http2", c, config)
if err != nil {
@@ -669,7 +625,6 @@ func (s *Server) newHTTP2Transport(c net.Conn, authInfo credentials.AuthInfo) tr
grpclog.Warningln("grpc: Server.Serve failed to create ServerTransport: ", err)
return nil
}
-
return st
}
@@ -694,6 +649,27 @@ func (s *Server) serveStreams(st transport.ServerTransport) {
var _ http.Handler = (*Server)(nil)
+// serveUsingHandler is called from handleRawConn when s is configured
+// to handle requests via the http.Handler interface. It sets up a
+// net/http.Server to handle the just-accepted conn. The http.Server
+// is configured to route all incoming requests (all HTTP/2 streams)
+// to ServeHTTP, which creates a new ServerTransport for each stream.
+// serveUsingHandler blocks until conn closes.
+//
+// This codepath is only used when Server.TestingUseHandlerImpl has
+// been configured. This lets the end2end tests exercise the ServeHTTP
+// method as one of the environment types.
+//
+// conn is the *tls.Conn that's already been authenticated.
+func (s *Server) serveUsingHandler(conn net.Conn) {
+ h2s := &http2.Server{
+ MaxConcurrentStreams: s.opts.maxConcurrentStreams,
+ }
+ h2s.ServeConn(conn, &http2.ServeConnOpts{
+ Handler: s,
+ })
+}
+
// ServeHTTP implements the Go standard library's http.Handler
// interface by responding to the gRPC request r, by looking up
// the requested gRPC method in the gRPC server s.
@@ -719,7 +695,7 @@ var _ http.Handler = (*Server)(nil)
// available through grpc-go's HTTP/2 server, and it is currently EXPERIMENTAL
// and subject to change.
func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) {
- st, err := transport.NewServerHandlerTransport(w, r, s.opts.statsHandler)
+ st, err := transport.NewServerHandlerTransport(w, r)
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
@@ -776,73 +752,39 @@ func (s *Server) removeConn(c io.Closer) {
}
}
-func (s *Server) channelzMetric() *channelz.ServerInternalMetric {
- return &channelz.ServerInternalMetric{
- CallsStarted: atomic.LoadInt64(&s.czData.callsStarted),
- CallsSucceeded: atomic.LoadInt64(&s.czData.callsSucceeded),
- CallsFailed: atomic.LoadInt64(&s.czData.callsFailed),
- LastCallStartedTimestamp: time.Unix(0, atomic.LoadInt64(&s.czData.lastCallStartedTime)),
- }
-}
-
-func (s *Server) incrCallsStarted() {
- atomic.AddInt64(&s.czData.callsStarted, 1)
- atomic.StoreInt64(&s.czData.lastCallStartedTime, time.Now().UnixNano())
-}
-
-func (s *Server) incrCallsSucceeded() {
- atomic.AddInt64(&s.czData.callsSucceeded, 1)
-}
-
-func (s *Server) incrCallsFailed() {
- atomic.AddInt64(&s.czData.callsFailed, 1)
-}
-
func (s *Server) sendResponse(t transport.ServerTransport, stream *transport.Stream, msg interface{}, cp Compressor, opts *transport.Options, comp encoding.Compressor) error {
- data, err := encode(s.getCodec(stream.ContentSubtype()), msg)
- if err != nil {
- grpclog.Errorln("grpc: server failed to encode response: ", err)
- return err
+ var (
+ outPayload *stats.OutPayload
+ )
+ if s.opts.statsHandler != nil {
+ outPayload = &stats.OutPayload{}
}
- compData, err := compress(data, cp, comp)
+ hdr, data, err := encode(s.opts.codec, msg, cp, outPayload, comp)
if err != nil {
- grpclog.Errorln("grpc: server failed to compress response: ", err)
+ grpclog.Errorln("grpc: server failed to encode response: ", err)
return err
}
- hdr, payload := msgHeader(data, compData)
- // TODO(dfawley): should we be checking len(data) instead?
- if len(payload) > s.opts.maxSendMessageSize {
- return status.Errorf(codes.ResourceExhausted, "grpc: trying to send message larger than max (%d vs. %d)", len(payload), s.opts.maxSendMessageSize)
+ if len(data) > s.opts.maxSendMessageSize {
+ return status.Errorf(codes.ResourceExhausted, "grpc: trying to send message larger than max (%d vs. %d)", len(data), s.opts.maxSendMessageSize)
}
- err = t.Write(stream, hdr, payload, opts)
- if err == nil && s.opts.statsHandler != nil {
- s.opts.statsHandler.HandleRPC(stream.Context(), outPayload(false, msg, data, payload, time.Now()))
+ err = t.Write(stream, hdr, data, opts)
+ if err == nil && outPayload != nil {
+ outPayload.SentTime = time.Now()
+ s.opts.statsHandler.HandleRPC(stream.Context(), outPayload)
}
return err
}
func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.Stream, srv *service, md *MethodDesc, trInfo *traceInfo) (err error) {
- if channelz.IsOn() {
- s.incrCallsStarted()
- defer func() {
- if err != nil && err != io.EOF {
- s.incrCallsFailed()
- } else {
- s.incrCallsSucceeded()
- }
- }()
- }
sh := s.opts.statsHandler
if sh != nil {
- beginTime := time.Now()
begin := &stats.Begin{
- BeginTime: beginTime,
+ BeginTime: time.Now(),
}
sh.HandleRPC(stream.Context(), begin)
defer func() {
end := &stats.End{
- BeginTime: beginTime,
- EndTime: time.Now(),
+ EndTime: time.Now(),
}
if err != nil && err != io.EOF {
end.Error = toRPCErr(err)
@@ -898,32 +840,77 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.
}
}
- var inPayload *stats.InPayload
- if sh != nil {
- inPayload = &stats.InPayload{
- RecvTime: time.Now(),
- }
+ p := &parser{r: stream}
+ pf, req, err := p.recvMsg(s.opts.maxReceiveMessageSize)
+ if err == io.EOF {
+ // The entire stream is done (for unary RPC only).
+ return err
+ }
+ if err == io.ErrUnexpectedEOF {
+ err = status.Errorf(codes.Internal, io.ErrUnexpectedEOF.Error())
}
- d, err := recvAndDecompress(&parser{r: stream}, stream, dc, s.opts.maxReceiveMessageSize, inPayload, decomp)
if err != nil {
if st, ok := status.FromError(err); ok {
if e := t.WriteStatus(stream, st); e != nil {
grpclog.Warningf("grpc: Server.processUnaryRPC failed to write status %v", e)
}
+ } else {
+ switch st := err.(type) {
+ case transport.ConnectionError:
+ // Nothing to do here.
+ case transport.StreamError:
+ if e := t.WriteStatus(stream, status.New(st.Code, st.Desc)); e != nil {
+ grpclog.Warningf("grpc: Server.processUnaryRPC failed to write status %v", e)
+ }
+ default:
+ panic(fmt.Sprintf("grpc: Unexpected error (%T) from recvMsg: %v", st, st))
+ }
}
return err
}
- if channelz.IsOn() {
- t.IncrMsgRecv()
+ if st := checkRecvPayload(pf, stream.RecvCompress(), dc != nil || decomp != nil); st != nil {
+ if e := t.WriteStatus(stream, st); e != nil {
+ grpclog.Warningf("grpc: Server.processUnaryRPC failed to write status %v", e)
+ }
+ return st.Err()
+ }
+ var inPayload *stats.InPayload
+ if sh != nil {
+ inPayload = &stats.InPayload{
+ RecvTime: time.Now(),
+ }
}
df := func(v interface{}) error {
- if err := s.getCodec(stream.ContentSubtype()).Unmarshal(d, v); err != nil {
+ if inPayload != nil {
+ inPayload.WireLength = len(req)
+ }
+ if pf == compressionMade {
+ var err error
+ if dc != nil {
+ req, err = dc.Do(bytes.NewReader(req))
+ if err != nil {
+ return status.Errorf(codes.Internal, err.Error())
+ }
+ } else {
+ tmp, _ := decomp.Decompress(bytes.NewReader(req))
+ req, err = ioutil.ReadAll(tmp)
+ if err != nil {
+ return status.Errorf(codes.Internal, "grpc: failed to decompress the received message %v", err)
+ }
+ }
+ }
+ if len(req) > s.opts.maxReceiveMessageSize {
+ // TODO: Revisit the error code. Currently keep it consistent with
+ // java implementation.
+ return status.Errorf(codes.ResourceExhausted, "grpc: received message larger than max (%d vs. %d)", len(req), s.opts.maxReceiveMessageSize)
+ }
+ if err := s.opts.codec.Unmarshal(req, v); err != nil {
return status.Errorf(codes.Internal, "grpc: error unmarshalling request: %v", err)
}
if inPayload != nil {
inPayload.Payload = v
- inPayload.Data = d
- inPayload.Length = len(d)
+ inPayload.Data = req
+ inPayload.Length = len(req)
sh.HandleRPC(stream.Context(), inPayload)
}
if trInfo != nil {
@@ -931,13 +918,12 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.
}
return nil
}
- ctx := NewContextWithServerTransportStream(stream.Context(), stream)
- reply, appErr := md.Handler(srv.server, ctx, df, s.opts.unaryInt)
+ reply, appErr := md.Handler(srv.server, stream.Context(), df, s.opts.unaryInt)
if appErr != nil {
appStatus, ok := status.FromError(appErr)
if !ok {
// Convert appErr if it is not a grpc status error.
- appErr = status.Error(codes.Unknown, appErr.Error())
+ appErr = status.Error(convertCode(appErr), appErr.Error())
appStatus, _ = status.FromError(appErr)
}
if trInfo != nil {
@@ -952,7 +938,10 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.
if trInfo != nil {
trInfo.tr.LazyLog(stringer("OK"), false)
}
- opts := &transport.Options{Last: true}
+ opts := &transport.Options{
+ Last: true,
+ Delay: false,
+ }
if err := s.sendResponse(t, stream, reply, cp, opts, comp); err != nil {
if err == io.EOF {
@@ -967,15 +956,16 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.
switch st := err.(type) {
case transport.ConnectionError:
// Nothing to do here.
+ case transport.StreamError:
+ if e := t.WriteStatus(stream, status.New(st.Code, st.Desc)); e != nil {
+ grpclog.Warningf("grpc: Server.processUnaryRPC failed to write status %v", e)
+ }
default:
panic(fmt.Sprintf("grpc: Unexpected error (%T) from sendResponse: %v", st, st))
}
}
return err
}
- if channelz.IsOn() {
- t.IncrMsgSent()
- }
if trInfo != nil {
trInfo.tr.LazyLog(&payload{sent: true, msg: reply}, true)
}
@@ -986,27 +976,15 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.
}
func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transport.Stream, srv *service, sd *StreamDesc, trInfo *traceInfo) (err error) {
- if channelz.IsOn() {
- s.incrCallsStarted()
- defer func() {
- if err != nil && err != io.EOF {
- s.incrCallsFailed()
- } else {
- s.incrCallsSucceeded()
- }
- }()
- }
sh := s.opts.statsHandler
if sh != nil {
- beginTime := time.Now()
begin := &stats.Begin{
- BeginTime: beginTime,
+ BeginTime: time.Now(),
}
sh.HandleRPC(stream.Context(), begin)
defer func() {
end := &stats.End{
- BeginTime: beginTime,
- EndTime: time.Now(),
+ EndTime: time.Now(),
}
if err != nil && err != io.EOF {
end.Error = toRPCErr(err)
@@ -1014,13 +992,11 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp
sh.HandleRPC(stream.Context(), end)
}()
}
- ctx := NewContextWithServerTransportStream(stream.Context(), stream)
ss := &serverStream{
- ctx: ctx,
- t: t,
- s: stream,
- p: &parser{r: stream},
- codec: s.getCodec(stream.ContentSubtype()),
+ t: t,
+ s: stream,
+ p: &parser{r: stream},
+ codec: s.opts.codec,
maxReceiveMessageSize: s.opts.maxReceiveMessageSize,
maxSendMessageSize: s.opts.maxSendMessageSize,
trInfo: trInfo,
@@ -1086,7 +1062,12 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp
if appErr != nil {
appStatus, ok := status.FromError(appErr)
if !ok {
- appStatus = status.New(codes.Unknown, appErr.Error())
+ switch err := appErr.(type) {
+ case transport.StreamError:
+ appStatus = status.New(err.Code, err.Desc)
+ default:
+ appStatus = status.New(convertCode(appErr), appErr.Error())
+ }
appErr = appStatus.Err()
}
if trInfo != nil {
@@ -1105,6 +1086,7 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp
ss.mu.Unlock()
}
return t.WriteStatus(ss.s, status.New(codes.OK, ""))
+
}
func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Stream, trInfo *traceInfo) {
@@ -1133,27 +1115,47 @@ func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Str
}
service := sm[:pos]
method := sm[pos+1:]
-
- if srv, ok := s.m[service]; ok {
- if md, ok := srv.md[method]; ok {
- s.processUnaryRPC(t, stream, srv, md, trInfo)
+ srv, ok := s.m[service]
+ if !ok {
+ if unknownDesc := s.opts.unknownStreamDesc; unknownDesc != nil {
+ s.processStreamingRPC(t, stream, nil, unknownDesc, trInfo)
return
}
- if sd, ok := srv.sd[method]; ok {
- s.processStreamingRPC(t, stream, srv, sd, trInfo)
- return
+ if trInfo != nil {
+ trInfo.tr.LazyLog(&fmtStringer{"Unknown service %v", []interface{}{service}}, true)
+ trInfo.tr.SetError()
}
+ errDesc := fmt.Sprintf("unknown service %v", service)
+ if err := t.WriteStatus(stream, status.New(codes.Unimplemented, errDesc)); err != nil {
+ if trInfo != nil {
+ trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true)
+ trInfo.tr.SetError()
+ }
+ grpclog.Warningf("grpc: Server.handleStream failed to write status: %v", err)
+ }
+ if trInfo != nil {
+ trInfo.tr.Finish()
+ }
+ return
}
- // Unknown service, or known server unknown method.
- if unknownDesc := s.opts.unknownStreamDesc; unknownDesc != nil {
- s.processStreamingRPC(t, stream, nil, unknownDesc, trInfo)
+ // Unary RPC or Streaming RPC?
+ if md, ok := srv.md[method]; ok {
+ s.processUnaryRPC(t, stream, srv, md, trInfo)
+ return
+ }
+ if sd, ok := srv.sd[method]; ok {
+ s.processStreamingRPC(t, stream, srv, sd, trInfo)
return
}
if trInfo != nil {
- trInfo.tr.LazyLog(&fmtStringer{"Unknown service %v", []interface{}{service}}, true)
+ trInfo.tr.LazyLog(&fmtStringer{"Unknown method %v", []interface{}{method}}, true)
trInfo.tr.SetError()
}
- errDesc := fmt.Sprintf("unknown service %v", service)
+ if unknownDesc := s.opts.unknownStreamDesc; unknownDesc != nil {
+ s.processStreamingRPC(t, stream, nil, unknownDesc, trInfo)
+ return
+ }
+ errDesc := fmt.Sprintf("unknown method %v", method)
if err := t.WriteStatus(stream, status.New(codes.Unimplemented, errDesc)); err != nil {
if trInfo != nil {
trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true)
@@ -1166,42 +1168,6 @@ func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Str
}
}
-// The key to save ServerTransportStream in the context.
-type streamKey struct{}
-
-// NewContextWithServerTransportStream creates a new context from ctx and
-// attaches stream to it.
-//
-// This API is EXPERIMENTAL.
-func NewContextWithServerTransportStream(ctx context.Context, stream ServerTransportStream) context.Context {
- return context.WithValue(ctx, streamKey{}, stream)
-}
-
-// ServerTransportStream is a minimal interface that a transport stream must
-// implement. This can be used to mock an actual transport stream for tests of
-// handler code that use, for example, grpc.SetHeader (which requires some
-// stream to be in context).
-//
-// See also NewContextWithServerTransportStream.
-//
-// This API is EXPERIMENTAL.
-type ServerTransportStream interface {
- Method() string
- SetHeader(md metadata.MD) error
- SendHeader(md metadata.MD) error
- SetTrailer(md metadata.MD) error
-}
-
-// ServerTransportStreamFromContext returns the ServerTransportStream saved in
-// ctx. Returns nil if the given context has no stream associated with it
-// (which implies it is not an RPC invocation context).
-//
-// This API is EXPERIMENTAL.
-func ServerTransportStreamFromContext(ctx context.Context) ServerTransportStream {
- s, _ := ctx.Value(streamKey{}).(ServerTransportStream)
- return s
-}
-
// Stop stops the gRPC server. It immediately closes all open
// connections and listeners.
// It cancels all active RPCs on the server side and the corresponding
@@ -1219,12 +1185,6 @@ func (s *Server) Stop() {
})
}()
- s.channelzRemoveOnce.Do(func() {
- if channelz.IsOn() {
- channelz.RemoveEntry(s.channelzID)
- }
- })
-
s.mu.Lock()
listeners := s.lis
s.lis = nil
@@ -1263,17 +1223,11 @@ func (s *Server) GracefulStop() {
})
}()
- s.channelzRemoveOnce.Do(func() {
- if channelz.IsOn() {
- channelz.RemoveEntry(s.channelzID)
- }
- })
s.mu.Lock()
if s.conns == nil {
s.mu.Unlock()
return
}
-
for lis := range s.lis {
lis.Close()
}
@@ -1302,20 +1256,10 @@ func (s *Server) GracefulStop() {
s.mu.Unlock()
}
-// contentSubtype must be lowercase
-// cannot return nil
-func (s *Server) getCodec(contentSubtype string) baseCodec {
- if s.opts.codec != nil {
- return s.opts.codec
- }
- if contentSubtype == "" {
- return encoding.GetCodec(proto.Name)
- }
- codec := encoding.GetCodec(contentSubtype)
- if codec == nil {
- return encoding.GetCodec(proto.Name)
+func init() {
+ internal.TestingUseHandlerImpl = func(arg interface{}) {
+ arg.(*Server).opts.useHandlerImpl = true
}
- return codec
}
// SetHeader sets the header metadata.
@@ -1328,8 +1272,8 @@ func SetHeader(ctx context.Context, md metadata.MD) error {
if md.Len() == 0 {
return nil
}
- stream := ServerTransportStreamFromContext(ctx)
- if stream == nil {
+ stream, ok := transport.StreamFromContext(ctx)
+ if !ok {
return status.Errorf(codes.Internal, "grpc: failed to fetch the stream from the context %v", ctx)
}
return stream.SetHeader(md)
@@ -1338,11 +1282,15 @@ func SetHeader(ctx context.Context, md metadata.MD) error {
// SendHeader sends header metadata. It may be called at most once.
// The provided md and headers set by SetHeader() will be sent.
func SendHeader(ctx context.Context, md metadata.MD) error {
- stream := ServerTransportStreamFromContext(ctx)
- if stream == nil {
+ stream, ok := transport.StreamFromContext(ctx)
+ if !ok {
return status.Errorf(codes.Internal, "grpc: failed to fetch the stream from the context %v", ctx)
}
- if err := stream.SendHeader(md); err != nil {
+ t := stream.ServerTransport()
+ if t == nil {
+ grpclog.Fatalf("grpc: SendHeader: %v has no ServerTransport to send header metadata.", stream)
+ }
+ if err := t.WriteHeader(stream, md); err != nil {
return toRPCErr(err)
}
return nil
@@ -1354,27 +1302,9 @@ func SetTrailer(ctx context.Context, md metadata.MD) error {
if md.Len() == 0 {
return nil
}
- stream := ServerTransportStreamFromContext(ctx)
- if stream == nil {
+ stream, ok := transport.StreamFromContext(ctx)
+ if !ok {
return status.Errorf(codes.Internal, "grpc: failed to fetch the stream from the context %v", ctx)
}
return stream.SetTrailer(md)
}
-
-// Method returns the method string for the server context. The returned
-// string is in the format of "/service/method".
-func Method(ctx context.Context) (string, bool) {
- s := ServerTransportStreamFromContext(ctx)
- if s == nil {
- return "", false
- }
- return s.Method(), true
-}
-
-type channelzServer struct {
- s *Server
-}
-
-func (c *channelzServer) ChannelzMetric() *channelz.ServerInternalMetric {
- return c.s.channelzMetric()
-}
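The serveUsingHandler path restored above hands an accepted connection to golang.org/x/net/http2 and lets its Server dispatch every HTTP/2 stream to an http.Handler (the gRPC *Server itself). A minimal sketch of that pattern outside gRPC, under my own assumptions (the listener address, the echoHandler type, and the h2c prior-knowledge requirement are mine, not the library's):

package main

import (
	"fmt"
	"net"
	"net/http"

	"golang.org/x/net/http2"
)

// echoHandler stands in for the gRPC *Server, which also implements http.Handler.
type echoHandler struct{}

func (echoHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	fmt.Fprintf(w, "handled %s %s via %s\n", r.Method, r.URL.Path, r.Proto)
}

func main() {
	ln, err := net.Listen("tcp", "127.0.0.1:8443")
	if err != nil {
		panic(err)
	}
	h2s := &http2.Server{MaxConcurrentStreams: 100}
	for {
		conn, err := ln.Accept()
		if err != nil {
			return
		}
		// Every HTTP/2 stream on this connection is routed to the handler,
		// mirroring serveUsingHandler; clients must speak HTTP/2 directly
		// (TLS+ALPN or h2c prior knowledge), since no HTTP/1.1 upgrade is done.
		go h2s.ServeConn(conn, &http2.ServeConnOpts{Handler: echoHandler{}})
	}
}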
diff --git a/vendor/google.golang.org/grpc/service_config.go b/vendor/google.golang.org/grpc/service_config.go
index a305fe0a4..53fa88f37 100644
--- a/vendor/google.golang.org/grpc/service_config.go
+++ b/vendor/google.golang.org/grpc/service_config.go
@@ -25,7 +25,6 @@ import (
"strings"
"time"
- "google.golang.org/grpc/codes"
"google.golang.org/grpc/grpclog"
)
@@ -33,8 +32,7 @@ const maxInt = int(^uint(0) >> 1)
// MethodConfig defines the configuration recommended by the service providers for a
// particular method.
-//
-// Deprecated: Users should not use this struct. Service config should be received
+// DEPRECATED: Users should not use this struct. Service config should be received
// through name resolver, as specified here
// https://github.com/grpc/grpc/blob/master/doc/service_config.md
type MethodConfig struct {
@@ -57,98 +55,22 @@ type MethodConfig struct {
// MaxRespSize is the maximum allowed payload size for an individual response in a
// stream (server->client) in bytes.
MaxRespSize *int
- // RetryPolicy configures retry options for the method.
- retryPolicy *retryPolicy
}
// ServiceConfig is provided by the service provider and contains parameters for how
// clients that connect to the service should behave.
-//
-// Deprecated: Users should not use this struct. Service config should be received
+// DEPRECATED: Users should not use this struct. Service config should be received
// through name resolver, as specified here
// https://github.com/grpc/grpc/blob/master/doc/service_config.md
type ServiceConfig struct {
// LB is the load balancer the service providers recommends. The balancer specified
// via grpc.WithBalancer will override this.
LB *string
-
- // Methods contains a map for the methods in this service. If there is an
- // exact match for a method (i.e. /service/method) in the map, use the
- // corresponding MethodConfig. If there's no exact match, look for the
- // default config for the service (/service/) and use the corresponding
- // MethodConfig if it exists. Otherwise, the method has no MethodConfig to
- // use.
+ // Methods contains a map for the methods in this service.
+ // If there is an exact match for a method (i.e. /service/method) in the map, use the corresponding MethodConfig.
+ // If there's no exact match, look for the default config for the service (/service/) and use the corresponding MethodConfig if it exists.
+ // Otherwise, the method has no MethodConfig to use.
Methods map[string]MethodConfig
-
- // If a retryThrottlingPolicy is provided, gRPC will automatically throttle
- // retry attempts and hedged RPCs when the client’s ratio of failures to
- // successes exceeds a threshold.
- //
- // For each server name, the gRPC client will maintain a token_count which is
- // initially set to maxTokens, and can take values between 0 and maxTokens.
- //
- // Every outgoing RPC (regardless of service or method invoked) will change
- // token_count as follows:
- //
- // - Every failed RPC will decrement the token_count by 1.
- // - Every successful RPC will increment the token_count by tokenRatio.
- //
- // If token_count is less than or equal to maxTokens / 2, then RPCs will not
- // be retried and hedged RPCs will not be sent.
- retryThrottling *retryThrottlingPolicy
-}
-
-// retryPolicy defines the go-native version of the retry policy defined by the
-// service config here:
-// https://github.com/grpc/proposal/blob/master/A6-client-retries.md#integration-with-service-config
-type retryPolicy struct {
- // MaxAttempts is the maximum number of attempts, including the original RPC.
- //
- // This field is required and must be two or greater.
- maxAttempts int
-
- // Exponential backoff parameters. The initial retry attempt will occur at
- // random(0, initialBackoffMS). In general, the nth attempt will occur at
- // random(0,
- // min(initialBackoffMS*backoffMultiplier**(n-1), maxBackoffMS)).
- //
- // These fields are required and must be greater than zero.
- initialBackoff time.Duration
- maxBackoff time.Duration
- backoffMultiplier float64
-
- // The set of status codes which may be retried.
- //
- // Status codes are specified as strings, e.g., "UNAVAILABLE".
- //
- // This field is required and must be non-empty.
- // Note: a set is used to store this for easy lookup.
- retryableStatusCodes map[codes.Code]bool
-}
-
-type jsonRetryPolicy struct {
- MaxAttempts int
- InitialBackoff string
- MaxBackoff string
- BackoffMultiplier float64
- RetryableStatusCodes []codes.Code
-}
-
-// retryThrottlingPolicy defines the go-native version of the retry throttling
-// policy defined by the service config here:
-// https://github.com/grpc/proposal/blob/master/A6-client-retries.md#integration-with-service-config
-type retryThrottlingPolicy struct {
- // The number of tokens starts at maxTokens. The token_count will always be
- // between 0 and maxTokens.
- //
- // This field is required and must be greater than zero.
- MaxTokens float64
- // The amount of tokens to add on each successful RPC. Typically this will
- // be some number between 0 and 1, e.g., 0.1.
- //
- // This field is required and must be greater than zero. Up to 3 decimal
- // places are supported.
- TokenRatio float64
}
func parseDuration(s *string) (*time.Duration, error) {
@@ -218,20 +140,15 @@ type jsonMC struct {
Timeout *string
MaxRequestMessageBytes *int64
MaxResponseMessageBytes *int64
- RetryPolicy *jsonRetryPolicy
}
// TODO(lyuxuan): delete this struct after cleaning up old service config implementation.
type jsonSC struct {
LoadBalancingPolicy *string
MethodConfig *[]jsonMC
- RetryThrottling *retryThrottlingPolicy
}
func parseServiceConfig(js string) (ServiceConfig, error) {
- if len(js) == 0 {
- return ServiceConfig{}, fmt.Errorf("no JSON service config provided")
- }
var rsc jsonSC
err := json.Unmarshal([]byte(js), &rsc)
if err != nil {
@@ -239,9 +156,8 @@ func parseServiceConfig(js string) (ServiceConfig, error) {
return ServiceConfig{}, err
}
sc := ServiceConfig{
- LB: rsc.LoadBalancingPolicy,
- Methods: make(map[string]MethodConfig),
- retryThrottling: rsc.RetryThrottling,
+ LB: rsc.LoadBalancingPolicy,
+ Methods: make(map[string]MethodConfig),
}
if rsc.MethodConfig == nil {
return sc, nil
@@ -261,10 +177,6 @@ func parseServiceConfig(js string) (ServiceConfig, error) {
WaitForReady: m.WaitForReady,
Timeout: d,
}
- if mc.retryPolicy, err = convertRetryPolicy(m.RetryPolicy); err != nil {
- grpclog.Warningf("grpc: parseServiceConfig error unmarshaling %s due to %v", js, err)
- return ServiceConfig{}, err
- }
if m.MaxRequestMessageBytes != nil {
if *m.MaxRequestMessageBytes > int64(maxInt) {
mc.MaxReqSize = newInt(maxInt)
@@ -286,56 +198,9 @@ func parseServiceConfig(js string) (ServiceConfig, error) {
}
}
- if sc.retryThrottling != nil {
- if sc.retryThrottling.MaxTokens <= 0 ||
- sc.retryThrottling.MaxTokens >= 1000 ||
- sc.retryThrottling.TokenRatio <= 0 {
- // Illegal throttling config; disable throttling.
- sc.retryThrottling = nil
- }
- }
return sc, nil
}
-func convertRetryPolicy(jrp *jsonRetryPolicy) (p *retryPolicy, err error) {
- if jrp == nil {
- return nil, nil
- }
- ib, err := parseDuration(&jrp.InitialBackoff)
- if err != nil {
- return nil, err
- }
- mb, err := parseDuration(&jrp.MaxBackoff)
- if err != nil {
- return nil, err
- }
-
- if jrp.MaxAttempts <= 1 ||
- *ib <= 0 ||
- *mb <= 0 ||
- jrp.BackoffMultiplier <= 0 ||
- len(jrp.RetryableStatusCodes) == 0 {
- grpclog.Warningf("grpc: ignoring retry policy %v due to illegal configuration", jrp)
- return nil, nil
- }
-
- rp := &retryPolicy{
- maxAttempts: jrp.MaxAttempts,
- initialBackoff: *ib,
- maxBackoff: *mb,
- backoffMultiplier: jrp.BackoffMultiplier,
- retryableStatusCodes: make(map[codes.Code]bool),
- }
- if rp.maxAttempts > 5 {
- // TODO(retry): Make the max maxAttempts configurable.
- rp.maxAttempts = 5
- }
- for _, code := range jrp.RetryableStatusCodes {
- rp.retryableStatusCodes[code] = true
- }
- return rp, nil
-}
-
func min(a, b *int) *int {
if *a < *b {
return a
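The retryPolicy and retryThrottlingPolicy comments removed above describe two rules: the nth attempt waits random(0, min(initialBackoff * backoffMultiplier^(n-1), maxBackoff)), and retries stop once the per-server token count falls to maxTokens/2 or below (-1 token per failed RPC, +tokenRatio per success). A standalone sketch of just those two rules, assuming nothing about gRPC's internals beyond the comments (the names and the use of math/rand are mine):

package main

import (
	"fmt"
	"math"
	"math/rand"
	"time"
)

// backoff returns the wait before retry n (n = 1 is the first retry),
// following random(0, min(initial*multiplier^(n-1), max)).
func backoff(n int, initial, max time.Duration, multiplier float64) time.Duration {
	cur := float64(initial) * math.Pow(multiplier, float64(n-1))
	if capped := float64(max); cur > capped {
		cur = capped
	}
	return time.Duration(rand.Int63n(int64(cur)))
}

// throttle tracks the token bucket: -1 token per failure, +tokenRatio per
// success, and retries are allowed only while tokens > maxTokens/2.
type throttle struct {
	maxTokens, tokenRatio, tokens float64
}

func (t *throttle) record(success bool) (retriesAllowed bool) {
	if success {
		t.tokens = math.Min(t.tokens+t.tokenRatio, t.maxTokens)
	} else {
		t.tokens = math.Max(t.tokens-1, 0)
	}
	return t.tokens > t.maxTokens/2
}

func main() {
	fmt.Println(backoff(3, 100*time.Millisecond, time.Second, 1.6))
	th := &throttle{maxTokens: 10, tokenRatio: 0.1, tokens: 10}
	for i := 0; i < 5; i++ {
		fmt.Println(th.record(false)) // true x4, then false: at 5 tokens (maxTokens/2) retries stop
	}
}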
diff --git a/vendor/google.golang.org/grpc/stats/stats.go b/vendor/google.golang.org/grpc/stats/stats.go
index 3f13190a0..d5aa2f793 100644
--- a/vendor/google.golang.org/grpc/stats/stats.go
+++ b/vendor/google.golang.org/grpc/stats/stats.go
@@ -169,8 +169,6 @@ func (s *OutTrailer) isRPCStats() {}
type End struct {
// Client is true if this End is from client side.
Client bool
- // BeginTime is the time when the RPC began.
- BeginTime time.Time
// EndTime is the time when the RPC ends.
EndTime time.Time
// Error is the error the RPC ended with. It is an error generated from
diff --git a/vendor/google.golang.org/grpc/status/go16.go b/vendor/google.golang.org/grpc/status/go16.go
deleted file mode 100644
index e59b53e82..000000000
--- a/vendor/google.golang.org/grpc/status/go16.go
+++ /dev/null
@@ -1,42 +0,0 @@
-// +build go1.6,!go1.7
-
-/*
- *
- * Copyright 2018 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package status
-
-import (
- "golang.org/x/net/context"
- "google.golang.org/grpc/codes"
-)
-
-// FromContextError converts a context error into a Status. It returns a
-// Status with codes.OK if err is nil, or a Status with codes.Unknown if err is
-// non-nil and not a context error.
-func FromContextError(err error) *Status {
- switch err {
- case nil:
- return New(codes.OK, "")
- case context.DeadlineExceeded:
- return New(codes.DeadlineExceeded, err.Error())
- case context.Canceled:
- return New(codes.Canceled, err.Error())
- default:
- return New(codes.Unknown, err.Error())
- }
-}
diff --git a/vendor/google.golang.org/grpc/status/go17.go b/vendor/google.golang.org/grpc/status/go17.go
deleted file mode 100644
index 090215149..000000000
--- a/vendor/google.golang.org/grpc/status/go17.go
+++ /dev/null
@@ -1,44 +0,0 @@
-// +build go1.7
-
-/*
- *
- * Copyright 2018 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package status
-
-import (
- "context"
-
- netctx "golang.org/x/net/context"
- "google.golang.org/grpc/codes"
-)
-
-// FromContextError converts a context error into a Status. It returns a
-// Status with codes.OK if err is nil, or a Status with codes.Unknown if err is
-// non-nil and not a context error.
-func FromContextError(err error) *Status {
- switch err {
- case nil:
- return New(codes.OK, "")
- case context.DeadlineExceeded, netctx.DeadlineExceeded:
- return New(codes.DeadlineExceeded, err.Error())
- case context.Canceled, netctx.Canceled:
- return New(codes.Canceled, err.Error())
- default:
- return New(codes.Unknown, err.Error())
- }
-}
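The two build-tagged files deleted above provide the same FromContextError mapping for pre- and post-Go 1.7 context packages; after this revert the helper no longer exists in the vendored package. A sketch of the equivalent mapping a caller could keep locally, using the standard library context (the statusFromCtxErr name is mine):

package main

import (
	"context"
	"fmt"
	"time"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

// statusFromCtxErr mirrors the deleted helper: context sentinel errors get
// their dedicated codes, nil is OK, anything else is Unknown.
func statusFromCtxErr(err error) *status.Status {
	switch err {
	case nil:
		return status.New(codes.OK, "")
	case context.DeadlineExceeded:
		return status.New(codes.DeadlineExceeded, err.Error())
	case context.Canceled:
		return status.New(codes.Canceled, err.Error())
	default:
		return status.New(codes.Unknown, err.Error())
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), time.Millisecond)
	defer cancel()
	<-ctx.Done()
	fmt.Println(statusFromCtxErr(ctx.Err()).Code()) // DeadlineExceeded
}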
diff --git a/vendor/google.golang.org/grpc/status/status.go b/vendor/google.golang.org/grpc/status/status.go
index 897321bab..d9defaebc 100644
--- a/vendor/google.golang.org/grpc/status/status.go
+++ b/vendor/google.golang.org/grpc/status/status.go
@@ -46,7 +46,7 @@ func (se *statusError) Error() string {
return fmt.Sprintf("rpc error: code = %s desc = %s", codes.Code(p.GetCode()), p.GetMessage())
}
-func (se *statusError) GRPCStatus() *Status {
+func (se *statusError) status() *Status {
return &Status{s: (*spb.Status)(se)}
}
@@ -120,25 +120,15 @@ func FromProto(s *spb.Status) *Status {
}
// FromError returns a Status representing err if it was produced from this
-// package or has a method `GRPCStatus() *Status`. Otherwise, ok is false and a
-// Status is returned with codes.Unknown and the original error message.
+// package, otherwise it returns nil, false.
func FromError(err error) (s *Status, ok bool) {
if err == nil {
return &Status{s: &spb.Status{Code: int32(codes.OK)}}, true
}
- if se, ok := err.(interface {
- GRPCStatus() *Status
- }); ok {
- return se.GRPCStatus(), true
+ if se, ok := err.(*statusError); ok {
+ return se.status(), true
}
- return New(codes.Unknown, err.Error()), false
-}
-
-// Convert is a convenience function which removes the need to handle the
-// boolean return value from FromError.
-func Convert(err error) *Status {
- s, _ := FromError(err)
- return s
+ return nil, false
}
// WithDetails returns a new status with the provided details messages appended to the status.
@@ -184,10 +174,8 @@ func Code(err error) codes.Code {
if err == nil {
return codes.OK
}
- if se, ok := err.(interface {
- GRPCStatus() *Status
- }); ok {
- return se.GRPCStatus().Code()
+ if se, ok := err.(*statusError); ok {
+ return se.status().Code()
}
return codes.Unknown
}
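The status.go hunk above reverts FromError from duck-typing any error exposing GRPCStatus() back to a concrete *statusError assertion, and drops the Convert helper. A small usage sketch showing the observable difference for callers (the error messages are made up):

package main

import (
	"errors"
	"fmt"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

func main() {
	// An error produced by the status package is recoverable in both versions.
	stErr := status.Error(codes.NotFound, "no such repo")
	if st, ok := status.FromError(stErr); ok {
		fmt.Println(st.Code(), st.Message()) // NotFound no such repo
	}

	// A plain error: under the reverted 1.9.1 code this prints "true" because
	// FromError returns (nil, false); the newer duck-typed version instead
	// returned a non-nil codes.Unknown Status alongside ok == false.
	st, ok := status.FromError(errors.New("boom"))
	fmt.Println(!ok, st == nil)
}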
diff --git a/vendor/google.golang.org/grpc/stream.go b/vendor/google.golang.org/grpc/stream.go
index b71eb3112..f91381995 100644
--- a/vendor/google.golang.org/grpc/stream.go
+++ b/vendor/google.golang.org/grpc/stream.go
@@ -21,8 +21,6 @@ package grpc
import (
"errors"
"io"
- "math"
- "strconv"
"sync"
"time"
@@ -31,20 +29,15 @@ import (
"google.golang.org/grpc/balancer"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/encoding"
- "google.golang.org/grpc/grpclog"
- "google.golang.org/grpc/internal/channelz"
- "google.golang.org/grpc/internal/grpcrand"
- "google.golang.org/grpc/internal/transport"
"google.golang.org/grpc/metadata"
+ "google.golang.org/grpc/peer"
"google.golang.org/grpc/stats"
"google.golang.org/grpc/status"
+ "google.golang.org/grpc/transport"
)
// StreamHandler defines the handler called by gRPC server to complete the
-// execution of a streaming RPC. If a StreamHandler returns an error, it
-// should be produced by the status package, or else gRPC will use
-// codes.Unknown as the status code and err.Error() as the status message
-// of the RPC.
+// execution of a streaming RPC.
type StreamHandler func(srv interface{}, stream ServerStream) error
// StreamDesc represents a streaming RPC service's method specification.
@@ -58,21 +51,30 @@ type StreamDesc struct {
}
// Stream defines the common interface a client or server stream has to satisfy.
-//
-// Deprecated: See ClientStream and ServerStream documentation instead.
type Stream interface {
- // Deprecated: See ClientStream and ServerStream documentation instead.
+ // Context returns the context for this stream.
Context() context.Context
- // Deprecated: See ClientStream and ServerStream documentation instead.
+ // SendMsg blocks until it sends m, the stream is done or the stream
+ // breaks.
+ // On error, it aborts the stream and returns an RPC status on client
+ // side. On server side, it simply returns the error to the caller.
+ // SendMsg is called by generated code. Also Users can call SendMsg
+ // directly when it is really needed in their use cases.
+ // It's safe to have a goroutine calling SendMsg and another goroutine calling
+ // recvMsg on the same stream at the same time.
+ // But it is not safe to call SendMsg on the same stream in different goroutines.
SendMsg(m interface{}) error
- // Deprecated: See ClientStream and ServerStream documentation instead.
+ // RecvMsg blocks until it receives a message or the stream is
+ // done. On client side, it returns io.EOF when the stream is done. On
+ // any other error, it aborts the stream and returns an RPC status. On
+ // server side, it simply returns the error to the caller.
+ // It's safe to have a goroutine calling SendMsg and another goroutine calling
+ // recvMsg on the same stream at the same time.
+ // But it is not safe to call RecvMsg on the same stream in different goroutines.
RecvMsg(m interface{}) error
}
-// ClientStream defines the client-side behavior of a streaming RPC.
-//
-// All errors returned from ClientStream methods are compatible with the
-// status package.
+// ClientStream defines the interface a client stream has to satisfy.
type ClientStream interface {
// Header returns the header metadata received from the server if there
// is any. It blocks if the metadata is not ready to read.
@@ -84,104 +86,54 @@ type ClientStream interface {
// CloseSend closes the send direction of the stream. It closes the stream
// when non-nil error is met.
CloseSend() error
- // Context returns the context for this stream.
- //
- // It should not be called until after Header or RecvMsg has returned. Once
- // called, subsequent client-side retries are disabled.
- Context() context.Context
- // SendMsg is generally called by generated code. On error, SendMsg aborts
- // the stream. If the error was generated by the client, the status is
- // returned directly; otherwise, io.EOF is returned and the status of
- // the stream may be discovered using RecvMsg.
- //
- // SendMsg blocks until:
- // - There is sufficient flow control to schedule m with the transport, or
- // - The stream is done, or
- // - The stream breaks.
- //
- // SendMsg does not wait until the message is received by the server. An
- // untimely stream closure may result in lost messages. To ensure delivery,
- // users should ensure the RPC completed successfully using RecvMsg.
- //
- // It is safe to have a goroutine calling SendMsg and another goroutine
- // calling RecvMsg on the same stream at the same time, but it is not safe
- // to call SendMsg on the same stream in different goroutines.
- SendMsg(m interface{}) error
- // RecvMsg blocks until it receives a message into m or the stream is
- // done. It returns io.EOF when the stream completes successfully. On
- // any other error, the stream is aborted and the error contains the RPC
- // status.
- //
- // It is safe to have a goroutine calling SendMsg and another goroutine
- // calling RecvMsg on the same stream at the same time, but it is not
- // safe to call RecvMsg on the same stream in different goroutines.
- RecvMsg(m interface{}) error
+ // Stream.SendMsg() may return a non-nil error when something wrong happens sending
+ // the request. The returned error indicates the status of this sending, not the final
+ // status of the RPC.
+ // Always call Stream.RecvMsg() to get the final status if you care about the status of
+ // the RPC.
+ Stream
}
// NewStream creates a new Stream for the client side. This is typically
-// called by generated code. ctx is used for the lifetime of the stream.
-//
-// To ensure resources are not leaked due to the stream returned, one of the following
-// actions must be performed:
-//
-// 1. Call Close on the ClientConn.
-// 2. Cancel the context provided.
-// 3. Call RecvMsg until a non-nil error is returned. A protobuf-generated
-// client-streaming RPC, for instance, might use the helper function
-// CloseAndRecv (note that CloseSend does not Recv, therefore is not
-// guaranteed to release all resources).
-// 4. Receive a non-nil, non-io.EOF error from Header or SendMsg.
-//
-// If none of the above happen, a goroutine and a context will be leaked, and grpc
-// will not call the optionally-configured stats handler with a stats.End message.
+// called by generated code.
func (cc *ClientConn) NewStream(ctx context.Context, desc *StreamDesc, method string, opts ...CallOption) (ClientStream, error) {
- // allow interceptor to see all applicable call options, which means those
- // configured as defaults from dial option as well as per-call options
- opts = combine(cc.dopts.callOptions, opts)
-
if cc.dopts.streamInt != nil {
return cc.dopts.streamInt(ctx, desc, cc, method, newClientStream, opts...)
}
return newClientStream(ctx, desc, cc, method, opts...)
}
-// NewClientStream is a wrapper for ClientConn.NewStream.
+// NewClientStream creates a new Stream for the client side. This is typically
+// called by generated code.
+//
+// DEPRECATED: Use ClientConn.NewStream instead.
func NewClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (ClientStream, error) {
return cc.NewStream(ctx, desc, method, opts...)
}
func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (_ ClientStream, err error) {
- if channelz.IsOn() {
- cc.incrCallsStarted()
- defer func() {
- if err != nil {
- cc.incrCallsFailed()
- }
- }()
- }
+ var (
+ t transport.ClientTransport
+ s *transport.Stream
+ done func(balancer.DoneInfo)
+ cancel context.CancelFunc
+ )
c := defaultCallInfo()
mc := cc.GetMethodConfig(method)
if mc.WaitForReady != nil {
c.failFast = !*mc.WaitForReady
}
- // Possible context leak:
- // The cancel function for the child context we create will only be called
- // when RecvMsg returns a non-nil error, if the ClientConn is closed, or if
- // an error is generated by SendMsg.
- // https://github.com/grpc/grpc-go/issues/1818.
- var cancel context.CancelFunc
if mc.Timeout != nil && *mc.Timeout >= 0 {
ctx, cancel = context.WithTimeout(ctx, *mc.Timeout)
- } else {
- ctx, cancel = context.WithCancel(ctx)
+ defer func() {
+ if err != nil {
+ cancel()
+ }
+ }()
}
- defer func() {
- if err != nil {
- cancel()
- }
- }()
+ opts = append(cc.dopts.callOptions, opts...)
for _, o := range opts {
if err := o.before(c); err != nil {
return nil, toRPCErr(err)
@@ -189,14 +141,15 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth
}
c.maxSendMessageSize = getMaxSize(mc.MaxReqSize, c.maxSendMessageSize, defaultClientMaxSendMessageSize)
c.maxReceiveMessageSize = getMaxSize(mc.MaxRespSize, c.maxReceiveMessageSize, defaultClientMaxReceiveMessageSize)
- if err := setCallInfoCodec(c); err != nil {
- return nil, err
- }
callHdr := &transport.CallHdr{
- Host: cc.authority,
- Method: method,
- ContentSubtype: c.contentSubtype,
+ Host: cc.authority,
+ Method: method,
+ // If it's not client streaming, we should already have the request to be sent,
+ // so we don't flush the header.
+ // If it's client streaming, the user may never send a request or send it any
+ // time soon, so we ask the transport to flush the header.
+ Flush: desc.ClientStreams,
}
// Set our outgoing compression according to the UseCompressor CallOption, if
@@ -229,631 +182,394 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth
}
trInfo.tr.LazyLog(&trInfo.firstLine, false)
ctx = trace.NewContext(ctx, trInfo.tr)
+ defer func() {
+ if err != nil {
+ // Need to call tr.finish() if error is returned.
+ // Because tr will not be returned to caller.
+ trInfo.tr.LazyPrintf("RPC: [%v]", err)
+ trInfo.tr.SetError()
+ trInfo.tr.Finish()
+ }
+ }()
}
ctx = newContextWithRPCInfo(ctx, c.failFast)
sh := cc.dopts.copts.StatsHandler
- var beginTime time.Time
if sh != nil {
ctx = sh.TagRPC(ctx, &stats.RPCTagInfo{FullMethodName: method, FailFast: c.failFast})
- beginTime = time.Now()
begin := &stats.Begin{
Client: true,
- BeginTime: beginTime,
+ BeginTime: time.Now(),
FailFast: c.failFast,
}
sh.HandleRPC(ctx, begin)
- }
-
- cs := &clientStream{
- callHdr: callHdr,
- ctx: ctx,
- methodConfig: &mc,
- opts: opts,
- callInfo: c,
- cc: cc,
- desc: desc,
- codec: c.codec,
- cp: cp,
- comp: comp,
- cancel: cancel,
- beginTime: beginTime,
- firstAttempt: true,
- }
- if !cc.dopts.disableRetry {
- cs.retryThrottler = cc.retryThrottler.Load().(*retryThrottler)
- }
-
- cs.callInfo.stream = cs
- // Only this initial attempt has stats/tracing.
- // TODO(dfawley): move to newAttempt when per-attempt stats are implemented.
- if err := cs.newAttemptLocked(sh, trInfo); err != nil {
- cs.finish(err)
- return nil, err
- }
-
- op := func(a *csAttempt) error { return a.newStream() }
- if err := cs.withRetry(op, func() { cs.bufferForRetryLocked(0, op) }); err != nil {
- cs.finish(err)
- return nil, err
- }
-
- if desc != unaryStreamDesc {
- // Listen on cc and stream contexts to cleanup when the user closes the
- // ClientConn or cancels the stream context. In all other cases, an error
- // should already be injected into the recv buffer by the transport, which
- // the client will eventually receive, and then we will cancel the stream's
- // context in clientStream.finish.
- go func() {
- select {
- case <-cc.ctx.Done():
- cs.finish(ErrClientConnClosing)
- case <-ctx.Done():
- cs.finish(toRPCErr(ctx.Err()))
+ defer func() {
+ if err != nil {
+ // Only handle end stats if err != nil.
+ end := &stats.End{
+ Client: true,
+ Error: err,
+ }
+ sh.HandleRPC(ctx, end)
}
}()
}
- return cs, nil
-}
-func (cs *clientStream) newAttemptLocked(sh stats.Handler, trInfo traceInfo) error {
- cs.attempt = &csAttempt{
- cs: cs,
- dc: cs.cc.dopts.dc,
- statsHandler: sh,
- trInfo: trInfo,
- }
+ for {
+ // Check to make sure the context has expired. This will prevent us from
+ // looping forever if an error occurs for wait-for-ready RPCs where no data
+ // is sent on the wire.
+ select {
+ case <-ctx.Done():
+ return nil, toRPCErr(ctx.Err())
+ default:
+ }
- if err := cs.ctx.Err(); err != nil {
- return toRPCErr(err)
- }
- t, done, err := cs.cc.getTransport(cs.ctx, cs.callInfo.failFast, cs.callHdr.Method)
- if err != nil {
- return err
+ t, done, err = cc.getTransport(ctx, c.failFast)
+ if err != nil {
+ return nil, err
+ }
+
+ s, err = t.NewStream(ctx, callHdr)
+ if err != nil {
+ if done != nil {
+ doneInfo := balancer.DoneInfo{Err: err}
+ if _, ok := err.(transport.ConnectionError); ok {
+ // If error is connection error, transport was sending data on wire,
+ // and we are not sure if anything has been sent on wire.
+ // If error is not connection error, we are sure nothing has been sent.
+ doneInfo.BytesSent = true
+ }
+ done(doneInfo)
+ done = nil
+ }
+ // In the event of any error from NewStream, we never attempted to write
+ // anything to the wire, so we can retry indefinitely for non-fail-fast
+ // RPCs.
+ if !c.failFast {
+ continue
+ }
+ return nil, toRPCErr(err)
+ }
+ break
}
- cs.attempt.t = t
- cs.attempt.done = done
- return nil
-}
-func (a *csAttempt) newStream() error {
- cs := a.cs
- cs.callHdr.PreviousAttempts = cs.numRetries
- s, err := a.t.NewStream(cs.ctx, cs.callHdr)
- if err != nil {
- return toRPCErr(err)
+ // Set callInfo.peer object from stream's context.
+ if peer, ok := peer.FromContext(s.Context()); ok {
+ c.peer = peer
}
- cs.attempt.s = s
- cs.attempt.p = &parser{r: s}
- return nil
+ cs := &clientStream{
+ opts: opts,
+ c: c,
+ desc: desc,
+ codec: cc.dopts.codec,
+ cp: cp,
+ dc: cc.dopts.dc,
+ comp: comp,
+ cancel: cancel,
+
+ done: done,
+ t: t,
+ s: s,
+ p: &parser{r: s},
+
+ tracing: EnableTracing,
+ trInfo: trInfo,
+
+ statsCtx: ctx,
+ statsHandler: cc.dopts.copts.StatsHandler,
+ }
+ // Listen on s.Context().Done() to detect cancellation and s.Done() to detect
+ // normal termination when there is no pending I/O operations on this stream.
+ go func() {
+ select {
+ case <-t.Error():
+ // Incur transport error, simply exit.
+ case <-cc.ctx.Done():
+ cs.finish(ErrClientConnClosing)
+ cs.closeTransportStream(ErrClientConnClosing)
+ case <-s.Done():
+ // TODO: The trace of the RPC is terminated here when there is no pending
+ // I/O, which is probably not the optimal solution.
+ cs.finish(s.Status().Err())
+ cs.closeTransportStream(nil)
+ case <-s.GoAway():
+ cs.finish(errConnDrain)
+ cs.closeTransportStream(errConnDrain)
+ case <-s.Context().Done():
+ err := s.Context().Err()
+ cs.finish(err)
+ cs.closeTransportStream(transport.ContextErr(err))
+ }
+ }()
+ return cs, nil
}
// clientStream implements a client side Stream.
type clientStream struct {
- callHdr *transport.CallHdr
- opts []CallOption
- callInfo *callInfo
- cc *ClientConn
- desc *StreamDesc
-
- codec baseCodec
- cp Compressor
- comp encoding.Compressor
-
- cancel context.CancelFunc // cancels all attempts
-
- sentLast bool // sent an end stream
- beginTime time.Time
-
- methodConfig *MethodConfig
-
- ctx context.Context // the application's context, wrapped by stats/tracing
-
- retryThrottler *retryThrottler // The throttler active when the RPC began.
-
- mu sync.Mutex
- firstAttempt bool // if true, transparent retry is valid
- numRetries int // exclusive of transparent retry attempt(s)
- numRetriesSincePushback int // retries since pushback; to reset backoff
- finished bool // TODO: replace with atomic cmpxchg or sync.Once?
- attempt *csAttempt // the active client stream attempt
- // TODO(hedging): hedging will have multiple attempts simultaneously.
- committed bool // active attempt committed for retry?
- buffer []func(a *csAttempt) error // operations to replay on retry
- bufferSize int // current size of buffer
-}
-
-// csAttempt implements a single transport stream attempt within a
-// clientStream.
-type csAttempt struct {
- cs *clientStream
+ opts []CallOption
+ c *callInfo
t transport.ClientTransport
s *transport.Stream
p *parser
- done func(balancer.DoneInfo)
+ desc *StreamDesc
- finished bool
+ codec Codec
+ cp Compressor
dc Decompressor
+ comp encoding.Compressor
decomp encoding.Compressor
decompSet bool
- mu sync.Mutex // guards trInfo.tr
- // trInfo.tr is set when created (if EnableTracing is true),
- // and cleared when the finish method is called.
- trInfo traceInfo
-
- statsHandler stats.Handler
-}
-
-func (cs *clientStream) commitAttemptLocked() {
- cs.committed = true
- cs.buffer = nil
-}
-
-func (cs *clientStream) commitAttempt() {
- cs.mu.Lock()
- cs.commitAttemptLocked()
- cs.mu.Unlock()
-}
-
-// shouldRetry returns nil if the RPC should be retried; otherwise it returns
-// the error that should be returned by the operation.
-func (cs *clientStream) shouldRetry(err error) error {
- if cs.attempt.s == nil && !cs.callInfo.failFast {
- // In the event of any error from NewStream (attempt.s == nil), we
- // never attempted to write anything to the wire, so we can retry
- // indefinitely for non-fail-fast RPCs.
- return nil
- }
- if cs.finished || cs.committed {
- // RPC is finished or committed; cannot retry.
- return err
- }
- // Wait for the trailers.
- if cs.attempt.s != nil {
- <-cs.attempt.s.Done()
- }
- if cs.firstAttempt && !cs.callInfo.failFast && (cs.attempt.s == nil || cs.attempt.s.Unprocessed()) {
- // First attempt, wait-for-ready, stream unprocessed: transparently retry.
- cs.firstAttempt = false
- return nil
- }
- cs.firstAttempt = false
- if cs.cc.dopts.disableRetry {
- return err
- }
-
- pushback := 0
- hasPushback := false
- if cs.attempt.s != nil {
- if to, toErr := cs.attempt.s.TrailersOnly(); toErr != nil {
- // Context error; stop now.
- return toErr
- } else if !to {
- return err
- }
-
- // TODO(retry): Move down if the spec changes to not check server pushback
- // before considering this a failure for throttling.
- sps := cs.attempt.s.Trailer()["grpc-retry-pushback-ms"]
- if len(sps) == 1 {
- var e error
- if pushback, e = strconv.Atoi(sps[0]); e != nil || pushback < 0 {
- grpclog.Infof("Server retry pushback specified to abort (%q).", sps[0])
- cs.retryThrottler.throttle() // This counts as a failure for throttling.
- return err
- }
- hasPushback = true
- } else if len(sps) > 1 {
- grpclog.Warningf("Server retry pushback specified multiple values (%q); not retrying.", sps)
- cs.retryThrottler.throttle() // This counts as a failure for throttling.
- return err
- }
- }
-
- var code codes.Code
- if cs.attempt.s != nil {
- code = cs.attempt.s.Status().Code()
- } else {
- code = status.Convert(err).Code()
- }
-
- rp := cs.methodConfig.retryPolicy
- if rp == nil || !rp.retryableStatusCodes[code] {
- return err
- }
-
- // Note: the ordering here is important; we count this as a failure
- // only if the code matched a retryable code.
- if cs.retryThrottler.throttle() {
- return err
- }
- if cs.numRetries+1 >= rp.maxAttempts {
- return err
- }
+ cancel context.CancelFunc
- var dur time.Duration
- if hasPushback {
- dur = time.Millisecond * time.Duration(pushback)
- cs.numRetriesSincePushback = 0
- } else {
- fact := math.Pow(rp.backoffMultiplier, float64(cs.numRetriesSincePushback))
- cur := float64(rp.initialBackoff) * fact
- if max := float64(rp.maxBackoff); cur > max {
- cur = max
- }
- dur = time.Duration(grpcrand.Int63n(int64(cur)))
- cs.numRetriesSincePushback++
- }
+ tracing bool // set to EnableTracing when the clientStream is created.
- // TODO(dfawley): we could eagerly fail here if dur puts us past the
- // deadline, but unsure if it is worth doing.
- t := time.NewTimer(dur)
- select {
- case <-t.C:
- cs.numRetries++
- return nil
- case <-cs.ctx.Done():
- t.Stop()
- return status.FromContextError(cs.ctx.Err()).Err()
- }
-}
+ mu sync.Mutex
+ done func(balancer.DoneInfo)
+ closed bool
+ finished bool
+ // trInfo.tr is set when the clientStream is created (if EnableTracing is true),
+ // and is set to nil when the clientStream's finish method is called.
+ trInfo traceInfo
-// Returns nil if a retry was performed and succeeded; error otherwise.
-func (cs *clientStream) retryLocked(lastErr error) error {
- for {
- cs.attempt.finish(lastErr)
- if err := cs.shouldRetry(lastErr); err != nil {
- cs.commitAttemptLocked()
- return err
- }
- if err := cs.newAttemptLocked(nil, traceInfo{}); err != nil {
- return err
- }
- if lastErr = cs.replayBufferLocked(); lastErr == nil {
- return nil
- }
- }
+ // statsCtx keeps the user context for stats handling.
+ // All stats collection should use the statsCtx (instead of the stream context)
+ // so that all the generated stats for a particular RPC can be associated in the processing phase.
+ statsCtx context.Context
+ statsHandler stats.Handler
}
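The retry logic removed in this hunk computes its delay as exponential backoff with full jitter: initialBackoff * multiplier^n, capped at maxBackoff, then a random duration in [0, cap). A self-contained sketch of that computation with hypothetical names, not the gRPC API:

package main

import (
	"fmt"
	"math"
	"math/rand"
	"time"
)

// nextBackoff returns a randomized delay for retry attempt n (0-based):
// initial * multiplier^n, capped at max, then jittered into [0, cap).
func nextBackoff(initial, max time.Duration, multiplier float64, n int) time.Duration {
	cur := float64(initial) * math.Pow(multiplier, float64(n))
	if m := float64(max); cur > m {
		cur = m
	}
	return time.Duration(rand.Int63n(int64(cur)))
}

func main() {
	for n := 0; n < 4; n++ {
		fmt.Println(nextBackoff(100*time.Millisecond, 2*time.Second, 2.0, n))
	}
}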
func (cs *clientStream) Context() context.Context {
- cs.commitAttempt()
- // No need to lock before using attempt, since we know it is committed and
- // cannot change.
- return cs.attempt.s.Context()
-}
-
-func (cs *clientStream) withRetry(op func(a *csAttempt) error, onSuccess func()) error {
- cs.mu.Lock()
- for {
- if cs.committed {
- cs.mu.Unlock()
- return op(cs.attempt)
- }
- a := cs.attempt
- cs.mu.Unlock()
- err := op(a)
- cs.mu.Lock()
- if a != cs.attempt {
- // We started another attempt already.
- continue
- }
- if err == io.EOF {
- <-a.s.Done()
- }
- if err == nil || (err == io.EOF && a.s.Status().Code() == codes.OK) {
- onSuccess()
- cs.mu.Unlock()
- return err
- }
- if err := cs.retryLocked(err); err != nil {
- cs.mu.Unlock()
- return err
- }
- }
+ return cs.s.Context()
}
func (cs *clientStream) Header() (metadata.MD, error) {
- var m metadata.MD
- err := cs.withRetry(func(a *csAttempt) error {
- var err error
- m, err = a.s.Header()
- return toRPCErr(err)
- }, cs.commitAttemptLocked)
+ m, err := cs.s.Header()
if err != nil {
- cs.finish(err)
+ if _, ok := err.(transport.ConnectionError); !ok {
+ cs.closeTransportStream(err)
+ }
}
return m, err
}
func (cs *clientStream) Trailer() metadata.MD {
- // On RPC failure, we never need to retry, because valid usage requires
- // that RecvMsg() has returned a non-nil error before this function is called.
- // We would have retried earlier if necessary.
- //
- // Commit the attempt anyway, just in case users are not following those
- // directions -- it will prevent races and should not meaningfully impact
- // performance.
- cs.commitAttempt()
- if cs.attempt.s == nil {
- return nil
- }
- return cs.attempt.s.Trailer()
+ return cs.s.Trailer()
}
-func (cs *clientStream) replayBufferLocked() error {
- a := cs.attempt
- for _, f := range cs.buffer {
- if err := f(a); err != nil {
- return err
+func (cs *clientStream) SendMsg(m interface{}) (err error) {
+ if cs.tracing {
+ cs.mu.Lock()
+ if cs.trInfo.tr != nil {
+ cs.trInfo.tr.LazyLog(&payload{sent: true, msg: m}, true)
}
+ cs.mu.Unlock()
}
- return nil
-}
-
-func (cs *clientStream) bufferForRetryLocked(sz int, op func(a *csAttempt) error) {
- // Note: we still will buffer if retry is disabled (for transparent retries).
- if cs.committed {
- return
- }
- cs.bufferSize += sz
- if cs.bufferSize > cs.callInfo.maxRetryRPCBufferSize {
- cs.commitAttemptLocked()
- return
- }
- cs.buffer = append(cs.buffer, op)
-}
-
-func (cs *clientStream) SendMsg(m interface{}) (err error) {
+ // TODO Investigate how to signal the stats handling party.
+ // generate error stats if err != nil && err != io.EOF?
defer func() {
- if err != nil && err != io.EOF {
- // Call finish on the client stream for errors generated by this SendMsg
- // call, as these indicate problems created by this client. (Transport
- // errors are converted to an io.EOF error in csAttempt.sendMsg; the real
- // error will be returned from RecvMsg eventually in that case, or be
- // retried.)
+ if err != nil {
cs.finish(err)
}
+ if err == nil {
+ return
+ }
+ if err == io.EOF {
+ // Specialize the process for server streaming. SendMsg is only called
+ // once when creating the stream object. io.EOF needs to be skipped when
+ // the RPC finishes early (before the stream object is created).
+ // TODO: It is probably better to move this into the generated code.
+ if !cs.desc.ClientStreams && cs.desc.ServerStreams {
+ err = nil
+ }
+ return
+ }
+ if _, ok := err.(transport.ConnectionError); !ok {
+ cs.closeTransportStream(err)
+ }
+ err = toRPCErr(err)
}()
- if cs.sentLast {
- return status.Errorf(codes.Internal, "SendMsg called after CloseSend")
- }
- if !cs.desc.ClientStreams {
- cs.sentLast = true
- }
- data, err := encode(cs.codec, m)
- if err != nil {
- return err
+ var outPayload *stats.OutPayload
+ if cs.statsHandler != nil {
+ outPayload = &stats.OutPayload{
+ Client: true,
+ }
}
- compData, err := compress(data, cs.cp, cs.comp)
+ hdr, data, err := encode(cs.codec, m, cs.cp, outPayload, cs.comp)
if err != nil {
return err
}
- hdr, payload := msgHeader(data, compData)
- // TODO(dfawley): should we be checking len(data) instead?
- if len(payload) > *cs.callInfo.maxSendMessageSize {
- return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", len(payload), *cs.callInfo.maxSendMessageSize)
+ if cs.c.maxSendMessageSize == nil {
+ return status.Errorf(codes.Internal, "callInfo maxSendMessageSize field uninitialized(nil)")
}
- op := func(a *csAttempt) error {
- err := a.sendMsg(m, hdr, payload, data)
- // nil out the message and uncomp when replaying; they are only needed for
- // stats which is disabled for subsequent attempts.
- m, data = nil, nil
- return err
+ if len(data) > *cs.c.maxSendMessageSize {
+ return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", len(data), *cs.c.maxSendMessageSize)
}
- return cs.withRetry(op, func() { cs.bufferForRetryLocked(len(hdr)+len(payload), op) })
-}
-
-func (cs *clientStream) RecvMsg(m interface{}) error {
- err := cs.withRetry(func(a *csAttempt) error {
- return a.recvMsg(m)
- }, cs.commitAttemptLocked)
- if err != nil || !cs.desc.ServerStreams {
- // err != nil or non-server-streaming indicates end of stream.
- cs.finish(err)
+ err = cs.t.Write(cs.s, hdr, data, &transport.Options{Last: false})
+ if err == nil && outPayload != nil {
+ outPayload.SentTime = time.Now()
+ cs.statsHandler.HandleRPC(cs.statsCtx, outPayload)
}
return err
}
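The hdr written alongside data above is, assuming the standard gRPC length-prefixed framing, a 5-byte message header: one compressed-flag byte followed by a big-endian uint32 payload length. A standalone illustration, not the vendored encode helper:

package main

import (
	"encoding/binary"
	"fmt"
)

// msgHeader builds the 5-byte gRPC message header for a payload.
func msgHeader(payload []byte, compressed bool) []byte {
	hdr := make([]byte, 5)
	if compressed {
		hdr[0] = 1
	}
	binary.BigEndian.PutUint32(hdr[1:], uint32(len(payload)))
	return hdr
}

func main() {
	fmt.Printf("% x\n", msgHeader([]byte("hello"), false)) // 00 00 00 00 05
}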
-func (cs *clientStream) CloseSend() error {
- if cs.sentLast {
- // TODO: return an error and finish the stream instead, due to API misuse?
- return nil
- }
- cs.sentLast = true
- op := func(a *csAttempt) error {
- a.t.Write(a.s, nil, nil, &transport.Options{Last: true})
- // Always return nil; io.EOF is the only error that might make sense
- // instead, but there is no need to signal the client to call RecvMsg
- // as the only use left for the stream after CloseSend is to call
- // RecvMsg. This also matches historical behavior.
- return nil
- }
- cs.withRetry(op, func() { cs.bufferForRetryLocked(0, op) })
- // We intentionally never return an error here; see the comment above.
- return nil
-}
-
-func (cs *clientStream) finish(err error) {
- if err == io.EOF {
- // Ending a stream with EOF indicates a success.
- err = nil
- }
- cs.mu.Lock()
- if cs.finished {
- cs.mu.Unlock()
- return
- }
- cs.finished = true
- cs.commitAttemptLocked()
- cs.mu.Unlock()
- if err == nil {
- cs.retryThrottler.successfulRPC()
- }
- if channelz.IsOn() {
- if err != nil {
- cs.cc.incrCallsFailed()
- } else {
- cs.cc.incrCallsSucceeded()
- }
- }
- if cs.attempt != nil {
- cs.attempt.finish(err)
- }
- // after functions all rely upon having a stream.
- if cs.attempt.s != nil {
- for _, o := range cs.opts {
- o.after(cs.callInfo)
- }
- }
- cs.cancel()
-}
-
-func (a *csAttempt) sendMsg(m interface{}, hdr, payld, data []byte) error {
- cs := a.cs
- if EnableTracing {
- a.mu.Lock()
- if a.trInfo.tr != nil {
- a.trInfo.tr.LazyLog(&payload{sent: true, msg: m}, true)
- }
- a.mu.Unlock()
- }
- if err := a.t.Write(a.s, hdr, payld, &transport.Options{Last: !cs.desc.ClientStreams}); err != nil {
- if !cs.desc.ClientStreams {
- // For non-client-streaming RPCs, we return nil instead of EOF on error
- // because the generated code requires it. finish is not called; RecvMsg()
- // will call it with the stream's status independently.
- return nil
- }
- return io.EOF
- }
- if a.statsHandler != nil {
- a.statsHandler.HandleRPC(cs.ctx, outPayload(true, m, data, payld, time.Now()))
- }
- if channelz.IsOn() {
- a.t.IncrMsgSent()
- }
- return nil
-}
-
-func (a *csAttempt) recvMsg(m interface{}) (err error) {
- cs := a.cs
+func (cs *clientStream) RecvMsg(m interface{}) (err error) {
var inPayload *stats.InPayload
- if a.statsHandler != nil {
+ if cs.statsHandler != nil {
inPayload = &stats.InPayload{
Client: true,
}
}
- if !a.decompSet {
+ if cs.c.maxReceiveMessageSize == nil {
+ return status.Errorf(codes.Internal, "callInfo maxReceiveMessageSize field uninitialized(nil)")
+ }
+ if !cs.decompSet {
// Block until we receive headers containing received message encoding.
- if ct := a.s.RecvCompress(); ct != "" && ct != encoding.Identity {
- if a.dc == nil || a.dc.Type() != ct {
+ if ct := cs.s.RecvCompress(); ct != "" && ct != encoding.Identity {
+ if cs.dc == nil || cs.dc.Type() != ct {
// No configured decompressor, or it does not match the incoming
// message encoding; attempt to find a registered compressor that does.
- a.dc = nil
- a.decomp = encoding.GetCompressor(ct)
+ cs.dc = nil
+ cs.decomp = encoding.GetCompressor(ct)
}
} else {
// No compression is used; disable our decompressor.
- a.dc = nil
+ cs.dc = nil
}
// Only initialize this state once per stream.
- a.decompSet = true
+ cs.decompSet = true
}
- err = recv(a.p, cs.codec, a.s, a.dc, m, *cs.callInfo.maxReceiveMessageSize, inPayload, a.decomp)
- if err != nil {
+ err = recv(cs.p, cs.codec, cs.s, cs.dc, m, *cs.c.maxReceiveMessageSize, inPayload, cs.decomp)
+ defer func() {
+ // err != nil indicates the termination of the stream.
+ if err != nil {
+ cs.finish(err)
+ }
+ }()
+ if err == nil {
+ if cs.tracing {
+ cs.mu.Lock()
+ if cs.trInfo.tr != nil {
+ cs.trInfo.tr.LazyLog(&payload{sent: false, msg: m}, true)
+ }
+ cs.mu.Unlock()
+ }
+ if inPayload != nil {
+ cs.statsHandler.HandleRPC(cs.statsCtx, inPayload)
+ }
+ if !cs.desc.ClientStreams || cs.desc.ServerStreams {
+ return
+ }
+ // Special handling for client-streaming RPCs.
+ // This recv expects EOF or errors, so we don't collect inPayload.
+ if cs.c.maxReceiveMessageSize == nil {
+ return status.Errorf(codes.Internal, "callInfo maxReceiveMessageSize field uninitialized(nil)")
+ }
+ err = recv(cs.p, cs.codec, cs.s, cs.dc, m, *cs.c.maxReceiveMessageSize, nil, cs.decomp)
+ cs.closeTransportStream(err)
+ if err == nil {
+ return toRPCErr(errors.New("grpc: client streaming protocol violation: get <nil>, want <EOF>"))
+ }
if err == io.EOF {
- if statusErr := a.s.Status().Err(); statusErr != nil {
- return statusErr
+ if se := cs.s.Status().Err(); se != nil {
+ return se
}
- return io.EOF // indicates successful end of stream.
+ cs.finish(err)
+ return nil
}
return toRPCErr(err)
}
- if EnableTracing {
- a.mu.Lock()
- if a.trInfo.tr != nil {
- a.trInfo.tr.LazyLog(&payload{sent: false, msg: m}, true)
- }
- a.mu.Unlock()
- }
- if inPayload != nil {
- a.statsHandler.HandleRPC(cs.ctx, inPayload)
- }
- if channelz.IsOn() {
- a.t.IncrMsgRecv()
- }
- if cs.desc.ServerStreams {
- // Subsequent messages should be received by subsequent RecvMsg calls.
- return nil
- }
-
- // Special handling for non-server-streaming RPCs.
- // This recv expects EOF or errors, so we don't collect inPayload.
- err = recv(a.p, cs.codec, a.s, a.dc, m, *cs.callInfo.maxReceiveMessageSize, nil, a.decomp)
- if err == nil {
- return toRPCErr(errors.New("grpc: client streaming protocol violation: get <nil>, want <EOF>"))
+ if _, ok := err.(transport.ConnectionError); !ok {
+ cs.closeTransportStream(err)
}
if err == io.EOF {
- return a.s.Status().Err() // non-server streaming Recv returns nil on success
+ if statusErr := cs.s.Status().Err(); statusErr != nil {
+ return statusErr
+ }
+ // Returns io.EOF to indicate the end of the stream.
+ return
}
return toRPCErr(err)
}
-func (a *csAttempt) finish(err error) {
- a.mu.Lock()
- if a.finished {
- a.mu.Unlock()
- return
+func (cs *clientStream) CloseSend() (err error) {
+ err = cs.t.Write(cs.s, nil, nil, &transport.Options{Last: true})
+ defer func() {
+ if err != nil {
+ cs.finish(err)
+ }
+ }()
+ if err == nil || err == io.EOF {
+ return nil
}
- a.finished = true
- if err == io.EOF {
- // Ending a stream with EOF indicates a success.
- err = nil
+ if _, ok := err.(transport.ConnectionError); !ok {
+ cs.closeTransportStream(err)
}
- if a.s != nil {
- a.t.CloseStream(a.s, err)
+ err = toRPCErr(err)
+ return
+}
+
+func (cs *clientStream) closeTransportStream(err error) {
+ cs.mu.Lock()
+ if cs.closed {
+ cs.mu.Unlock()
+ return
}
+ cs.closed = true
+ cs.mu.Unlock()
+ cs.t.CloseStream(cs.s, err)
+}
- if a.done != nil {
- br := false
- var tr metadata.MD
- if a.s != nil {
- br = a.s.BytesReceived()
- tr = a.s.Trailer()
+func (cs *clientStream) finish(err error) {
+ cs.mu.Lock()
+ defer cs.mu.Unlock()
+ if cs.finished {
+ return
+ }
+ cs.finished = true
+ defer func() {
+ if cs.cancel != nil {
+ cs.cancel()
}
- a.done(balancer.DoneInfo{
+ }()
+ for _, o := range cs.opts {
+ o.after(cs.c)
+ }
+ if cs.done != nil {
+ cs.done(balancer.DoneInfo{
Err: err,
- Trailer: tr,
- BytesSent: a.s != nil,
- BytesReceived: br,
+ BytesSent: true,
+ BytesReceived: cs.s.BytesReceived(),
})
+ cs.done = nil
}
- if a.statsHandler != nil {
+ if cs.statsHandler != nil {
end := &stats.End{
- Client: true,
- BeginTime: a.cs.beginTime,
- EndTime: time.Now(),
- Error: err,
+ Client: true,
+ EndTime: time.Now(),
+ }
+ if err != io.EOF {
+ // end.Error is nil if the RPC finished successfully.
+ end.Error = toRPCErr(err)
}
- a.statsHandler.HandleRPC(a.cs.ctx, end)
+ cs.statsHandler.HandleRPC(cs.statsCtx, end)
}
- if a.trInfo.tr != nil {
- if err == nil {
- a.trInfo.tr.LazyPrintf("RPC: [OK]")
+ if !cs.tracing {
+ return
+ }
+ if cs.trInfo.tr != nil {
+ if err == nil || err == io.EOF {
+ cs.trInfo.tr.LazyPrintf("RPC: [OK]")
} else {
- a.trInfo.tr.LazyPrintf("RPC: [%v]", err)
- a.trInfo.tr.SetError()
+ cs.trInfo.tr.LazyPrintf("RPC: [%v]", err)
+ cs.trInfo.tr.SetError()
}
- a.trInfo.tr.Finish()
- a.trInfo.tr = nil
+ cs.trInfo.tr.Finish()
+ cs.trInfo.tr = nil
}
- a.mu.Unlock()
}
-// ServerStream defines the server-side behavior of a streaming RPC.
-//
-// All errors returned from ServerStream methods are compatible with the
-// status package.
+// ServerStream defines the interface a server stream has to satisfy.
type ServerStream interface {
// SetHeader sets the header metadata. It may be called multiple times.
// When called multiple times, all the provided metadata will be merged.
@@ -869,41 +585,15 @@ type ServerStream interface {
// SetTrailer sets the trailer metadata which will be sent with the RPC status.
// When called more than once, all the provided metadata will be merged.
SetTrailer(metadata.MD)
- // Context returns the context for this stream.
- Context() context.Context
- // SendMsg sends a message. On error, SendMsg aborts the stream and the
- // error is returned directly.
- //
- // SendMsg blocks until:
- // - There is sufficient flow control to schedule m with the transport, or
- // - The stream is done, or
- // - The stream breaks.
- //
- // SendMsg does not wait until the message is received by the client. An
- // untimely stream closure may result in lost messages.
- //
- // It is safe to have a goroutine calling SendMsg and another goroutine
- // calling RecvMsg on the same stream at the same time, but it is not safe
- // to call SendMsg on the same stream in different goroutines.
- SendMsg(m interface{}) error
- // RecvMsg blocks until it receives a message into m or the stream is
- // done. It returns io.EOF when the client has performed a CloseSend. On
- // any non-EOF error, the stream is aborted and the error contains the
- // RPC status.
- //
- // It is safe to have a goroutine calling SendMsg and another goroutine
- // calling RecvMsg on the same stream at the same time, but it is not
- // safe to call RecvMsg on the same stream in different goroutines.
- RecvMsg(m interface{}) error
+ Stream
}
// serverStream implements a server side Stream.
type serverStream struct {
- ctx context.Context
t transport.ServerTransport
s *transport.Stream
p *parser
- codec baseCodec
+ codec Codec
cp Compressor
dc Decompressor
@@ -920,7 +610,7 @@ type serverStream struct {
}
func (ss *serverStream) Context() context.Context {
- return ss.ctx
+ return ss.s.Context()
}
func (ss *serverStream) SetHeader(md metadata.MD) error {
@@ -939,6 +629,7 @@ func (ss *serverStream) SetTrailer(md metadata.MD) {
return
}
ss.s.SetTrailer(md)
+ return
}
func (ss *serverStream) SendMsg(m interface{}) (err error) {
@@ -959,28 +650,24 @@ func (ss *serverStream) SendMsg(m interface{}) (err error) {
st, _ := status.FromError(toRPCErr(err))
ss.t.WriteStatus(ss.s, st)
}
- if channelz.IsOn() && err == nil {
- ss.t.IncrMsgSent()
- }
}()
- data, err := encode(ss.codec, m)
- if err != nil {
- return err
+ var outPayload *stats.OutPayload
+ if ss.statsHandler != nil {
+ outPayload = &stats.OutPayload{}
}
- compData, err := compress(data, ss.cp, ss.comp)
+ hdr, data, err := encode(ss.codec, m, ss.cp, outPayload, ss.comp)
if err != nil {
return err
}
- hdr, payload := msgHeader(data, compData)
- // TODO(dfawley): should we be checking len(data) instead?
- if len(payload) > ss.maxSendMessageSize {
- return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", len(payload), ss.maxSendMessageSize)
+ if len(data) > ss.maxSendMessageSize {
+ return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", len(data), ss.maxSendMessageSize)
}
- if err := ss.t.Write(ss.s, hdr, payload, &transport.Options{Last: false}); err != nil {
+ if err := ss.t.Write(ss.s, hdr, data, &transport.Options{Last: false}); err != nil {
return toRPCErr(err)
}
- if ss.statsHandler != nil {
- ss.statsHandler.HandleRPC(ss.s.Context(), outPayload(false, m, data, payload, time.Now()))
+ if outPayload != nil {
+ outPayload.SentTime = time.Now()
+ ss.statsHandler.HandleRPC(ss.s.Context(), outPayload)
}
return nil
}
@@ -1003,9 +690,6 @@ func (ss *serverStream) RecvMsg(m interface{}) (err error) {
st, _ := status.FromError(toRPCErr(err))
ss.t.WriteStatus(ss.s, st)
}
- if channelz.IsOn() && err == nil {
- ss.t.IncrMsgRecv()
- }
}()
var inPayload *stats.InPayload
if ss.statsHandler != nil {
@@ -1029,5 +713,9 @@ func (ss *serverStream) RecvMsg(m interface{}) (err error) {
// MethodFromServerStream returns the method string for the input stream.
// The returned string is in the format of "/service/method".
func MethodFromServerStream(stream ServerStream) (string, bool) {
- return Method(stream.Context())
+ s, ok := transport.StreamFromContext(stream.Context())
+ if !ok {
+ return "", ok
+ }
+ return s.Method(), ok
}
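As the comment above states, full method strings have the form "/service/method". A hypothetical helper, not part of the vendored package, that splits such a string:

package main

import (
	"fmt"
	"strings"
)

// splitFullMethod returns the service and method components of a full method
// string like "/service/method".
func splitFullMethod(full string) (service, method string, ok bool) {
	if !strings.HasPrefix(full, "/") {
		return "", "", false
	}
	pos := strings.LastIndex(full, "/")
	if pos == 0 {
		return "", "", false // no method component
	}
	return full[1:pos], full[pos+1:], true
}

func main() {
	fmt.Println(splitFullMethod("/helloworld.Greeter/SayHello")) // helloworld.Greeter SayHello true
}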
diff --git a/vendor/google.golang.org/grpc/internal/transport/bdp_estimator.go b/vendor/google.golang.org/grpc/transport/bdp_estimator.go
index 63cd2627c..63cd2627c 100644
--- a/vendor/google.golang.org/grpc/internal/transport/bdp_estimator.go
+++ b/vendor/google.golang.org/grpc/transport/bdp_estimator.go
diff --git a/vendor/google.golang.org/grpc/transport/control.go b/vendor/google.golang.org/grpc/transport/control.go
new file mode 100644
index 000000000..0474b0907
--- /dev/null
+++ b/vendor/google.golang.org/grpc/transport/control.go
@@ -0,0 +1,334 @@
+/*
+ *
+ * Copyright 2014 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package transport
+
+import (
+ "fmt"
+ "io"
+ "math"
+ "sync"
+ "time"
+
+ "golang.org/x/net/http2"
+ "golang.org/x/net/http2/hpack"
+)
+
+const (
+ // The default flow-control window size defined by the HTTP/2 spec.
+ defaultWindowSize = 65535
+ // The initial window size for flow control.
+ initialWindowSize = defaultWindowSize // for an RPC
+ infinity = time.Duration(math.MaxInt64)
+ defaultClientKeepaliveTime = infinity
+ defaultClientKeepaliveTimeout = time.Duration(20 * time.Second)
+ defaultMaxStreamsClient = 100
+ defaultMaxConnectionIdle = infinity
+ defaultMaxConnectionAge = infinity
+ defaultMaxConnectionAgeGrace = infinity
+ defaultServerKeepaliveTime = time.Duration(2 * time.Hour)
+ defaultServerKeepaliveTimeout = time.Duration(20 * time.Second)
+ defaultKeepalivePolicyMinTime = time.Duration(5 * time.Minute)
+ // The max window size permitted by the HTTP/2 spec.
+ maxWindowSize = math.MaxInt32
+ // defaultLocalSendQuota is the default number of data bytes that each
+ // stream can schedule for writing before some of it is flushed out.
+ defaultLocalSendQuota = 128 * 1024
+)
+
+// The following define the various control items that can flow through
+// the transport's control buffer. They represent different aspects of
+// control tasks, e.g., flow control, settings, and stream resets.
+
+type headerFrame struct {
+ streamID uint32
+ hf []hpack.HeaderField
+ endStream bool
+}
+
+func (*headerFrame) item() {}
+
+type continuationFrame struct {
+ streamID uint32
+ endHeaders bool
+ headerBlockFragment []byte
+}
+
+type dataFrame struct {
+ streamID uint32
+ endStream bool
+ d []byte
+ f func()
+}
+
+func (*dataFrame) item() {}
+
+func (*continuationFrame) item() {}
+
+type windowUpdate struct {
+ streamID uint32
+ increment uint32
+}
+
+func (*windowUpdate) item() {}
+
+type settings struct {
+ ss []http2.Setting
+}
+
+func (*settings) item() {}
+
+type settingsAck struct {
+}
+
+func (*settingsAck) item() {}
+
+type resetStream struct {
+ streamID uint32
+ code http2.ErrCode
+}
+
+func (*resetStream) item() {}
+
+type goAway struct {
+ code http2.ErrCode
+ debugData []byte
+ headsUp bool
+ closeConn bool
+}
+
+func (*goAway) item() {}
+
+type flushIO struct {
+ closeTr bool
+}
+
+func (*flushIO) item() {}
+
+type ping struct {
+ ack bool
+ data [8]byte
+}
+
+func (*ping) item() {}
+
+// quotaPool is a pool that accumulates quota and hands it out to callers of
+// get() as it becomes available.
+type quotaPool struct {
+ mu sync.Mutex
+ c chan struct{}
+ version uint32
+ quota int
+}
+
+// newQuotaPool creates a quotaPool which has quota q available to consume.
+func newQuotaPool(q int) *quotaPool {
+ qb := &quotaPool{
+ quota: q,
+ c: make(chan struct{}, 1),
+ }
+ return qb
+}
+
+// add increases the available quota by v and wakes up any waiter blocked in
+// get().
+func (qb *quotaPool) add(v int) {
+ qb.mu.Lock()
+ defer qb.mu.Unlock()
+ qb.lockedAdd(v)
+}
+
+func (qb *quotaPool) lockedAdd(v int) {
+ var wakeUp bool
+ if qb.quota <= 0 {
+ wakeUp = true // Wake up potential waiters.
+ }
+ qb.quota += v
+ if wakeUp && qb.quota > 0 {
+ select {
+ case qb.c <- struct{}{}:
+ default:
+ }
+ }
+}
+
+func (qb *quotaPool) addAndUpdate(v int) {
+ qb.mu.Lock()
+ qb.lockedAdd(v)
+ qb.version++
+ qb.mu.Unlock()
+}
+
+func (qb *quotaPool) get(v int, wc waiters) (int, uint32, error) {
+ qb.mu.Lock()
+ if qb.quota > 0 {
+ if v > qb.quota {
+ v = qb.quota
+ }
+ qb.quota -= v
+ ver := qb.version
+ qb.mu.Unlock()
+ return v, ver, nil
+ }
+ qb.mu.Unlock()
+ for {
+ select {
+ case <-wc.ctx.Done():
+ return 0, 0, ContextErr(wc.ctx.Err())
+ case <-wc.tctx.Done():
+ return 0, 0, ErrConnClosing
+ case <-wc.done:
+ return 0, 0, io.EOF
+ case <-wc.goAway:
+ return 0, 0, errStreamDrain
+ case <-qb.c:
+ qb.mu.Lock()
+ if qb.quota > 0 {
+ if v > qb.quota {
+ v = qb.quota
+ }
+ qb.quota -= v
+ ver := qb.version
+ if qb.quota > 0 {
+ select {
+ case qb.c <- struct{}{}:
+ default:
+ }
+ }
+ qb.mu.Unlock()
+ return v, ver, nil
+
+ }
+ qb.mu.Unlock()
+ }
+ }
+}
+
+func (qb *quotaPool) compareAndExecute(version uint32, success, failure func()) bool {
+ qb.mu.Lock()
+ if version == qb.version {
+ success()
+ qb.mu.Unlock()
+ return true
+ }
+ failure()
+ qb.mu.Unlock()
+ return false
+}
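quotaPool above pairs an integer counter with a 1-buffered channel used purely as a wakeup signal for blocked consumers. A stripped-down sketch of that pattern, with illustrative names only; it is not the vendored type:

package main

import (
	"fmt"
	"sync"
)

type semQuota struct {
	mu    sync.Mutex
	quota int
	c     chan struct{} // 1-buffered wakeup signal
}

func newSemQuota(q int) *semQuota {
	return &semQuota{quota: q, c: make(chan struct{}, 1)}
}

// add returns v units of quota and wakes up a waiter if quota was exhausted.
func (s *semQuota) add(v int) {
	s.mu.Lock()
	wakeUp := s.quota <= 0
	s.quota += v
	if wakeUp && s.quota > 0 {
		select {
		case s.c <- struct{}{}: // wake one waiter
		default: // a wakeup is already pending
		}
	}
	s.mu.Unlock()
}

// get consumes up to v units, blocking until some quota is available.
func (s *semQuota) get(v int) int {
	for {
		s.mu.Lock()
		if s.quota > 0 {
			if v > s.quota {
				v = s.quota
			}
			s.quota -= v
			s.mu.Unlock()
			return v
		}
		s.mu.Unlock()
		<-s.c // block until add() signals that quota is back
	}
}

func main() {
	q := newSemQuota(10)
	fmt.Println(q.get(4))  // 4
	fmt.Println(q.get(8))  // 6: only what is left
	q.add(5)
	fmt.Println(q.get(20)) // 5
}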
+
+// inFlow deals with inbound flow control
+type inFlow struct {
+ mu sync.Mutex
+ // The inbound flow control limit for pending data.
+ limit uint32
+ // pendingData is the total amount of data that has been received but not
+ // yet consumed by the application.
+ pendingData uint32
+ // The amount of data the application has consumed but for which gRPC has
+ // not yet sent a window update. Used to reduce window-update frequency.
+ pendingUpdate uint32
+ // delta is the extra window credit granted by the receiver when the
+ // application is reading a message larger than the inFlow limit.
+ delta uint32
+}
+
+// newLimit updates the inflow window to a new value n.
+// It assumes that n is always greater than the old limit.
+func (f *inFlow) newLimit(n uint32) uint32 {
+ f.mu.Lock()
+ defer f.mu.Unlock()
+ d := n - f.limit
+ f.limit = n
+ return d
+}
+
+func (f *inFlow) maybeAdjust(n uint32) uint32 {
+ if n > uint32(math.MaxInt32) {
+ n = uint32(math.MaxInt32)
+ }
+ f.mu.Lock()
+ defer f.mu.Unlock()
+ // estSenderQuota is the receiver's view of the maximum number of bytes the sender
+ // can send without a window update.
+ estSenderQuota := int32(f.limit - (f.pendingData + f.pendingUpdate))
+ // estUntransmittedData is the maximum number of bytes the sender might not have put
+ // on the wire yet. A value of 0 or less means that we have already received all or
+ // more bytes than the application is requesting to read.
+ estUntransmittedData := int32(n - f.pendingData) // Casting into int32 since it could be negative.
+ // If estUntransmittedData is larger than estSenderQuota, the sender cannot
+ // finish this message without a window update, so we grant credit beyond
+ // the limit while there is an active read request from the application.
+ if estUntransmittedData > estSenderQuota {
+ // The sender's window must not exceed 2^31 - 1, as specified in the HTTP/2 spec.
+ if f.limit+n > maxWindowSize {
+ f.delta = maxWindowSize - f.limit
+ } else {
+ // Send a window update for the whole message, not just the difference between
+ // estUntransmittedData and estSenderQuota. This helps when the message is
+ // padded; we fall back on the currently available window (at least a quarter of the limit).
+ f.delta = n
+ }
+ return f.delta
+ }
+ return 0
+}
+
+// onData is invoked when some data frame is received. It updates pendingData.
+func (f *inFlow) onData(n uint32) error {
+ f.mu.Lock()
+ defer f.mu.Unlock()
+ f.pendingData += n
+ if f.pendingData+f.pendingUpdate > f.limit+f.delta {
+ return fmt.Errorf("received %d-bytes data exceeding the limit %d bytes", f.pendingData+f.pendingUpdate, f.limit)
+ }
+ return nil
+}
+
+// onRead is invoked when the application reads the data. It returns the window size
+// to be sent to the peer.
+func (f *inFlow) onRead(n uint32) uint32 {
+ f.mu.Lock()
+ defer f.mu.Unlock()
+ if f.pendingData == 0 {
+ return 0
+ }
+ f.pendingData -= n
+ if n > f.delta {
+ n -= f.delta
+ f.delta = 0
+ } else {
+ f.delta -= n
+ n = 0
+ }
+ f.pendingUpdate += n
+ if f.pendingUpdate >= f.limit/4 {
+ wu := f.pendingUpdate
+ f.pendingUpdate = 0
+ return wu
+ }
+ return 0
+}
+
+func (f *inFlow) resetPendingUpdate() uint32 {
+ f.mu.Lock()
+ defer f.mu.Unlock()
+ n := f.pendingUpdate
+ f.pendingUpdate = 0
+ return n
+}
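onRead above batches window updates: one is only sent back to the peer once the application has consumed at least a quarter of the flow-control limit. A standalone arithmetic sketch of that rule (it ignores the delta handling and is not the vendored type):

package main

import "fmt"

func main() {
	const limit uint32 = 65535 // default HTTP/2 flow-control window
	var pendingUpdate uint32

	onRead := func(n uint32) uint32 {
		pendingUpdate += n
		if pendingUpdate >= limit/4 {
			wu := pendingUpdate
			pendingUpdate = 0
			return wu // batched window update to send
		}
		return 0 // keep accumulating
	}

	fmt.Println(onRead(10000)) // 0: 10000 < limit/4 (16383), no update yet
	fmt.Println(onRead(10000)) // 20000: threshold crossed, one batched update
}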
diff --git a/vendor/google.golang.org/grpc/internal/transport/go16.go b/vendor/google.golang.org/grpc/transport/go16.go
index e0d00115d..5babcf9b8 100644
--- a/vendor/google.golang.org/grpc/internal/transport/go16.go
+++ b/vendor/google.golang.org/grpc/transport/go16.go
@@ -25,7 +25,6 @@ import (
"net/http"
"google.golang.org/grpc/codes"
- "google.golang.org/grpc/status"
"golang.org/x/net/context"
)
@@ -35,15 +34,15 @@ func dialContext(ctx context.Context, network, address string) (net.Conn, error)
return (&net.Dialer{Cancel: ctx.Done()}).Dial(network, address)
}
-// ContextErr converts the error from context package into a status error.
-func ContextErr(err error) error {
+// ContextErr converts the error from context package into a StreamError.
+func ContextErr(err error) StreamError {
switch err {
case context.DeadlineExceeded:
- return status.Error(codes.DeadlineExceeded, err.Error())
+ return streamErrorf(codes.DeadlineExceeded, "%v", err)
case context.Canceled:
- return status.Error(codes.Canceled, err.Error())
+ return streamErrorf(codes.Canceled, "%v", err)
}
- return status.Errorf(codes.Internal, "Unexpected error from context packet: %v", err)
+ return streamErrorf(codes.Internal, "Unexpected error from context packet: %v", err)
}
// contextFromRequest returns a background context.
diff --git a/vendor/google.golang.org/grpc/internal/transport/go17.go b/vendor/google.golang.org/grpc/transport/go17.go
index 4d515b00d..b7fa6bdb9 100644
--- a/vendor/google.golang.org/grpc/internal/transport/go17.go
+++ b/vendor/google.golang.org/grpc/transport/go17.go
@@ -26,7 +26,6 @@ import (
"net/http"
"google.golang.org/grpc/codes"
- "google.golang.org/grpc/status"
netctx "golang.org/x/net/context"
)
@@ -36,15 +35,15 @@ func dialContext(ctx context.Context, network, address string) (net.Conn, error)
return (&net.Dialer{}).DialContext(ctx, network, address)
}
-// ContextErr converts the error from context package into a status error.
-func ContextErr(err error) error {
+// ContextErr converts the error from context package into a StreamError.
+func ContextErr(err error) StreamError {
switch err {
case context.DeadlineExceeded, netctx.DeadlineExceeded:
- return status.Error(codes.DeadlineExceeded, err.Error())
+ return streamErrorf(codes.DeadlineExceeded, "%v", err)
case context.Canceled, netctx.Canceled:
- return status.Error(codes.Canceled, err.Error())
+ return streamErrorf(codes.Canceled, "%v", err)
}
- return status.Errorf(codes.Internal, "Unexpected error from context packet: %v", err)
+ return streamErrorf(codes.Internal, "Unexpected error from context packet: %v", err)
}
// contextFromRequest returns a context from the HTTP Request.
diff --git a/vendor/google.golang.org/grpc/internal/transport/handler_server.go b/vendor/google.golang.org/grpc/transport/handler_server.go
index c6fb4b9c1..27c4ebb5f 100644
--- a/vendor/google.golang.org/grpc/internal/transport/handler_server.go
+++ b/vendor/google.golang.org/grpc/transport/handler_server.go
@@ -40,24 +40,20 @@ import (
"google.golang.org/grpc/credentials"
"google.golang.org/grpc/metadata"
"google.golang.org/grpc/peer"
- "google.golang.org/grpc/stats"
"google.golang.org/grpc/status"
)
// NewServerHandlerTransport returns a ServerTransport handling gRPC
// from inside an http.Handler. It requires that the http Server
// supports HTTP/2.
-func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request, stats stats.Handler) (ServerTransport, error) {
+func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request) (ServerTransport, error) {
if r.ProtoMajor != 2 {
return nil, errors.New("gRPC requires HTTP/2")
}
if r.Method != "POST" {
return nil, errors.New("invalid gRPC request method")
}
- contentType := r.Header.Get("Content-Type")
- // TODO: do we assume contentType is lowercase? we did before
- contentSubtype, validContentType := contentSubtype(contentType)
- if !validContentType {
+ if !validContentType(r.Header.Get("Content-Type")) {
return nil, errors.New("invalid gRPC request content-type")
}
if _, ok := w.(http.Flusher); !ok {
@@ -68,37 +64,34 @@ func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request, stats sta
}
st := &serverHandlerTransport{
- rw: w,
- req: r,
- closedCh: make(chan struct{}),
- writes: make(chan func()),
- contentType: contentType,
- contentSubtype: contentSubtype,
- stats: stats,
+ rw: w,
+ req: r,
+ closedCh: make(chan struct{}),
+ writes: make(chan func()),
}
if v := r.Header.Get("grpc-timeout"); v != "" {
to, err := decodeTimeout(v)
if err != nil {
- return nil, status.Errorf(codes.Internal, "malformed time-out: %v", err)
+ return nil, streamErrorf(codes.Internal, "malformed time-out: %v", err)
}
st.timeoutSet = true
st.timeout = to
}
- metakv := []string{"content-type", contentType}
+ var metakv []string
if r.Host != "" {
metakv = append(metakv, ":authority", r.Host)
}
for k, vv := range r.Header {
k = strings.ToLower(k)
- if isReservedHeader(k) && !isWhitelistedHeader(k) {
+ if isReservedHeader(k) && !isWhitelistedPseudoHeader(k) {
continue
}
for _, v := range vv {
v, err := decodeMetadataHeader(k, v)
if err != nil {
- return nil, status.Errorf(codes.Internal, "malformed binary metadata: %v", err)
+ return nil, streamErrorf(codes.InvalidArgument, "malformed binary metadata: %v", err)
}
metakv = append(metakv, k, v)
}
@@ -133,14 +126,6 @@ type serverHandlerTransport struct {
// block concurrent WriteStatus calls
// e.g. grpc/(*serverStream).SendMsg/RecvMsg
writeStatusMu sync.Mutex
-
- // we just mirror the request content-type
- contentType string
- // we store both contentType and contentSubtype so we don't keep recreating them
- // TODO make sure this is consistent across handler_server and http2_server
- contentSubtype string
-
- stats stats.Handler
}
func (ht *serverHandlerTransport) Close() error {
@@ -234,12 +219,9 @@ func (ht *serverHandlerTransport) WriteStatus(s *Stream, st *status.Status) erro
})
if err == nil { // transport has not been closed
- if ht.stats != nil {
- ht.stats.HandleRPC(s.Context(), &stats.OutTrailer{})
- }
+ ht.Close()
close(ht.writes)
}
- ht.Close()
return err
}
@@ -253,7 +235,7 @@ func (ht *serverHandlerTransport) writeCommonHeaders(s *Stream) {
h := ht.rw.Header()
h["Date"] = nil // suppress Date to make tests happy; TODO: restore
- h.Set("Content-Type", ht.contentType)
+ h.Set("Content-Type", "application/grpc")
// Predeclare trailers we'll set later in WriteStatus (after the body).
// This is a SHOULD in the HTTP RFC, and the way you add (known)
@@ -274,12 +256,14 @@ func (ht *serverHandlerTransport) Write(s *Stream, hdr []byte, data []byte, opts
ht.writeCommonHeaders(s)
ht.rw.Write(hdr)
ht.rw.Write(data)
- ht.rw.(http.Flusher).Flush()
+ if !opts.Delay {
+ ht.rw.(http.Flusher).Flush()
+ }
})
}
func (ht *serverHandlerTransport) WriteHeader(s *Stream, md metadata.MD) error {
- err := ht.do(func() {
+ return ht.do(func() {
ht.writeCommonHeaders(s)
h := ht.rw.Header()
for k, vv := range md {
@@ -295,13 +279,6 @@ func (ht *serverHandlerTransport) WriteHeader(s *Stream, md metadata.MD) error {
ht.rw.WriteHeader(200)
ht.rw.(http.Flusher).Flush()
})
-
- if err == nil {
- if ht.stats != nil {
- ht.stats.HandleRPC(s.Context(), &stats.OutHeader{})
- }
- }
- return err
}
func (ht *serverHandlerTransport) HandleStreams(startStream func(*Stream), traceCtx func(context.Context, string) context.Context) {
@@ -326,24 +303,23 @@ func (ht *serverHandlerTransport) HandleStreams(startStream func(*Stream), trace
go func() {
select {
case <-requestOver:
+ return
case <-ht.closedCh:
case <-clientGone:
}
cancel()
- ht.Close()
}()
req := ht.req
s := &Stream{
- id: 0, // irrelevant
- requestRead: func(int) {},
- cancel: cancel,
- buf: newRecvBuffer(),
- st: ht,
- method: req.URL.Path,
- recvCompress: req.Header.Get("grpc-encoding"),
- contentSubtype: ht.contentSubtype,
+ id: 0, // irrelevant
+ requestRead: func(int) {},
+ cancel: cancel,
+ buf: newRecvBuffer(),
+ st: ht,
+ method: req.URL.Path,
+ recvCompress: req.Header.Get("grpc-encoding"),
}
pr := &peer.Peer{
Addr: ht.RemoteAddr(),
@@ -352,18 +328,10 @@ func (ht *serverHandlerTransport) HandleStreams(startStream func(*Stream), trace
pr.AuthInfo = credentials.TLSInfo{State: *req.TLS}
}
ctx = metadata.NewIncomingContext(ctx, ht.headerMD)
- s.ctx = peer.NewContext(ctx, pr)
- if ht.stats != nil {
- s.ctx = ht.stats.TagRPC(s.ctx, &stats.RPCTagInfo{FullMethodName: s.method})
- inHeader := &stats.InHeader{
- FullMethod: s.method,
- RemoteAddr: ht.RemoteAddr(),
- Compression: s.recvCompress,
- }
- ht.stats.HandleRPC(s.ctx, inHeader)
- }
+ ctx = peer.NewContext(ctx, pr)
+ s.ctx = newContextWithStream(ctx, s)
s.trReader = &transportReader{
- reader: &recvBufferReader{ctx: s.ctx, ctxDone: s.ctx.Done(), recv: s.buf},
+ reader: &recvBufferReader{ctx: s.ctx, recv: s.buf},
windowHandler: func(int) {},
}
@@ -418,10 +386,6 @@ func (ht *serverHandlerTransport) runStream() {
}
}
-func (ht *serverHandlerTransport) IncrMsgSent() {}
-
-func (ht *serverHandlerTransport) IncrMsgRecv() {}
-
func (ht *serverHandlerTransport) Drain() {
panic("Drain() is not implemented")
}
@@ -432,18 +396,18 @@ func (ht *serverHandlerTransport) Drain() {
// * io.EOF
// * io.ErrUnexpectedEOF
// * of type transport.ConnectionError
-// * an error from the status package
+// * of type transport.StreamError
func mapRecvMsgError(err error) error {
if err == io.EOF || err == io.ErrUnexpectedEOF {
return err
}
if se, ok := err.(http2.StreamError); ok {
if code, ok := http2ErrConvTab[se.Code]; ok {
- return status.Error(code, se.Error())
+ return StreamError{
+ Code: code,
+ Desc: se.Error(),
+ }
}
}
- if strings.Contains(err.Error(), "body closed by handler") {
- return status.Error(codes.Canceled, err.Error())
- }
return connectionErrorf(true, err, err.Error())
}
diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_client.go b/vendor/google.golang.org/grpc/transport/http2_client.go
index 0c3c47e2a..4a122692a 100644
--- a/vendor/google.golang.org/grpc/internal/transport/http2_client.go
+++ b/vendor/google.golang.org/grpc/transport/http2_client.go
@@ -19,10 +19,11 @@
package transport
import (
+ "bytes"
+ "fmt"
"io"
"math"
"net"
- "strconv"
"strings"
"sync"
"sync/atomic"
@@ -31,10 +32,8 @@ import (
"golang.org/x/net/context"
"golang.org/x/net/http2"
"golang.org/x/net/http2/hpack"
-
"google.golang.org/grpc/codes"
"google.golang.org/grpc/credentials"
- "google.golang.org/grpc/internal/channelz"
"google.golang.org/grpc/keepalive"
"google.golang.org/grpc/metadata"
"google.golang.org/grpc/peer"
@@ -46,17 +45,14 @@ import (
type http2Client struct {
ctx context.Context
cancel context.CancelFunc
- ctxDone <-chan struct{} // Cache the ctx.Done() chan.
userAgent string
md interface{}
conn net.Conn // underlying communication channel
- loopy *loopyWriter
remoteAddr net.Addr
localAddr net.Addr
authInfo credentials.AuthInfo // auth info about the connection
+ nextID uint32 // the next stream ID to be used
- readerDone chan struct{} // sync point to enable testing.
- writerDone chan struct{} // sync point to enable testing.
// goAway is closed to notify the upper layer (i.e., addrConn.transportMonitor)
// that the server sent GoAway on this transport.
goAway chan struct{}
@@ -64,57 +60,57 @@ type http2Client struct {
awakenKeepalive chan struct{}
framer *framer
+ hBuf *bytes.Buffer // the buffer for HPACK encoding
+ hEnc *hpack.Encoder // HPACK encoder
+
// controlBuf delivers all the control related tasks (e.g., window
// updates, reset streams, and various settings) to the controller.
controlBuf *controlBuffer
- fc *trInFlow
+ fc *inFlow
+ // sendQuotaPool provides flow control to outbound message.
+ sendQuotaPool *quotaPool
+ // localSendQuota limits the amount of data that can be scheduled
+ // for writing before it is actually written out.
+ localSendQuota *quotaPool
+ // streamsQuota limits the max number of concurrent streams.
+ streamsQuota *quotaPool
+
// The scheme used: https if TLS is on, http otherwise.
scheme string
isSecure bool
- perRPCCreds []credentials.PerRPCCredentials
+ creds []credentials.PerRPCCredentials
// Boolean to keep track of reading activity on transport.
// 1 is true and 0 is false.
- activity uint32 // Accessed atomically.
- kp keepalive.ClientParameters
- keepaliveEnabled bool
+ activity uint32 // Accessed atomically.
+ kp keepalive.ClientParameters
statsHandler stats.Handler
initialWindowSize int32
- // configured by peer through SETTINGS_MAX_HEADER_LIST_SIZE
- maxSendHeaderListSize *uint32
+ bdpEst *bdpEstimator
+ outQuotaVersion uint32
- bdpEst *bdpEstimator
// onSuccess is a callback that the client transport calls upon
// receiving the server preface to signal that a successful HTTP/2
// connection was established.
onSuccess func()
- maxConcurrentStreams uint32
- streamQuota int64
- streamsQuotaAvailable chan struct{}
- waitingStreams uint32
- nextID uint32
-
- mu sync.Mutex // guard the following variables
- state transportState
+ mu sync.Mutex // guard the following variables
+ state transportState // the state of underlying connection
activeStreams map[uint32]*Stream
+ // The max number of concurrent streams
+ maxStreams int
+ // the per-stream outbound flow control window size set by the peer.
+ streamSendQuota uint32
// prevGoAway ID records the Last-Stream-ID in the previous GOAway frame.
prevGoAwayID uint32
// goAwayReason records the http2.ErrCode and debug data received with the
// GoAway frame.
goAwayReason GoAwayReason
-
- // Fields below are for channelz metric collection.
- channelzID int64 // channelz unique identification number
- czData *channelzData
-
- onGoAway func(GoAwayReason)
- onClose func()
}
func dial(ctx context.Context, fn func(context.Context, string) (net.Conn, error), addr string) (net.Conn, error) {
@@ -125,6 +121,18 @@ func dial(ctx context.Context, fn func(context.Context, string) (net.Conn, error
}
func isTemporary(err error) bool {
+ switch err {
+ case io.EOF:
+ // Connection closures may be resolved upon retry, and are thus
+ // treated as temporary.
+ return true
+ case context.DeadlineExceeded:
+ // In Go 1.7, context.DeadlineExceeded implements Timeout(), and this
+ // special case is not needed. Until then, we need to keep this
+ // clause.
+ return true
+ }
+
switch err := err.(type) {
case interface {
Temporary() bool
@@ -137,13 +145,13 @@ func isTemporary(err error) bool {
// temporary.
return err.Timeout()
}
- return true
+ return false
}
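isTemporary above probes for the optional Temporary() and Timeout() methods that network errors may expose. A deterministic, hypothetical illustration of that interface-probing pattern, not the vendored function:

package main

import (
	"errors"
	"fmt"
)

// isRetryable mirrors the shape of the checks above: an error is treated as
// temporary if it advertises Temporary() or Timeout() behaviour.
func isRetryable(err error) bool {
	switch e := err.(type) {
	case interface{ Temporary() bool }:
		return e.Temporary()
	case interface{ Timeout() bool }:
		return e.Timeout()
	}
	return false
}

type timeoutErr struct{}

func (timeoutErr) Error() string { return "i/o timeout" }
func (timeoutErr) Timeout() bool { return true }

func main() {
	fmt.Println(isRetryable(timeoutErr{}))       // true
	fmt.Println(isRetryable(errors.New("boom"))) // false
}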
// newHTTP2Client constructs a connected ClientTransport to addr based on HTTP2
// and starts to receive messages on it. Non-nil error returns if construction
// fails.
-func newHTTP2Client(connectCtx, ctx context.Context, addr TargetInfo, opts ConnectOptions, onSuccess func(), onGoAway func(GoAwayReason), onClose func()) (_ *http2Client, err error) {
+func newHTTP2Client(connectCtx, ctx context.Context, addr TargetInfo, opts ConnectOptions, onSuccess func()) (_ ClientTransport, err error) {
scheme := "http"
ctx, cancel := context.WithCancel(ctx)
defer func() {
@@ -169,22 +177,14 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr TargetInfo, opts Conne
isSecure bool
authInfo credentials.AuthInfo
)
- transportCreds := opts.TransportCredentials
- perRPCCreds := opts.PerRPCCredentials
-
- if b := opts.CredsBundle; b != nil {
- if t := b.TransportCredentials(); t != nil {
- transportCreds = t
- }
- if t := b.PerRPCCredentials(); t != nil {
- perRPCCreds = append(perRPCCreds, t)
- }
- }
- if transportCreds != nil {
+ if creds := opts.TransportCredentials; creds != nil {
scheme = "https"
- conn, authInfo, err = transportCreds.ClientHandshake(connectCtx, addr.Authority, conn)
+ conn, authInfo, err = creds.ClientHandshake(connectCtx, addr.Authority, conn)
if err != nil {
- return nil, connectionErrorf(isTemporary(err), err, "transport: authentication handshake failed: %v", err)
+ // Credentials handshake errors are typically considered permanent
+ // to avoid retrying on e.g. bad certificates.
+ temp := isTemporary(err)
+ return nil, connectionErrorf(temp, err, "transport: authentication handshake failed: %v", err)
}
isSecure = true
}
@@ -202,45 +202,48 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr TargetInfo, opts Conne
icwz = opts.InitialConnWindowSize
dynamicWindow = false
}
- writeBufSize := opts.WriteBufferSize
- readBufSize := opts.ReadBufferSize
- maxHeaderListSize := defaultClientMaxHeaderListSize
- if opts.MaxHeaderListSize != nil {
- maxHeaderListSize = *opts.MaxHeaderListSize
+ var buf bytes.Buffer
+ writeBufSize := defaultWriteBufSize
+ if opts.WriteBufferSize > 0 {
+ writeBufSize = opts.WriteBufferSize
+ }
+ readBufSize := defaultReadBufSize
+ if opts.ReadBufferSize > 0 {
+ readBufSize = opts.ReadBufferSize
}
t := &http2Client{
- ctx: ctx,
- ctxDone: ctx.Done(), // Cache Done chan.
- cancel: cancel,
- userAgent: opts.UserAgent,
- md: addr.Metadata,
- conn: conn,
- remoteAddr: conn.RemoteAddr(),
- localAddr: conn.LocalAddr(),
- authInfo: authInfo,
- readerDone: make(chan struct{}),
- writerDone: make(chan struct{}),
- goAway: make(chan struct{}),
- awakenKeepalive: make(chan struct{}, 1),
- framer: newFramer(conn, writeBufSize, readBufSize, maxHeaderListSize),
- fc: &trInFlow{limit: uint32(icwz)},
- scheme: scheme,
- activeStreams: make(map[uint32]*Stream),
- isSecure: isSecure,
- perRPCCreds: perRPCCreds,
- kp: kp,
- statsHandler: opts.StatsHandler,
- initialWindowSize: initialWindowSize,
- onSuccess: onSuccess,
- nextID: 1,
- maxConcurrentStreams: defaultMaxStreamsClient,
- streamQuota: defaultMaxStreamsClient,
- streamsQuotaAvailable: make(chan struct{}, 1),
- czData: new(channelzData),
- onGoAway: onGoAway,
- onClose: onClose,
- }
- t.controlBuf = newControlBuffer(t.ctxDone)
+ ctx: ctx,
+ cancel: cancel,
+ userAgent: opts.UserAgent,
+ md: addr.Metadata,
+ conn: conn,
+ remoteAddr: conn.RemoteAddr(),
+ localAddr: conn.LocalAddr(),
+ authInfo: authInfo,
+ // Client-initiated stream IDs are odd, starting from 1.
+ nextID: 1,
+ goAway: make(chan struct{}),
+ awakenKeepalive: make(chan struct{}, 1),
+ hBuf: &buf,
+ hEnc: hpack.NewEncoder(&buf),
+ framer: newFramer(conn, writeBufSize, readBufSize),
+ controlBuf: newControlBuffer(),
+ fc: &inFlow{limit: uint32(icwz)},
+ sendQuotaPool: newQuotaPool(defaultWindowSize),
+ localSendQuota: newQuotaPool(defaultLocalSendQuota),
+ scheme: scheme,
+ state: reachable,
+ activeStreams: make(map[uint32]*Stream),
+ isSecure: isSecure,
+ creds: opts.PerRPCCredentials,
+ maxStreams: defaultMaxStreamsClient,
+ streamsQuota: newQuotaPool(defaultMaxStreamsClient),
+ streamSendQuota: defaultWindowSize,
+ kp: kp,
+ statsHandler: opts.StatsHandler,
+ initialWindowSize: initialWindowSize,
+ onSuccess: onSuccess,
+ }
if opts.InitialWindowSize >= defaultWindowSize {
t.initialWindowSize = opts.InitialWindowSize
dynamicWindow = false
@@ -264,18 +267,10 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr TargetInfo, opts Conne
}
t.statsHandler.HandleConn(t.ctx, connBegin)
}
- if channelz.IsOn() {
- t.channelzID = channelz.RegisterNormalSocket(t, opts.ChannelzParentID, "")
- }
- if t.kp.Time != infinity {
- t.keepaliveEnabled = true
- go t.keepalive()
- }
// Start the reader goroutine for incoming messages. Each transport has
// a dedicated goroutine which reads HTTP/2 frames from the network and
// dispatches them to the corresponding stream entity.
go t.reader()
-
// Send connection preface to server.
n, err := t.conn.Write(clientPreface)
if err != nil {
@@ -286,21 +281,14 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr TargetInfo, opts Conne
t.Close()
return nil, connectionErrorf(true, err, "transport: preface mismatch, wrote %d bytes; want %d", n, len(clientPreface))
}
- var ss []http2.Setting
-
if t.initialWindowSize != defaultWindowSize {
- ss = append(ss, http2.Setting{
+ err = t.framer.fr.WriteSettings(http2.Setting{
ID: http2.SettingInitialWindowSize,
Val: uint32(t.initialWindowSize),
})
+ } else {
+ err = t.framer.fr.WriteSettings()
}
- if opts.MaxHeaderListSize != nil {
- ss = append(ss, http2.Setting{
- ID: http2.SettingMaxHeaderListSize,
- Val: *opts.MaxHeaderListSize,
- })
- }
- err = t.framer.fr.WriteSettings(ss...)
if err != nil {
t.Close()
return nil, connectionErrorf(true, err, "transport: failed to write initial settings frame: %v", err)
@@ -312,35 +300,31 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr TargetInfo, opts Conne
return nil, connectionErrorf(true, err, "transport: failed to write window update: %v", err)
}
}
-
t.framer.writer.Flush()
go func() {
- t.loopy = newLoopyWriter(clientSide, t.framer, t.controlBuf, t.bdpEst)
- err := t.loopy.run()
- if err != nil {
- errorf("transport: loopyWriter.run returning. Err: %v", err)
- }
- // If it's a connection error, let reader goroutine handle it
- // since there might be data in the buffers.
- if _, ok := err.(net.Error); !ok {
- t.conn.Close()
- }
- close(t.writerDone)
+ loopyWriter(t.ctx, t.controlBuf, t.itemHandler)
+ t.conn.Close()
}()
+ if t.kp.Time != infinity {
+ go t.keepalive()
+ }
return t, nil
}
func (t *http2Client) newStream(ctx context.Context, callHdr *CallHdr) *Stream {
// TODO(zhaoq): Handle uint32 overflow of Stream.id.
s := &Stream{
- done: make(chan struct{}),
- method: callHdr.Method,
- sendCompress: callHdr.SendCompress,
- buf: newRecvBuffer(),
- headerChan: make(chan struct{}),
- contentSubtype: callHdr.ContentSubtype,
- }
- s.wq = newWriteQuota(defaultWriteQuota, s.done)
+ id: t.nextID,
+ done: make(chan struct{}),
+ goAway: make(chan struct{}),
+ method: callHdr.Method,
+ sendCompress: callHdr.SendCompress,
+ buf: newRecvBuffer(),
+ fc: &inFlow{limit: uint32(t.initialWindowSize)},
+ sendQuotaPool: newQuotaPool(int(t.streamSendQuota)),
+ headerChan: make(chan struct{}),
+ }
+ t.nextID += 2
s.requestRead = func(n int) {
t.adjustWindow(s, uint32(n))
}
@@ -350,18 +334,26 @@ func (t *http2Client) newStream(ctx context.Context, callHdr *CallHdr) *Stream {
s.ctx = ctx
s.trReader = &transportReader{
reader: &recvBufferReader{
- ctx: s.ctx,
- ctxDone: s.ctx.Done(),
- recv: s.buf,
+ ctx: s.ctx,
+ goAway: s.goAway,
+ recv: s.buf,
},
windowHandler: func(n int) {
t.updateWindow(s, uint32(n))
},
}
+ s.waiters = waiters{
+ ctx: s.ctx,
+ tctx: t.ctx,
+ done: s.done,
+ goAway: s.goAway,
+ }
return s
}
-func (t *http2Client) getPeer() *peer.Peer {
+// NewStream creates a stream and registers it with the transport as an
+// active stream.
+func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Stream, err error) {
pr := &peer.Peer{
Addr: t.remoteAddr,
}
@@ -369,17 +361,67 @@ func (t *http2Client) getPeer() *peer.Peer {
if t.authInfo != nil {
pr.AuthInfo = t.authInfo
}
- return pr
-}
-
-func (t *http2Client) createHeaderFields(ctx context.Context, callHdr *CallHdr) ([]hpack.HeaderField, error) {
- aud := t.createAudience(callHdr)
- authData, err := t.getTrAuthData(ctx, aud)
- if err != nil {
- return nil, err
+ ctx = peer.NewContext(ctx, pr)
+ var (
+ authData = make(map[string]string)
+ audience string
+ )
+ // Create an audience string only if needed.
+ if len(t.creds) > 0 || callHdr.Creds != nil {
+ // Construct URI required to get auth request metadata.
+ // Omit port if it is the default one.
+ host := strings.TrimSuffix(callHdr.Host, ":443")
+ pos := strings.LastIndex(callHdr.Method, "/")
+ if pos == -1 {
+ pos = len(callHdr.Method)
+ }
+ audience = "https://" + host + callHdr.Method[:pos]
}
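The audience string built in the block above trims a default ":443" port and drops the method name, keeping only the service path; e.g. Host "example.com:443" with Method "/helloworld.Greeter/SayHello" (hypothetical values) yields "https://example.com/helloworld.Greeter". A small sketch of that derivation, not the vendored code:

package main

import (
	"fmt"
	"strings"
)

// audience derives the per-RPC credentials audience from host and full method.
func audience(host, method string) string {
	host = strings.TrimSuffix(host, ":443") // omit the default HTTPS port
	pos := strings.LastIndex(method, "/")
	if pos == -1 {
		pos = len(method)
	}
	return "https://" + host + method[:pos]
}

func main() {
	// Prints "https://example.com/helloworld.Greeter".
	fmt.Println(audience("example.com:443", "/helloworld.Greeter/SayHello"))
}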
- callAuthData, err := t.getCallAuthData(ctx, aud, callHdr)
- if err != nil {
+ for _, c := range t.creds {
+ data, err := c.GetRequestMetadata(ctx, audience)
+ if err != nil {
+ return nil, streamErrorf(codes.Internal, "transport: %v", err)
+ }
+ for k, v := range data {
+ // Capital header names are illegal in HTTP/2.
+ k = strings.ToLower(k)
+ authData[k] = v
+ }
+ }
+ callAuthData := map[string]string{}
+ // Check if credentials.PerRPCCredentials were provided via call options.
+ // Note: if these credentials are provided both via dial options and call
+ // options, then both sets of credentials will be applied.
+ if callCreds := callHdr.Creds; callCreds != nil {
+ if !t.isSecure && callCreds.RequireTransportSecurity() {
+ return nil, streamErrorf(codes.Unauthenticated, "transport: cannot send secure credentials on an insecure connection")
+ }
+ data, err := callCreds.GetRequestMetadata(ctx, audience)
+ if err != nil {
+ return nil, streamErrorf(codes.Internal, "transport: %v", err)
+ }
+ for k, v := range data {
+ // Capital header names are illegal in HTTP/2
+ k = strings.ToLower(k)
+ callAuthData[k] = v
+ }
+ }
+ t.mu.Lock()
+ if t.activeStreams == nil {
+ t.mu.Unlock()
+ return nil, ErrConnClosing
+ }
+ if t.state == draining {
+ t.mu.Unlock()
+ return nil, errStreamDrain
+ }
+ if t.state != reachable {
+ t.mu.Unlock()
+ return nil, ErrConnClosing
+ }
+ t.mu.Unlock()
+ // Get a quota of 1 from streamsQuota.
+ if _, _, err := t.streamsQuota.get(1, waiters{ctx: ctx, tctx: t.ctx}); err != nil {
return nil, err
}
// TODO(mmukhi): Benchmark if the performance gets better if count the metadata and other header fields
@@ -392,12 +434,9 @@ func (t *http2Client) createHeaderFields(ctx context.Context, callHdr *CallHdr)
headerFields = append(headerFields, hpack.HeaderField{Name: ":scheme", Value: t.scheme})
headerFields = append(headerFields, hpack.HeaderField{Name: ":path", Value: callHdr.Method})
headerFields = append(headerFields, hpack.HeaderField{Name: ":authority", Value: callHdr.Host})
- headerFields = append(headerFields, hpack.HeaderField{Name: "content-type", Value: contentType(callHdr.ContentSubtype)})
+ headerFields = append(headerFields, hpack.HeaderField{Name: "content-type", Value: "application/grpc"})
headerFields = append(headerFields, hpack.HeaderField{Name: "user-agent", Value: t.userAgent})
headerFields = append(headerFields, hpack.HeaderField{Name: "te", Value: "trailers"})
- if callHdr.PreviousAttempts > 0 {
- headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-previous-rpc-attempts", Value: strconv.Itoa(callHdr.PreviousAttempts)})
- }
if callHdr.SendCompress != "" {
headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-encoding", Value: callHdr.SendCompress})
@@ -420,22 +459,7 @@ func (t *http2Client) createHeaderFields(ctx context.Context, callHdr *CallHdr)
if b := stats.OutgoingTrace(ctx); b != nil {
headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-trace-bin", Value: encodeBinHeader(b)})
}
-
- if md, added, ok := metadata.FromOutgoingContextRaw(ctx); ok {
- var k string
- for _, vv := range added {
- for i, v := range vv {
- if i%2 == 0 {
- k = v
- continue
- }
- // HTTP doesn't allow you to set pseudoheaders after non pseudoheaders were set.
- if isReservedHeader(k) {
- continue
- }
- headerFields = append(headerFields, hpack.HeaderField{Name: strings.ToLower(k), Value: encodeMetadataHeader(k, v)})
- }
- }
+ if md, ok := metadata.FromOutgoingContext(ctx); ok {
for k, vv := range md {
// HTTP doesn't allow you to set pseudoheaders after non pseudoheaders were set.
if isReservedHeader(k) {
@@ -456,202 +480,38 @@ func (t *http2Client) createHeaderFields(ctx context.Context, callHdr *CallHdr)
}
}
}
- return headerFields, nil
-}
-
-func (t *http2Client) createAudience(callHdr *CallHdr) string {
- // Create an audience string only if needed.
- if len(t.perRPCCreds) == 0 && callHdr.Creds == nil {
- return ""
- }
- // Construct URI required to get auth request metadata.
- // Omit port if it is the default one.
- host := strings.TrimSuffix(callHdr.Host, ":443")
- pos := strings.LastIndex(callHdr.Method, "/")
- if pos == -1 {
- pos = len(callHdr.Method)
- }
- return "https://" + host + callHdr.Method[:pos]
-}
-
-func (t *http2Client) getTrAuthData(ctx context.Context, audience string) (map[string]string, error) {
- authData := map[string]string{}
- for _, c := range t.perRPCCreds {
- data, err := c.GetRequestMetadata(ctx, audience)
- if err != nil {
- if _, ok := status.FromError(err); ok {
- return nil, err
- }
-
- return nil, status.Errorf(codes.Unauthenticated, "transport: %v", err)
- }
- for k, v := range data {
- // Capital header names are illegal in HTTP/2.
- k = strings.ToLower(k)
- authData[k] = v
- }
- }
- return authData, nil
-}
-
-func (t *http2Client) getCallAuthData(ctx context.Context, audience string, callHdr *CallHdr) (map[string]string, error) {
- callAuthData := map[string]string{}
- // Check if credentials.PerRPCCredentials were provided via call options.
- // Note: if these credentials are provided both via dial options and call
- // options, then both sets of credentials will be applied.
- if callCreds := callHdr.Creds; callCreds != nil {
- if !t.isSecure && callCreds.RequireTransportSecurity() {
- return nil, status.Error(codes.Unauthenticated, "transport: cannot send secure credentials on an insecure connection")
- }
- data, err := callCreds.GetRequestMetadata(ctx, audience)
- if err != nil {
- return nil, status.Errorf(codes.Internal, "transport: %v", err)
- }
- for k, v := range data {
- // Capital header names are illegal in HTTP/2
- k = strings.ToLower(k)
- callAuthData[k] = v
- }
+ t.mu.Lock()
+ if t.state == draining {
+ t.mu.Unlock()
+ t.streamsQuota.add(1)
+ return nil, errStreamDrain
}
- return callAuthData, nil
-}
-
-// NewStream creates a stream and registers it into the transport as "active"
-// streams.
-func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Stream, err error) {
- ctx = peer.NewContext(ctx, t.getPeer())
- headerFields, err := t.createHeaderFields(ctx, callHdr)
- if err != nil {
- return nil, err
+ if t.state != reachable {
+ t.mu.Unlock()
+ return nil, ErrConnClosing
}
s := t.newStream(ctx, callHdr)
- cleanup := func(err error) {
- if s.swapState(streamDone) == streamDone {
- // If it was already done, return.
- return
- }
- // The stream was unprocessed by the server.
- atomic.StoreUint32(&s.unprocessed, 1)
- s.write(recvMsg{err: err})
- close(s.done)
- // If headerChan isn't closed, then close it.
- if atomic.SwapUint32(&s.headerDone, 1) == 0 {
- close(s.headerChan)
+ t.activeStreams[s.id] = s
+ // If the number of active streams changes from 0 to 1, check whether
+ // keepalive has gone dormant. If so, wake it up.
+ if len(t.activeStreams) == 1 {
+ select {
+ case t.awakenKeepalive <- struct{}{}:
+ t.controlBuf.put(&ping{data: [8]byte{}})
+ // Fill the awakenKeepalive channel again as this channel must be
+ // kept non-writable except at the point that the keepalive()
+ // goroutine is waiting either to be awakened or shut down.
+ t.awakenKeepalive <- struct{}{}
+ default:
}
-
}
- hdr := &headerFrame{
+ t.controlBuf.put(&headerFrame{
+ streamID: s.id,
hf: headerFields,
endStream: false,
- initStream: func(id uint32) (bool, error) {
- t.mu.Lock()
- if state := t.state; state != reachable {
- t.mu.Unlock()
- // Do a quick cleanup.
- err := error(errStreamDrain)
- if state == closing {
- err = ErrConnClosing
- }
- cleanup(err)
- return false, err
- }
- t.activeStreams[id] = s
- if channelz.IsOn() {
- atomic.AddInt64(&t.czData.streamsStarted, 1)
- atomic.StoreInt64(&t.czData.lastStreamCreatedTime, time.Now().UnixNano())
- }
- var sendPing bool
- // If the number of active streams change from 0 to 1, then check if keepalive
- // has gone dormant. If so, wake it up.
- if len(t.activeStreams) == 1 && t.keepaliveEnabled {
- select {
- case t.awakenKeepalive <- struct{}{}:
- sendPing = true
- // Fill the awakenKeepalive channel again as this channel must be
- // kept non-writable except at the point that the keepalive()
- // goroutine is waiting either to be awaken or shutdown.
- t.awakenKeepalive <- struct{}{}
- default:
- }
- }
- t.mu.Unlock()
- return sendPing, nil
- },
- onOrphaned: cleanup,
- wq: s.wq,
- }
- firstTry := true
- var ch chan struct{}
- checkForStreamQuota := func(it interface{}) bool {
- if t.streamQuota <= 0 { // Can go negative if server decreases it.
- if firstTry {
- t.waitingStreams++
- }
- ch = t.streamsQuotaAvailable
- return false
- }
- if !firstTry {
- t.waitingStreams--
- }
- t.streamQuota--
- h := it.(*headerFrame)
- h.streamID = t.nextID
- t.nextID += 2
- s.id = h.streamID
- s.fc = &inFlow{limit: uint32(t.initialWindowSize)}
- if t.streamQuota > 0 && t.waitingStreams > 0 {
- select {
- case t.streamsQuotaAvailable <- struct{}{}:
- default:
- }
- }
- return true
- }
- var hdrListSizeErr error
- checkForHeaderListSize := func(it interface{}) bool {
- if t.maxSendHeaderListSize == nil {
- return true
- }
- hdrFrame := it.(*headerFrame)
- var sz int64
- for _, f := range hdrFrame.hf {
- if sz += int64(f.Size()); sz > int64(*t.maxSendHeaderListSize) {
- hdrListSizeErr = status.Errorf(codes.Internal, "header list size to send violates the maximum size (%d bytes) set by server", *t.maxSendHeaderListSize)
- return false
- }
- }
- return true
- }
- for {
- success, err := t.controlBuf.executeAndPut(func(it interface{}) bool {
- if !checkForStreamQuota(it) {
- return false
- }
- if !checkForHeaderListSize(it) {
- return false
- }
- return true
- }, hdr)
- if err != nil {
- return nil, err
- }
- if success {
- break
- }
- if hdrListSizeErr != nil {
- return nil, hdrListSizeErr
- }
- firstTry = false
- select {
- case <-ch:
- case <-s.ctx.Done():
- return nil, ContextErr(s.ctx.Err())
- case <-t.goAway:
- return nil, errStreamDrain
- case <-t.ctx.Done():
- return nil, ErrConnClosing
- }
- }
+ })
+ t.mu.Unlock()
+
if t.statsHandler != nil {
outHeader := &stats.OutHeader{
Client: true,
@@ -668,102 +528,86 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Strea
// CloseStream clears the footprint of a stream when the stream is not needed any more.
// This must not be executed in reader's goroutine.
func (t *http2Client) CloseStream(s *Stream, err error) {
- var (
- rst bool
- rstCode http2.ErrCode
- )
- if err != nil {
- rst = true
- rstCode = http2.ErrCodeCancel
- }
- t.closeStream(s, err, rst, rstCode, status.Convert(err), nil, false)
-}
-
-func (t *http2Client) closeStream(s *Stream, err error, rst bool, rstCode http2.ErrCode, st *status.Status, mdata map[string][]string, eosReceived bool) {
- // Set stream status to done.
- if s.swapState(streamDone) == streamDone {
- // If it was already done, return. If multiple closeStream calls
- // happen simultaneously, wait for the first to finish.
- <-s.done
+ t.mu.Lock()
+ if t.activeStreams == nil {
+ t.mu.Unlock()
return
}
- // status and trailers can be updated here without any synchronization because the stream goroutine will
- // only read it after it sees an io.EOF error from read or write and we'll write those errors
- // only after updating this.
- s.status = st
- if len(mdata) > 0 {
- s.trailer = mdata
- }
if err != nil {
- // This will unblock reads eventually.
+ // Notify in-flight streams before the deletion.
s.write(recvMsg{err: err})
}
- // If headerChan isn't closed, then close it.
- if atomic.SwapUint32(&s.headerDone, 1) == 0 {
- s.noHeaders = true
- close(s.headerChan)
- }
- cleanup := &cleanupStream{
- streamID: s.id,
- onWrite: func() {
- t.mu.Lock()
- if t.activeStreams != nil {
- delete(t.activeStreams, s.id)
- }
- t.mu.Unlock()
- if channelz.IsOn() {
- if eosReceived {
- atomic.AddInt64(&t.czData.streamsSucceeded, 1)
- } else {
- atomic.AddInt64(&t.czData.streamsFailed, 1)
- }
- }
- },
- rst: rst,
- rstCode: rstCode,
+ delete(t.activeStreams, s.id)
+ if t.state == draining && len(t.activeStreams) == 0 {
+ // The transport is draining and s is the last live stream on t.
+ t.mu.Unlock()
+ t.Close()
+ return
}
- addBackStreamQuota := func(interface{}) bool {
- t.streamQuota++
- if t.streamQuota > 0 && t.waitingStreams > 0 {
- select {
- case t.streamsQuotaAvailable <- struct{}{}:
- default:
- }
+ t.mu.Unlock()
+ // rstStream is true when the stream is being closed on the client side
+ // and the server needs to be informed about it by sending a RST_STREAM
+ // frame.
+ // To make sure this frame is written to the wire before the headers of the
+ // next stream waiting for streamsQuota, we add to the streamsQuota pool only
+ // after having acquired the writableChan to send RST_STREAM out (look at
+ // the controller() routine).
+ var rstStream bool
+ var rstError http2.ErrCode
+ defer func() {
+ // If the client doesn't have to send RST_STREAM to the server,
+ // we can safely add back to the streamsQuota pool now.
+ if !rstStream {
+ t.streamsQuota.add(1)
+ return
}
- return true
+ t.controlBuf.put(&resetStream{s.id, rstError})
+ }()
+ s.mu.Lock()
+ rstStream = s.rstStream
+ rstError = s.rstError
+ if s.state == streamDone {
+ s.mu.Unlock()
+ return
+ }
+ if !s.headerDone {
+ close(s.headerChan)
+ s.headerDone = true
+ }
+ s.state = streamDone
+ s.mu.Unlock()
+ if _, ok := err.(StreamError); ok {
+ rstStream = true
+ rstError = http2.ErrCodeCancel
}
- t.controlBuf.executeAndPut(addBackStreamQuota, cleanup)
- // This will unblock write.
- close(s.done)
}
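
The comments in CloseStream above describe an ordering rule: the stream-quota slot may be returned only after the RST_STREAM control item has been handled, so the reset frame cannot be overtaken by the headers of the next stream waiting on that quota. The following is a minimal, self-contained Go sketch of just that rule; controlBuf, quota, resetStream and writer are illustrative stand-ins, not the vendored types.

package main

import "fmt"

type resetStream struct{ streamID uint32 }

// writer drains the control queue; the quota slot for a reset stream is
// returned only after the RST_STREAM item has actually been handled, which is
// the ordering the comments above call out.
func writer(controlBuf <-chan interface{}, quota chan<- struct{}) {
	for it := range controlBuf {
		if rst, ok := it.(*resetStream); ok {
			fmt.Println("wrote RST_STREAM for stream", rst.streamID)
			quota <- struct{}{} // the slot becomes reusable only now
		}
	}
}

// closeStream returns the slot immediately when no reset is needed; otherwise
// it only queues the reset item and leaves the add-back to the writer.
func closeStream(controlBuf chan<- interface{}, quota chan<- struct{}, id uint32, sendRST bool) {
	if !sendRST {
		quota <- struct{}{}
		return
	}
	controlBuf <- &resetStream{streamID: id}
}

func main() {
	controlBuf := make(chan interface{}, 1)
	quota := make(chan struct{}, 1)
	go writer(controlBuf, quota)
	closeStream(controlBuf, quota, 3, true)
	<-quota // unblocks only after the RST_STREAM item was handled
	close(controlBuf)
}
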
// Close kicks off the shutdown process of the transport. This should be called
// only once on a transport. Once it is called, the transport should not be
// accessed any more.
-//
-// This method blocks until the addrConn that initiated this transport is
-// re-connected. This happens because t.onClose() begins reconnect logic at the
-// addrConn level and blocks until the addrConn is successfully connected.
func (t *http2Client) Close() error {
t.mu.Lock()
- // Make sure we only Close once.
if t.state == closing {
t.mu.Unlock()
return nil
}
t.state = closing
- streams := t.activeStreams
- t.activeStreams = nil
t.mu.Unlock()
- t.controlBuf.finish()
t.cancel()
err := t.conn.Close()
- if channelz.IsOn() {
- channelz.RemoveEntry(t.channelzID)
- }
+ t.mu.Lock()
+ streams := t.activeStreams
+ t.activeStreams = nil
+ t.mu.Unlock()
// Notify all active streams.
for _, s := range streams {
- t.closeStream(s, ErrConnClosing, false, http2.ErrCodeNo, status.New(codes.Unavailable, ErrConnClosing.Desc), nil, false)
+ s.mu.Lock()
+ if !s.headerDone {
+ close(s.headerChan)
+ s.headerDone = true
+ }
+ s.mu.Unlock()
+ s.write(recvMsg{err: ErrConnClosing})
}
if t.statsHandler != nil {
connEnd := &stats.ConnEnd{
@@ -771,7 +615,6 @@ func (t *http2Client) Close() error {
}
t.statsHandler.HandleConn(t.ctx, connEnd)
}
- go t.onClose()
return err
}
@@ -782,8 +625,8 @@ func (t *http2Client) Close() error {
// closing.
func (t *http2Client) GracefulClose() error {
t.mu.Lock()
- // Make sure we move to draining only from active.
- if t.state == draining || t.state == closing {
+ switch t.state {
+ case closing, draining:
t.mu.Unlock()
return nil
}
@@ -793,41 +636,112 @@ func (t *http2Client) GracefulClose() error {
if active == 0 {
return t.Close()
}
- t.controlBuf.put(&incomingGoAway{})
return nil
}
// Write formats the data into HTTP2 data frame(s) and sends it out. The caller
// should proceed only if Write returns nil.
func (t *http2Client) Write(s *Stream, hdr []byte, data []byte, opts *Options) error {
- if opts.Last {
- // If it's the last message, update stream state.
- if !s.compareAndSwapState(streamActive, streamWriteDone) {
- return errStreamDone
- }
- } else if s.getState() != streamActive {
- return errStreamDone
+ select {
+ case <-s.ctx.Done():
+ return ContextErr(s.ctx.Err())
+ case <-t.ctx.Done():
+ return ErrConnClosing
+ default:
}
- df := &dataFrame{
- streamID: s.id,
- endStream: opts.Last,
- }
- if hdr != nil || data != nil { // If it's not an empty data frame.
- // Add some data to grpc message header so that we can equally
- // distribute bytes across frames.
- emptyLen := http2MaxFrameLen - len(hdr)
- if emptyLen > len(data) {
- emptyLen = len(data)
- }
- hdr = append(hdr, data[:emptyLen]...)
- data = data[emptyLen:]
- df.h, df.d = hdr, data
- // TODO(mmukhi): The above logic in this if can be moved to loopyWriter's data handler.
- if err := s.wq.get(int32(len(hdr) + len(data))); err != nil {
- return err
+
+ if hdr == nil && data == nil && opts.Last {
+ // stream.CloseSend uses this to send an empty frame with endStream=true.
+ t.controlBuf.put(&dataFrame{streamID: s.id, endStream: true, f: func() {}})
+ return nil
+ }
+ // Add data to header frame so that we can equally distribute data across frames.
+ emptyLen := http2MaxFrameLen - len(hdr)
+ if emptyLen > len(data) {
+ emptyLen = len(data)
+ }
+ hdr = append(hdr, data[:emptyLen]...)
+ data = data[emptyLen:]
+ var (
+ streamQuota int
+ streamQuotaVer uint32
+ err error
+ )
+ for idx, r := range [][]byte{hdr, data} {
+ for len(r) > 0 {
+ size := http2MaxFrameLen
+ if size > len(r) {
+ size = len(r)
+ }
+ if streamQuota == 0 { // Used up all the locally cached stream quota.
+ // Get all the stream quota there is.
+ streamQuota, streamQuotaVer, err = s.sendQuotaPool.get(math.MaxInt32, s.waiters)
+ if err != nil {
+ return err
+ }
+ }
+ if size > streamQuota {
+ size = streamQuota
+ }
+
+ // Get size worth quota from transport.
+ tq, _, err := t.sendQuotaPool.get(size, s.waiters)
+ if err != nil {
+ return err
+ }
+ if tq < size {
+ size = tq
+ }
+ ltq, _, err := t.localSendQuota.get(size, s.waiters)
+ if err != nil {
+ return err
+ }
+ // Even if ltq is smaller than size, we don't adjust size, since
+ // ltq is only a soft limit.
+ streamQuota -= size
+ p := r[:size]
+ var endStream bool
+ // See if this is the last frame to be written.
+ if opts.Last {
+ if len(r)-size == 0 { // No more data in r after this iteration.
+ if idx == 0 { // We're writing data header.
+ if len(data) == 0 { // There's no data to follow.
+ endStream = true
+ }
+ } else { // We're writing data.
+ endStream = true
+ }
+ }
+ }
+ success := func() {
+ ltq := ltq
+ t.controlBuf.put(&dataFrame{streamID: s.id, endStream: endStream, d: p, f: func() { t.localSendQuota.add(ltq) }})
+ r = r[size:]
+ }
+ failure := func() { // The stream quota version must have changed.
+ // Our streamQuota cache is invalidated now, so give it back.
+ s.sendQuotaPool.lockedAdd(streamQuota + size)
+ }
+ if !s.sendQuotaPool.compareAndExecute(streamQuotaVer, success, failure) {
+ // Couldn't send this chunk out.
+ t.sendQuotaPool.add(size)
+ t.localSendQuota.add(ltq)
+ streamQuota = 0
+ }
}
}
- return t.controlBuf.put(df)
+ if streamQuota > 0 { // Add the left over quota back to stream.
+ s.sendQuotaPool.add(streamQuota)
+ }
+ if !opts.Last {
+ return nil
+ }
+ s.mu.Lock()
+ if s.state != streamDone {
+ s.state = streamWriteDone
+ }
+ s.mu.Unlock()
+ return nil
}
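
Write above spreads hdr and data across HTTP/2 DATA frames, first topping up the header chunk with leading data bytes so bytes are distributed evenly. A small, self-contained sketch of just that chunking follows; quota handling is omitted, and maxFrameLen (assumed to be 16384, mirroring http2MaxFrameLen) and splitIntoFrames are illustrative names.

package main

import "fmt"

const maxFrameLen = 16384 // assumption: mirrors http2MaxFrameLen

// splitIntoFrames chunks hdr+data into frame-sized pieces the way the Write
// path walks [][]byte{hdr, data}. Quota handling is left out; this only shows
// the chunking.
func splitIntoFrames(hdr, data []byte) [][]byte {
	// Top up the header chunk with leading data bytes so bytes are spread
	// evenly across frames, as the comment in Write describes.
	if pad := maxFrameLen - len(hdr); pad > 0 {
		if pad > len(data) {
			pad = len(data)
		}
		hdr = append(hdr, data[:pad]...)
		data = data[pad:]
	}
	var frames [][]byte
	for _, r := range [][]byte{hdr, data} {
		for len(r) > 0 {
			size := maxFrameLen
			if size > len(r) {
				size = len(r)
			}
			frames = append(frames, r[:size])
			r = r[size:]
		}
	}
	return frames
}

func main() {
	hdr := make([]byte, 5)      // a 5-byte gRPC message header
	data := make([]byte, 40000) // a payload larger than one frame
	for i, f := range splitIntoFrames(hdr, data) {
		fmt.Printf("frame %d: %d bytes\n", i, len(f))
	}
}
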
func (t *http2Client) getStream(f http2.Frame) (*Stream, bool) {
@@ -841,17 +755,34 @@ func (t *http2Client) getStream(f http2.Frame) (*Stream, bool) {
// of stream if the application is requesting data larger in size than
// the window.
func (t *http2Client) adjustWindow(s *Stream, n uint32) {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+ if s.state == streamDone {
+ return
+ }
if w := s.fc.maybeAdjust(n); w > 0 {
- t.controlBuf.put(&outgoingWindowUpdate{streamID: s.id, increment: w})
+ // Piggyback connection's window update along.
+ if cw := t.fc.resetPendingUpdate(); cw > 0 {
+ t.controlBuf.put(&windowUpdate{0, cw})
+ }
+ t.controlBuf.put(&windowUpdate{s.id, w})
}
}
-// updateWindow adjusts the inbound quota for the stream.
-// Window updates will be sent out when the cumulative quota
-// exceeds the corresponding threshold.
+// updateWindow adjusts the inbound quota for the stream and the transport.
+// Window updates will be delivered to the controller for sending when
+// the cumulative quota exceeds the corresponding threshold.
func (t *http2Client) updateWindow(s *Stream, n uint32) {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+ if s.state == streamDone {
+ return
+ }
if w := s.fc.onRead(n); w > 0 {
- t.controlBuf.put(&outgoingWindowUpdate{streamID: s.id, increment: w})
+ if cw := t.fc.resetPendingUpdate(); cw > 0 {
+ t.controlBuf.put(&windowUpdate{0, cw})
+ }
+ t.controlBuf.put(&windowUpdate{s.id, w})
}
}
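
The updateWindow comment above describes threshold-based window updates: consumed bytes accumulate, and a WINDOW_UPDATE is queued only once the pending amount crosses a fraction of the window. A toy, self-contained sketch of that accounting follows; the quarter-window threshold and the inFlow type here are assumptions for illustration, not the vendored implementation.

package main

import "fmt"

// inFlow is a toy version of the accounting described above: reads accumulate
// in pendingUpdate, and a window update is reported only once the pending
// amount crosses a quarter of the window (the 25% threshold is an assumption
// for this illustration).
type inFlow struct {
	limit         uint32
	pendingUpdate uint32
}

func (f *inFlow) onRead(n uint32) uint32 {
	f.pendingUpdate += n
	if f.pendingUpdate >= f.limit/4 {
		w := f.pendingUpdate
		f.pendingUpdate = 0
		return w // the caller would queue a WINDOW_UPDATE of w bytes
	}
	return 0
}

func main() {
	f := &inFlow{limit: 65535}
	for _, n := range []uint32{4096, 4096, 8192} {
		if w := f.onRead(n); w > 0 {
			fmt.Println("send WINDOW_UPDATE for", w, "bytes")
		}
	}
}
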
@@ -863,17 +794,14 @@ func (t *http2Client) updateFlowControl(n uint32) {
for _, s := range t.activeStreams {
s.fc.newLimit(n)
}
+ t.initialWindowSize = int32(n)
t.mu.Unlock()
- updateIWS := func(interface{}) bool {
- t.initialWindowSize = int32(n)
- return true
- }
- t.controlBuf.executeAndPut(updateIWS, &outgoingWindowUpdate{streamID: 0, increment: t.fc.newLimit(n)})
- t.controlBuf.put(&outgoingSettings{
+ t.controlBuf.put(&windowUpdate{0, t.fc.newLimit(n)})
+ t.controlBuf.put(&settings{
ss: []http2.Setting{
{
ID: http2.SettingInitialWindowSize,
- Val: n,
+ Val: uint32(n),
},
},
})
@@ -883,7 +811,7 @@ func (t *http2Client) handleData(f *http2.DataFrame) {
size := f.Header().Length
var sendBDPPing bool
if t.bdpEst != nil {
- sendBDPPing = t.bdpEst.add(size)
+ sendBDPPing = t.bdpEst.add(uint32(size))
}
// Decouple connection's flow control from application's read.
// An update on connection's flow control should not depend on
@@ -894,24 +822,21 @@ func (t *http2Client) handleData(f *http2.DataFrame) {
// active(fast) streams from starving in presence of slow or
// inactive streams.
//
- if w := t.fc.onData(size); w > 0 {
- t.controlBuf.put(&outgoingWindowUpdate{
- streamID: 0,
- increment: w,
- })
- }
+ // Furthermore, if a bdpPing is being sent out, we can piggyback the
+ // connection's window update for the bytes we just received.
if sendBDPPing {
- // Avoid excessive ping detection (e.g. in an L7 proxy)
- // by sending a window update prior to the BDP ping.
-
- if w := t.fc.reset(); w > 0 {
- t.controlBuf.put(&outgoingWindowUpdate{
- streamID: 0,
- increment: w,
- })
+ if size != 0 { // Could've been an empty data frame.
+ t.controlBuf.put(&windowUpdate{0, uint32(size)})
}
-
t.controlBuf.put(bdpPing)
+ } else {
+ if err := t.fc.onData(uint32(size)); err != nil {
+ t.Close()
+ return
+ }
+ if w := t.fc.onRead(uint32(size)); w > 0 {
+ t.controlBuf.put(&windowUpdate{0, w})
+ }
}
// Select the right stream to dispatch.
s, ok := t.getStream(f)
@@ -919,15 +844,25 @@ func (t *http2Client) handleData(f *http2.DataFrame) {
return
}
if size > 0 {
- if err := s.fc.onData(size); err != nil {
- t.closeStream(s, io.EOF, true, http2.ErrCodeFlowControl, status.New(codes.Internal, err.Error()), nil, false)
+ s.mu.Lock()
+ if s.state == streamDone {
+ s.mu.Unlock()
+ return
+ }
+ if err := s.fc.onData(uint32(size)); err != nil {
+ s.rstStream = true
+ s.rstError = http2.ErrCodeFlowControl
+ s.finish(status.New(codes.Internal, err.Error()))
+ s.mu.Unlock()
+ s.write(recvMsg{err: io.EOF})
return
}
if f.Header().Flags.Has(http2.FlagDataPadded) {
- if w := s.fc.onRead(size - uint32(len(f.Data()))); w > 0 {
- t.controlBuf.put(&outgoingWindowUpdate{s.id, w})
+ if w := s.fc.onRead(uint32(size) - uint32(len(f.Data()))); w > 0 {
+ t.controlBuf.put(&windowUpdate{s.id, w})
}
}
+ s.mu.Unlock()
// TODO(bradfitz, zhaoq): A copy is required here because there is no
// guarantee f.Data() is consumed before the arrival of next frame.
// Can this copy be eliminated?
@@ -940,7 +875,14 @@ func (t *http2Client) handleData(f *http2.DataFrame) {
// The server has closed the stream without sending trailers. Record that
// the read direction is closed, and set the status appropriately.
if f.FrameHeader.Flags.Has(http2.FlagDataEndStream) {
- t.closeStream(s, io.EOF, false, http2.ErrCodeNo, status.New(codes.Internal, "server closed the stream without sending trailers"), nil, true)
+ s.mu.Lock()
+ if s.state == streamDone {
+ s.mu.Unlock()
+ return
+ }
+ s.finish(status.New(codes.Internal, "server closed the stream without sending trailers"))
+ s.mu.Unlock()
+ s.write(recvMsg{err: io.EOF})
}
}
@@ -949,72 +891,73 @@ func (t *http2Client) handleRSTStream(f *http2.RSTStreamFrame) {
if !ok {
return
}
- if f.ErrCode == http2.ErrCodeRefusedStream {
+ s.mu.Lock()
+ if s.state == streamDone {
+ s.mu.Unlock()
+ return
+ }
+ if !s.headerDone {
+ close(s.headerChan)
+ s.headerDone = true
+ }
+
+ code := http2.ErrCode(f.ErrCode)
+ if code == http2.ErrCodeRefusedStream {
// The stream was unprocessed by the server.
- atomic.StoreUint32(&s.unprocessed, 1)
+ s.unprocessed = true
}
- statusCode, ok := http2ErrConvTab[f.ErrCode]
+ statusCode, ok := http2ErrConvTab[code]
if !ok {
warningf("transport: http2Client.handleRSTStream found no mapped gRPC status for the received http2 error %v", f.ErrCode)
statusCode = codes.Unknown
}
- if statusCode == codes.Canceled {
- // Our deadline was already exceeded, and that was likely the cause of
- // this cancelation. Alter the status code accordingly.
- if d, ok := s.ctx.Deadline(); ok && d.After(time.Now()) {
- statusCode = codes.DeadlineExceeded
- }
- }
- t.closeStream(s, io.EOF, false, http2.ErrCodeNo, status.Newf(statusCode, "stream terminated by RST_STREAM with error code: %v", f.ErrCode), nil, false)
+ s.finish(status.Newf(statusCode, "stream terminated by RST_STREAM with error code: %v", f.ErrCode))
+ s.mu.Unlock()
+ s.write(recvMsg{err: io.EOF})
}
func (t *http2Client) handleSettings(f *http2.SettingsFrame, isFirst bool) {
if f.IsAck() {
return
}
- var maxStreams *uint32
- var ss []http2.Setting
- var updateFuncs []func()
+ var rs []http2.Setting
+ var ps []http2.Setting
+ isMaxConcurrentStreamsMissing := true
f.ForeachSetting(func(s http2.Setting) error {
- switch s.ID {
- case http2.SettingMaxConcurrentStreams:
- maxStreams = new(uint32)
- *maxStreams = s.Val
- case http2.SettingMaxHeaderListSize:
- updateFuncs = append(updateFuncs, func() {
- t.maxSendHeaderListSize = new(uint32)
- *t.maxSendHeaderListSize = s.Val
- })
- default:
- ss = append(ss, s)
+ if s.ID == http2.SettingMaxConcurrentStreams {
+ isMaxConcurrentStreamsMissing = false
+ }
+ if t.isRestrictive(s) {
+ rs = append(rs, s)
+ } else {
+ ps = append(ps, s)
}
return nil
})
- if isFirst && maxStreams == nil {
- maxStreams = new(uint32)
- *maxStreams = math.MaxUint32
- }
- sf := &incomingSettings{
- ss: ss,
- }
- if maxStreams != nil {
- updateStreamQuota := func() {
- delta := int64(*maxStreams) - int64(t.maxConcurrentStreams)
- t.maxConcurrentStreams = *maxStreams
- t.streamQuota += delta
- if delta > 0 && t.waitingStreams > 0 {
- close(t.streamsQuotaAvailable) // wake all of them up.
- t.streamsQuotaAvailable = make(chan struct{}, 1)
- }
- }
- updateFuncs = append(updateFuncs, updateStreamQuota)
+ if isFirst && isMaxConcurrentStreamsMissing {
+ // This means the server is imposing no limit on the
+ // maximum number of concurrent streams initiated by the client.
+ // So we must remove our self-imposed limit.
+ ps = append(ps, http2.Setting{
+ ID: http2.SettingMaxConcurrentStreams,
+ Val: math.MaxUint32,
+ })
}
- t.controlBuf.executeAndPut(func(interface{}) bool {
- for _, f := range updateFuncs {
- f()
- }
- return true
- }, sf)
+ t.applySettings(rs)
+ t.controlBuf.put(&settingsAck{})
+ t.applySettings(ps)
+}
+
+func (t *http2Client) isRestrictive(s http2.Setting) bool {
+ switch s.ID {
+ case http2.SettingMaxConcurrentStreams:
+ return int(s.Val) < t.maxStreams
+ case http2.SettingInitialWindowSize:
+ // Note: we don't acquire a lock here to read streamSendQuota
+ // because the same goroutine updates it later.
+ return s.Val < t.streamSendQuota
+ }
+ return false
}
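
handleSettings above partitions an incoming SETTINGS frame into restrictive and permissive values so that restrictive ones are applied before the ACK is queued and permissive ones after it. Here is a hedged, self-contained sketch of that partitioning; setting and splitSettings are illustrative stand-ins rather than the vendored types.

package main

import "fmt"

type setting struct {
	id  string
	val uint32
}

// splitSettings partitions the incoming settings the way handleSettings does:
// values that tighten a limit we currently rely on go into restrictive and are
// applied before the ACK is queued; everything else is applied after it.
func splitSettings(ss []setting, isRestrictive func(setting) bool) (restrictive, permissive []setting) {
	for _, s := range ss {
		if isRestrictive(s) {
			restrictive = append(restrictive, s)
		} else {
			permissive = append(permissive, s)
		}
	}
	return restrictive, permissive
}

func main() {
	current := map[string]uint32{
		"MAX_CONCURRENT_STREAMS": 100,
		"INITIAL_WINDOW_SIZE":    65535,
	}
	incoming := []setting{
		{id: "MAX_CONCURRENT_STREAMS", val: 10},   // tighter than 100: restrictive
		{id: "INITIAL_WINDOW_SIZE", val: 1 << 20}, // larger window: permissive
	}
	rs, ps := splitSettings(incoming, func(s setting) bool { return s.val < current[s.id] })
	fmt.Println("apply before ACK:", rs)
	fmt.Println("apply after ACK: ", ps)
}
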
func (t *http2Client) handlePing(f *http2.PingFrame) {
@@ -1032,7 +975,7 @@ func (t *http2Client) handlePing(f *http2.PingFrame) {
func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) {
t.mu.Lock()
- if t.state == closing {
+ if t.state != reachable && t.state != draining {
t.mu.Unlock()
return
}
@@ -1067,10 +1010,6 @@ func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) {
t.setGoAwayReason(f)
close(t.goAway)
t.state = draining
- t.controlBuf.put(&incomingGoAway{})
-
- // This has to be a new goroutine because we're still using the current goroutine to read in the transport.
- t.onGoAway(t.goAwayReason)
}
// All streams with IDs greater than the GoAwayId
// and smaller than the previous GoAway ID should be killed.
@@ -1081,8 +1020,11 @@ func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) {
for streamID, stream := range t.activeStreams {
if streamID > id && streamID <= upperLimit {
// The stream was unprocessed by the server.
- atomic.StoreUint32(&stream.unprocessed, 1)
- t.closeStream(stream, errStreamDrain, false, http2.ErrCodeNo, statusGoAway, nil, false)
+ stream.mu.Lock()
+ stream.unprocessed = true
+ stream.finish(statusGoAway)
+ stream.mu.Unlock()
+ close(stream.goAway)
}
}
t.prevGoAwayID = id
@@ -1114,10 +1056,15 @@ func (t *http2Client) GetGoAwayReason() GoAwayReason {
}
func (t *http2Client) handleWindowUpdate(f *http2.WindowUpdateFrame) {
- t.controlBuf.put(&incomingWindowUpdate{
- streamID: f.Header().StreamID,
- increment: f.Increment,
- })
+ id := f.Header().StreamID
+ incr := f.Increment
+ if id == 0 {
+ t.sendQuotaPool.add(int(incr))
+ return
+ }
+ if s, ok := t.getStream(f); ok {
+ s.sendQuotaPool.add(int(incr))
+ }
}
// operateHeaders takes action on the decoded headers.
@@ -1126,10 +1073,18 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) {
if !ok {
return
}
- atomic.StoreUint32(&s.bytesReceived, 1)
+ s.mu.Lock()
+ s.bytesReceived = true
+ s.mu.Unlock()
var state decodeState
- if err := state.decodeHeader(frame); err != nil {
- t.closeStream(s, err, true, http2.ErrCodeProtocol, status.New(codes.Internal, err.Error()), nil, false)
+ if err := state.decodeResponseHeader(frame); err != nil {
+ s.mu.Lock()
+ if !s.headerDone {
+ close(s.headerChan)
+ s.headerDone = true
+ }
+ s.mu.Unlock()
+ s.write(recvMsg{err: err})
// Something wrong. Stops reading even when there is remaining.
return
}
@@ -1153,29 +1108,39 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) {
}
}
}()
- // If headers haven't been received yet.
- if atomic.SwapUint32(&s.headerDone, 1) == 0 {
- if !endStream {
- // Headers frame is not actually a trailers-only frame.
- isHeader = true
- // These values can be set without any synchronization because
- // stream goroutine will read it only after seeing a closed
- // headerChan which we'll close after setting this.
- s.recvCompress = state.encoding
- if len(state.mdata) > 0 {
- s.header = state.mdata
- }
- } else {
- s.noHeaders = true
+
+ s.mu.Lock()
+ if !endStream {
+ s.recvCompress = state.encoding
+ }
+ if !s.headerDone {
+ if !endStream && len(state.mdata) > 0 {
+ s.header = state.mdata
}
close(s.headerChan)
+ s.headerDone = true
+ isHeader = true
}
- if !endStream {
+ if !endStream || s.state == streamDone {
+ s.mu.Unlock()
return
}
- // if client received END_STREAM from server while stream was still active, send RST_STREAM
- rst := s.getState() == streamActive
- t.closeStream(s, io.EOF, rst, http2.ErrCodeNo, state.status(), state.mdata, true)
+ if len(state.mdata) > 0 {
+ s.trailer = state.mdata
+ }
+ s.finish(state.status())
+ s.mu.Unlock()
+ s.write(recvMsg{err: io.EOF})
+}
+
+func handleMalformedHTTP2(s *Stream, err error) {
+ s.mu.Lock()
+ if !s.headerDone {
+ close(s.headerChan)
+ s.headerDone = true
+ }
+ s.mu.Unlock()
+ s.write(recvMsg{err: err})
}
// reader runs as a separate goroutine in charge of reading data from network
@@ -1185,20 +1150,16 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) {
// optimal.
// TODO(zhaoq): Check the validity of the incoming frame sequence.
func (t *http2Client) reader() {
- defer close(t.readerDone)
// Check the validity of server preface.
frame, err := t.framer.fr.ReadFrame()
if err != nil {
- t.Close() // this kicks off resetTransport, so must be last before return
+ t.Close()
return
}
- t.conn.SetReadDeadline(time.Time{}) // reset deadline once we get the settings frame (we didn't time out, yay!)
- if t.keepaliveEnabled {
- atomic.CompareAndSwapUint32(&t.activity, 0, 1)
- }
+ atomic.CompareAndSwapUint32(&t.activity, 0, 1)
sf, ok := frame.(*http2.SettingsFrame)
if !ok {
- t.Close() // this kicks off resetTransport, so must be last before return
+ t.Close()
return
}
t.onSuccess()
@@ -1207,9 +1168,7 @@ func (t *http2Client) reader() {
// loop to keep reading incoming messages on this transport.
for {
frame, err := t.framer.fr.ReadFrame()
- if t.keepaliveEnabled {
- atomic.CompareAndSwapUint32(&t.activity, 0, 1)
- }
+ atomic.CompareAndSwapUint32(&t.activity, 0, 1)
if err != nil {
// Abort an active stream if the http2.Framer returns a
// http2.StreamError. This can happen only if the server's response
@@ -1220,9 +1179,7 @@ func (t *http2Client) reader() {
t.mu.Unlock()
if s != nil {
// use error detail to provide better err message
- code := http2ErrConvTab[se.Code]
- msg := t.framer.fr.ErrorDetail().Error()
- t.closeStream(s, status.Error(code, msg), true, http2.ErrCodeProtocol, status.New(code, msg), nil, false)
+ handleMalformedHTTP2(s, streamErrorf(http2ErrConvTab[se.Code], "%v", t.framer.fr.ErrorDetail()))
}
continue
} else {
@@ -1252,6 +1209,109 @@ func (t *http2Client) reader() {
}
}
+func (t *http2Client) applySettings(ss []http2.Setting) {
+ for _, s := range ss {
+ switch s.ID {
+ case http2.SettingMaxConcurrentStreams:
+ // TODO(zhaoq): This is a hack to avoid significant refactoring of the
+ // code to deal with the unrealistic int32 overflow. Probably will try
+ // to find a better way to handle this later.
+ if s.Val > math.MaxInt32 {
+ s.Val = math.MaxInt32
+ }
+ ms := t.maxStreams
+ t.maxStreams = int(s.Val)
+ t.streamsQuota.add(int(s.Val) - ms)
+ case http2.SettingInitialWindowSize:
+ t.mu.Lock()
+ for _, stream := range t.activeStreams {
+ // Adjust the sending quota for each stream.
+ stream.sendQuotaPool.addAndUpdate(int(s.Val) - int(t.streamSendQuota))
+ }
+ t.streamSendQuota = s.Val
+ t.mu.Unlock()
+ }
+ }
+}
+
+// TODO(mmukhi): A lot of this code (and code in other places in the transport layer)
+// is duplicated between the client and the server.
+// The transport layer needs to be refactored to take care of this.
+func (t *http2Client) itemHandler(i item) (err error) {
+ defer func() {
+ if err != nil {
+ errorf("error in itemHandler: %v", err)
+ }
+ }()
+ switch i := i.(type) {
+ case *dataFrame:
+ if err := t.framer.fr.WriteData(i.streamID, i.endStream, i.d); err != nil {
+ return err
+ }
+ i.f()
+ return nil
+ case *headerFrame:
+ t.hBuf.Reset()
+ for _, f := range i.hf {
+ t.hEnc.WriteField(f)
+ }
+ endHeaders := false
+ first := true
+ for !endHeaders {
+ size := t.hBuf.Len()
+ if size > http2MaxFrameLen {
+ size = http2MaxFrameLen
+ } else {
+ endHeaders = true
+ }
+ if first {
+ first = false
+ err = t.framer.fr.WriteHeaders(http2.HeadersFrameParam{
+ StreamID: i.streamID,
+ BlockFragment: t.hBuf.Next(size),
+ EndStream: i.endStream,
+ EndHeaders: endHeaders,
+ })
+ } else {
+ err = t.framer.fr.WriteContinuation(
+ i.streamID,
+ endHeaders,
+ t.hBuf.Next(size),
+ )
+ }
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+ case *windowUpdate:
+ return t.framer.fr.WriteWindowUpdate(i.streamID, i.increment)
+ case *settings:
+ return t.framer.fr.WriteSettings(i.ss...)
+ case *settingsAck:
+ return t.framer.fr.WriteSettingsAck()
+ case *resetStream:
+ // If the server needs to be informed about the stream closing,
+ // then we need to make sure the RST_STREAM frame is written to
+ // the wire before the headers of the next stream waiting on
+ // streamQuota. We ensure this by adding to the streamsQuota pool
+ // only after having acquired the writableChan to send RST_STREAM.
+ err := t.framer.fr.WriteRSTStream(i.streamID, i.code)
+ t.streamsQuota.add(1)
+ return err
+ case *flushIO:
+ return t.framer.writer.Flush()
+ case *ping:
+ if !i.ack {
+ t.bdpEst.timesnap(i.data)
+ }
+ return t.framer.fr.WritePing(i.ack, i.data)
+ default:
+ errorf("transport: http2Client.controller got unexpected item type %v", i)
+ return fmt.Errorf("transport: http2Client.controller got unexpected item type %v", i)
+ }
+}
+
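The *headerFrame case in itemHandler above splits an HPACK-encoded header block into one HEADERS frame followed by CONTINUATION frames, setting END_HEADERS only on the final chunk. Below is a minimal, self-contained sketch of that splitting loop; frame and writeHeaderBlock are illustrative names, and 16384 is assumed for http2MaxFrameLen.

package main

import "fmt"

const maxFrameLen = 16384 // assumption: mirrors http2MaxFrameLen

// frame is a stand-in for the HEADERS / CONTINUATION frames written by the
// *headerFrame case; only the fields needed for the illustration exist.
type frame struct {
	kind       string
	endHeaders bool
	size       int
}

// writeHeaderBlock shows the splitting loop: the first chunk goes out as a
// HEADERS frame, every further chunk as a CONTINUATION frame, and only the
// last chunk carries END_HEADERS.
func writeHeaderBlock(blockLen int) []frame {
	var out []frame
	first := true
	remaining := blockLen
	for {
		size := remaining
		endHeaders := true
		if size > maxFrameLen {
			size, endHeaders = maxFrameLen, false
		}
		kind := "CONTINUATION"
		if first {
			kind, first = "HEADERS", false
		}
		out = append(out, frame{kind: kind, endHeaders: endHeaders, size: size})
		remaining -= size
		if endHeaders {
			return out
		}
	}
}

func main() {
	for _, f := range writeHeaderBlock(40000) { // a header block spanning three frames
		fmt.Printf("%s end_headers=%v %d bytes\n", f.kind, f.endHeaders, f.size)
	}
}
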
// keepalive running in a separate goroutine makes sure the connection is alive by sending pings.
func (t *http2Client) keepalive() {
p := &ping{data: [8]byte{}}
@@ -1278,9 +1338,6 @@ func (t *http2Client) keepalive() {
}
} else {
t.mu.Unlock()
- if channelz.IsOn() {
- atomic.AddInt64(&t.czData.kpCount, 1)
- }
// Send ping.
t.controlBuf.put(p)
}
@@ -1317,52 +1374,3 @@ func (t *http2Client) Error() <-chan struct{} {
func (t *http2Client) GoAway() <-chan struct{} {
return t.goAway
}
-
-func (t *http2Client) ChannelzMetric() *channelz.SocketInternalMetric {
- s := channelz.SocketInternalMetric{
- StreamsStarted: atomic.LoadInt64(&t.czData.streamsStarted),
- StreamsSucceeded: atomic.LoadInt64(&t.czData.streamsSucceeded),
- StreamsFailed: atomic.LoadInt64(&t.czData.streamsFailed),
- MessagesSent: atomic.LoadInt64(&t.czData.msgSent),
- MessagesReceived: atomic.LoadInt64(&t.czData.msgRecv),
- KeepAlivesSent: atomic.LoadInt64(&t.czData.kpCount),
- LastLocalStreamCreatedTimestamp: time.Unix(0, atomic.LoadInt64(&t.czData.lastStreamCreatedTime)),
- LastMessageSentTimestamp: time.Unix(0, atomic.LoadInt64(&t.czData.lastMsgSentTime)),
- LastMessageReceivedTimestamp: time.Unix(0, atomic.LoadInt64(&t.czData.lastMsgRecvTime)),
- LocalFlowControlWindow: int64(t.fc.getSize()),
- SocketOptions: channelz.GetSocketOption(t.conn),
- LocalAddr: t.localAddr,
- RemoteAddr: t.remoteAddr,
- // RemoteName :
- }
- if au, ok := t.authInfo.(credentials.ChannelzSecurityInfo); ok {
- s.Security = au.GetSecurityValue()
- }
- s.RemoteFlowControlWindow = t.getOutFlowWindow()
- return &s
-}
-
-func (t *http2Client) IncrMsgSent() {
- atomic.AddInt64(&t.czData.msgSent, 1)
- atomic.StoreInt64(&t.czData.lastMsgSentTime, time.Now().UnixNano())
-}
-
-func (t *http2Client) IncrMsgRecv() {
- atomic.AddInt64(&t.czData.msgRecv, 1)
- atomic.StoreInt64(&t.czData.lastMsgRecvTime, time.Now().UnixNano())
-}
-
-func (t *http2Client) getOutFlowWindow() int64 {
- resp := make(chan uint32, 1)
- timer := time.NewTimer(time.Second)
- defer timer.Stop()
- t.controlBuf.put(&outFlowControlSizeRequest{resp})
- select {
- case sz := <-resp:
- return int64(sz)
- case <-t.ctxDone:
- return -1
- case <-timer.C:
- return -2
- }
-}
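
The removed getOutFlowWindow above queries the controller by posting a request that carries a reply channel, then races the answer against transport shutdown and a one-second timer. Below is a self-contained sketch of that query-with-timeout pattern; sizeRequest and the channel plumbing are illustrative, not the vendored API.

package main

import (
	"fmt"
	"time"
)

// sizeRequest carries a reply channel onto the control queue, the way the
// removed outFlowControlSizeRequest does.
type sizeRequest struct{ resp chan uint32 }

// getOutFlowWindow posts the request and then waits for the answer, transport
// shutdown, or a one-second timeout, returning the same sentinel values the
// removed method used.
func getOutFlowWindow(controlBuf chan<- interface{}, done <-chan struct{}) int64 {
	req := &sizeRequest{resp: make(chan uint32, 1)}
	timer := time.NewTimer(time.Second)
	defer timer.Stop()
	controlBuf <- req
	select {
	case sz := <-req.resp:
		return int64(sz)
	case <-done:
		return -1 // transport is shutting down
	case <-timer.C:
		return -2 // the writer never answered in time
	}
}

func main() {
	controlBuf := make(chan interface{}, 1)
	done := make(chan struct{})
	go func() {
		req := (<-controlBuf).(*sizeRequest)
		req.resp <- 65535 // the controller goroutine would answer here
	}()
	fmt.Println(getOutFlowWindow(controlBuf, done))
}
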
diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_server.go b/vendor/google.golang.org/grpc/transport/http2_server.go
index efb7f53ff..6d252c53a 100644
--- a/vendor/google.golang.org/grpc/internal/transport/http2_server.go
+++ b/vendor/google.golang.org/grpc/transport/http2_server.go
@@ -24,6 +24,7 @@ import (
"fmt"
"io"
"math"
+ "math/rand"
"net"
"strconv"
"sync"
@@ -34,12 +35,8 @@ import (
"golang.org/x/net/context"
"golang.org/x/net/http2"
"golang.org/x/net/http2/hpack"
-
"google.golang.org/grpc/codes"
"google.golang.org/grpc/credentials"
- "google.golang.org/grpc/grpclog"
- "google.golang.org/grpc/internal/channelz"
- "google.golang.org/grpc/internal/grpcrand"
"google.golang.org/grpc/keepalive"
"google.golang.org/grpc/metadata"
"google.golang.org/grpc/peer"
@@ -48,37 +45,35 @@ import (
"google.golang.org/grpc/tap"
)
-var (
- // ErrIllegalHeaderWrite indicates that setting header is illegal because of
- // the stream's state.
- ErrIllegalHeaderWrite = errors.New("transport: the stream is done or WriteHeader was already called")
- // ErrHeaderListSizeLimitViolation indicates that the header list size is larger
- // than the limit set by peer.
- ErrHeaderListSizeLimitViolation = errors.New("transport: trying to send header list size larger than the limit set by peer")
-)
+// ErrIllegalHeaderWrite indicates that setting header is illegal because of
+// the stream's state.
+var ErrIllegalHeaderWrite = errors.New("transport: the stream is done or WriteHeader was already called")
// http2Server implements the ServerTransport interface with HTTP2.
type http2Server struct {
ctx context.Context
- ctxDone <-chan struct{} // Cache the context.Done() chan
cancel context.CancelFunc
conn net.Conn
- loopy *loopyWriter
- readerDone chan struct{} // sync point to enable testing.
- writerDone chan struct{} // sync point to enable testing.
remoteAddr net.Addr
localAddr net.Addr
maxStreamID uint32 // max stream ID ever seen
authInfo credentials.AuthInfo // auth info about the connection
inTapHandle tap.ServerInHandle
framer *framer
+ hBuf *bytes.Buffer // the buffer for HPACK encoding
+ hEnc *hpack.Encoder // HPACK encoder
// The max number of concurrent streams.
maxStreams uint32
// controlBuf delivers all the control related tasks (e.g., window
// updates, reset streams, and various settings) to the controller.
controlBuf *controlBuffer
- fc *trInFlow
- stats stats.Handler
+ fc *inFlow
+ // sendQuotaPool provides flow control to outbound message.
+ sendQuotaPool *quotaPool
+ // localSendQuota limits the amount of data that can be scheduled
+ // for writing before it is actually written out.
+ localSendQuota *quotaPool
+ stats stats.Handler
// Flag to keep track of reading activity on transport.
// 1 is true and 0 is false.
activity uint32 // Accessed atomically.
@@ -94,10 +89,9 @@ type http2Server struct {
// Flag to signify that number of ping strikes should be reset to 0.
// This is set whenever data or header frames are sent.
// 1 means yes.
- resetPingStrikes uint32 // Accessed atomically.
- initialWindowSize int32
- bdpEst *bdpEstimator
- maxSendHeaderListSize *uint32
+ resetPingStrikes uint32 // Accessed atomically.
+ initialWindowSize int32
+ bdpEst *bdpEstimator
mu sync.Mutex // guard the following
@@ -110,27 +104,27 @@ type http2Server struct {
drainChan chan struct{}
state transportState
activeStreams map[uint32]*Stream
+ // the per-stream outbound flow control window size set by the peer.
+ streamSendQuota uint32
// idle is the time instant when the connection went idle.
// This is either the beginning of the connection or when the number of
// RPCs go down to 0.
// When the connection is busy, this value is set to 0.
idle time.Time
-
- // Fields below are for channelz metric collection.
- channelzID int64 // channelz unique identification number
- czData *channelzData
}
// newHTTP2Server constructs a ServerTransport based on HTTP2. ConnectionError is
// returned if something goes wrong.
func newHTTP2Server(conn net.Conn, config *ServerConfig) (_ ServerTransport, err error) {
- writeBufSize := config.WriteBufferSize
- readBufSize := config.ReadBufferSize
- maxHeaderListSize := defaultServerMaxHeaderListSize
- if config.MaxHeaderListSize != nil {
- maxHeaderListSize = *config.MaxHeaderListSize
+ writeBufSize := defaultWriteBufSize
+ if config.WriteBufferSize > 0 {
+ writeBufSize = config.WriteBufferSize
+ }
+ readBufSize := defaultReadBufSize
+ if config.ReadBufferSize > 0 {
+ readBufSize = config.ReadBufferSize
}
- framer := newFramer(conn, writeBufSize, readBufSize, maxHeaderListSize)
+ framer := newFramer(conn, writeBufSize, readBufSize)
// Send initial settings as connection preface to client.
var isettings []http2.Setting
// TODO(zhaoq): Have a better way to signal "no limit" because 0 is
@@ -160,12 +154,6 @@ func newHTTP2Server(conn net.Conn, config *ServerConfig) (_ ServerTransport, err
ID: http2.SettingInitialWindowSize,
Val: uint32(iwz)})
}
- if config.MaxHeaderListSize != nil {
- isettings = append(isettings, http2.Setting{
- ID: http2.SettingMaxHeaderListSize,
- Val: *config.MaxHeaderListSize,
- })
- }
if err := framer.fr.WriteSettings(isettings...); err != nil {
return nil, connectionErrorf(false, err, "transport: %v", err)
}
@@ -197,31 +185,33 @@ func newHTTP2Server(conn net.Conn, config *ServerConfig) (_ ServerTransport, err
if kep.MinTime == 0 {
kep.MinTime = defaultKeepalivePolicyMinTime
}
+ var buf bytes.Buffer
ctx, cancel := context.WithCancel(context.Background())
t := &http2Server{
ctx: ctx,
cancel: cancel,
- ctxDone: ctx.Done(),
conn: conn,
remoteAddr: conn.RemoteAddr(),
localAddr: conn.LocalAddr(),
authInfo: config.AuthInfo,
framer: framer,
- readerDone: make(chan struct{}),
- writerDone: make(chan struct{}),
+ hBuf: &buf,
+ hEnc: hpack.NewEncoder(&buf),
maxStreams: maxStreams,
inTapHandle: config.InTapHandle,
- fc: &trInFlow{limit: uint32(icwz)},
+ controlBuf: newControlBuffer(),
+ fc: &inFlow{limit: uint32(icwz)},
+ sendQuotaPool: newQuotaPool(defaultWindowSize),
+ localSendQuota: newQuotaPool(defaultLocalSendQuota),
state: reachable,
activeStreams: make(map[uint32]*Stream),
+ streamSendQuota: defaultWindowSize,
stats: config.StatsHandler,
kp: kp,
idle: time.Now(),
kep: kep,
initialWindowSize: iwz,
- czData: new(channelzData),
}
- t.controlBuf = newControlBuffer(t.ctxDone)
if dynamicWindow {
t.bdpEst = &bdpEstimator{
bdp: initialWindowSize,
@@ -236,9 +226,6 @@ func newHTTP2Server(conn net.Conn, config *ServerConfig) (_ ServerTransport, err
connBegin := &stats.ConnBegin{}
t.stats.HandleConn(t.ctx, connBegin)
}
- if channelz.IsOn() {
- t.channelzID = channelz.RegisterNormalSocket(t, config.ChannelzParentID, "")
- }
t.framer.writer.Flush()
defer func() {
@@ -271,44 +258,37 @@ func newHTTP2Server(conn net.Conn, config *ServerConfig) (_ ServerTransport, err
t.handleSettings(sf)
go func() {
- t.loopy = newLoopyWriter(serverSide, t.framer, t.controlBuf, t.bdpEst)
- t.loopy.ssGoAwayHandler = t.outgoingGoAwayHandler
- if err := t.loopy.run(); err != nil {
- errorf("transport: loopyWriter.run returning. Err: %v", err)
- }
+ loopyWriter(t.ctx, t.controlBuf, t.itemHandler)
t.conn.Close()
- close(t.writerDone)
}()
go t.keepalive()
return t, nil
}
// operateHeader takes action on the decoded headers.
-func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(*Stream), traceCtx func(context.Context, string) context.Context) (fatal bool) {
+func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(*Stream), traceCtx func(context.Context, string) context.Context) (close bool) {
streamID := frame.Header().StreamID
- state := decodeState{serverSide: true}
- if err := state.decodeHeader(frame); err != nil {
- if se, ok := status.FromError(err); ok {
- t.controlBuf.put(&cleanupStream{
- streamID: streamID,
- rst: true,
- rstCode: statusCodeConvTab[se.Code()],
- onWrite: func() {},
- })
+
+ var state decodeState
+ for _, hf := range frame.Fields {
+ if err := state.processHeaderField(hf); err != nil {
+ if se, ok := err.(StreamError); ok {
+ t.controlBuf.put(&resetStream{streamID, statusCodeConvTab[se.Code]})
+ }
+ return
}
- return false
}
buf := newRecvBuffer()
s := &Stream{
- id: streamID,
- st: t,
- buf: buf,
- fc: &inFlow{limit: uint32(t.initialWindowSize)},
- recvCompress: state.encoding,
- method: state.method,
- contentSubtype: state.contentSubtype,
+ id: streamID,
+ st: t,
+ buf: buf,
+ fc: &inFlow{limit: uint32(t.initialWindowSize)},
+ recvCompress: state.encoding,
+ method: state.method,
}
+
if frame.StreamEnded() {
// s is just created by the caller. No lock needed.
s.state = streamReadDone
@@ -326,6 +306,10 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(
pr.AuthInfo = t.authInfo
}
s.ctx = peer.NewContext(s.ctx, pr)
+ // Cache the current stream in the context so that the server application
+ // can retrieve it. Required when the server wants to send some metadata
+ // back to the client (unary call only).
+ s.ctx = newContextWithStream(s.ctx, s)
// Attach the received metadata to the context.
if len(state.mdata) > 0 {
s.ctx = metadata.NewIncomingContext(s.ctx, state.mdata)
@@ -344,29 +328,19 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(
s.ctx, err = t.inTapHandle(s.ctx, info)
if err != nil {
warningf("transport: http2Server.operateHeaders got an error from InTapHandle: %v", err)
- t.controlBuf.put(&cleanupStream{
- streamID: s.id,
- rst: true,
- rstCode: http2.ErrCodeRefusedStream,
- onWrite: func() {},
- })
- return false
+ t.controlBuf.put(&resetStream{s.id, http2.ErrCodeRefusedStream})
+ return
}
}
t.mu.Lock()
if t.state != reachable {
t.mu.Unlock()
- return false
+ return
}
if uint32(len(t.activeStreams)) >= t.maxStreams {
t.mu.Unlock()
- t.controlBuf.put(&cleanupStream{
- streamID: streamID,
- rst: true,
- rstCode: http2.ErrCodeRefusedStream,
- onWrite: func() {},
- })
- return false
+ t.controlBuf.put(&resetStream{streamID, http2.ErrCodeRefusedStream})
+ return
}
if streamID%2 != 1 || streamID <= t.maxStreamID {
t.mu.Unlock()
@@ -375,15 +349,12 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(
return true
}
t.maxStreamID = streamID
+ s.sendQuotaPool = newQuotaPool(int(t.streamSendQuota))
t.activeStreams[streamID] = s
if len(t.activeStreams) == 1 {
t.idle = time.Time{}
}
t.mu.Unlock()
- if channelz.IsOn() {
- atomic.AddInt64(&t.czData.streamsStarted, 1)
- atomic.StoreInt64(&t.czData.lastStreamCreatedTime, time.Now().UnixNano())
- }
s.requestRead = func(n int) {
t.adjustWindow(s, uint32(n))
}
@@ -399,51 +370,39 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(
}
t.stats.HandleRPC(s.ctx, inHeader)
}
- s.ctxDone = s.ctx.Done()
- s.wq = newWriteQuota(defaultWriteQuota, s.ctxDone)
s.trReader = &transportReader{
reader: &recvBufferReader{
- ctx: s.ctx,
- ctxDone: s.ctxDone,
- recv: s.buf,
+ ctx: s.ctx,
+ recv: s.buf,
},
windowHandler: func(n int) {
t.updateWindow(s, uint32(n))
},
}
- // Register the stream with loopy.
- t.controlBuf.put(&registerStream{
- streamID: s.id,
- wq: s.wq,
- })
+ s.waiters = waiters{
+ ctx: s.ctx,
+ tctx: t.ctx,
+ }
handle(s)
- return false
+ return
}
// HandleStreams receives incoming streams using the given handler. This is
// typically run in a separate goroutine.
// traceCtx attaches trace to ctx and returns the new context.
func (t *http2Server) HandleStreams(handle func(*Stream), traceCtx func(context.Context, string) context.Context) {
- defer close(t.readerDone)
for {
frame, err := t.framer.fr.ReadFrame()
atomic.StoreUint32(&t.activity, 1)
if err != nil {
if se, ok := err.(http2.StreamError); ok {
- warningf("transport: http2Server.HandleStreams encountered http2.StreamError: %v", se)
t.mu.Lock()
s := t.activeStreams[se.StreamID]
t.mu.Unlock()
if s != nil {
- t.closeStream(s, true, se.Code, nil, false)
- } else {
- t.controlBuf.put(&cleanupStream{
- streamID: se.StreamID,
- rst: true,
- rstCode: se.Code,
- onWrite: func() {},
- })
+ t.closeStream(s)
}
+ t.controlBuf.put(&resetStream{se.StreamID, se.Code})
continue
}
if err == io.EOF || err == io.ErrUnexpectedEOF {
@@ -497,20 +456,33 @@ func (t *http2Server) getStream(f http2.Frame) (*Stream, bool) {
// of stream if the application is requesting data larger in size than
// the window.
func (t *http2Server) adjustWindow(s *Stream, n uint32) {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+ if s.state == streamDone {
+ return
+ }
if w := s.fc.maybeAdjust(n); w > 0 {
- t.controlBuf.put(&outgoingWindowUpdate{streamID: s.id, increment: w})
+ if cw := t.fc.resetPendingUpdate(); cw > 0 {
+ t.controlBuf.put(&windowUpdate{0, cw})
+ }
+ t.controlBuf.put(&windowUpdate{s.id, w})
}
-
}
// updateWindow adjusts the inbound quota for the stream and the transport.
// Window updates will be delivered to the controller for sending when
// the cumulative quota exceeds the corresponding threshold.
func (t *http2Server) updateWindow(s *Stream, n uint32) {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+ if s.state == streamDone {
+ return
+ }
if w := s.fc.onRead(n); w > 0 {
- t.controlBuf.put(&outgoingWindowUpdate{streamID: s.id,
- increment: w,
- })
+ if cw := t.fc.resetPendingUpdate(); cw > 0 {
+ t.controlBuf.put(&windowUpdate{0, cw})
+ }
+ t.controlBuf.put(&windowUpdate{s.id, w})
}
}
@@ -524,15 +496,12 @@ func (t *http2Server) updateFlowControl(n uint32) {
}
t.initialWindowSize = int32(n)
t.mu.Unlock()
- t.controlBuf.put(&outgoingWindowUpdate{
- streamID: 0,
- increment: t.fc.newLimit(n),
- })
- t.controlBuf.put(&outgoingSettings{
+ t.controlBuf.put(&windowUpdate{0, t.fc.newLimit(n)})
+ t.controlBuf.put(&settings{
ss: []http2.Setting{
{
ID: http2.SettingInitialWindowSize,
- Val: n,
+ Val: uint32(n),
},
},
})
@@ -543,7 +512,7 @@ func (t *http2Server) handleData(f *http2.DataFrame) {
size := f.Header().Length
var sendBDPPing bool
if t.bdpEst != nil {
- sendBDPPing = t.bdpEst.add(size)
+ sendBDPPing = t.bdpEst.add(uint32(size))
}
// Decouple connection's flow control from application's read.
// An update on connection's flow control should not depend on
@@ -553,22 +522,23 @@ func (t *http2Server) handleData(f *http2.DataFrame) {
// Decoupling the connection flow control will prevent other
// active(fast) streams from starving in presence of slow or
// inactive streams.
- if w := t.fc.onData(size); w > 0 {
- t.controlBuf.put(&outgoingWindowUpdate{
- streamID: 0,
- increment: w,
- })
- }
+ //
+ // Furthermore, if a bdpPing is being sent out, we can piggyback the
+ // connection's window update for the bytes we just received.
if sendBDPPing {
- // Avoid excessive ping detection (e.g. in an L7 proxy)
- // by sending a window update prior to the BDP ping.
- if w := t.fc.reset(); w > 0 {
- t.controlBuf.put(&outgoingWindowUpdate{
- streamID: 0,
- increment: w,
- })
+ if size != 0 { // Could be an empty frame.
+ t.controlBuf.put(&windowUpdate{0, uint32(size)})
}
t.controlBuf.put(bdpPing)
+ } else {
+ if err := t.fc.onData(uint32(size)); err != nil {
+ errorf("transport: http2Server %v", err)
+ t.Close()
+ return
+ }
+ if w := t.fc.onRead(uint32(size)); w > 0 {
+ t.controlBuf.put(&windowUpdate{0, w})
+ }
}
// Select the right stream to dispatch.
s, ok := t.getStream(f)
@@ -576,15 +546,23 @@ func (t *http2Server) handleData(f *http2.DataFrame) {
return
}
if size > 0 {
- if err := s.fc.onData(size); err != nil {
- t.closeStream(s, true, http2.ErrCodeFlowControl, nil, false)
+ s.mu.Lock()
+ if s.state == streamDone {
+ s.mu.Unlock()
+ return
+ }
+ if err := s.fc.onData(uint32(size)); err != nil {
+ s.mu.Unlock()
+ t.closeStream(s)
+ t.controlBuf.put(&resetStream{s.id, http2.ErrCodeFlowControl})
return
}
if f.Header().Flags.Has(http2.FlagDataPadded) {
- if w := s.fc.onRead(size - uint32(len(f.Data()))); w > 0 {
- t.controlBuf.put(&outgoingWindowUpdate{s.id, w})
+ if w := s.fc.onRead(uint32(size) - uint32(len(f.Data()))); w > 0 {
+ t.controlBuf.put(&windowUpdate{s.id, w})
}
}
+ s.mu.Unlock()
// TODO(bradfitz, zhaoq): A copy is required here because there is no
// guarantee f.Data() is consumed before the arrival of next frame.
// Can this copy be eliminated?
@@ -596,7 +574,11 @@ func (t *http2Server) handleData(f *http2.DataFrame) {
}
if f.Header().Flags.Has(http2.FlagDataEndStream) {
// Received the end of stream from the client.
- s.compareAndSwapState(streamActive, streamReadDone)
+ s.mu.Lock()
+ if s.state != streamDone {
+ s.state = streamReadDone
+ }
+ s.mu.Unlock()
s.write(recvMsg{err: io.EOF})
}
}
@@ -606,35 +588,50 @@ func (t *http2Server) handleRSTStream(f *http2.RSTStreamFrame) {
if !ok {
return
}
- t.closeStream(s, false, 0, nil, false)
+ t.closeStream(s)
}
func (t *http2Server) handleSettings(f *http2.SettingsFrame) {
if f.IsAck() {
return
}
- var ss []http2.Setting
- var updateFuncs []func()
+ var rs []http2.Setting
+ var ps []http2.Setting
f.ForeachSetting(func(s http2.Setting) error {
- switch s.ID {
- case http2.SettingMaxHeaderListSize:
- updateFuncs = append(updateFuncs, func() {
- t.maxSendHeaderListSize = new(uint32)
- *t.maxSendHeaderListSize = s.Val
- })
- default:
- ss = append(ss, s)
+ if t.isRestrictive(s) {
+ rs = append(rs, s)
+ } else {
+ ps = append(ps, s)
}
return nil
})
- t.controlBuf.executeAndPut(func(interface{}) bool {
- for _, f := range updateFuncs {
- f()
+ t.applySettings(rs)
+ t.controlBuf.put(&settingsAck{})
+ t.applySettings(ps)
+}
+
+func (t *http2Server) isRestrictive(s http2.Setting) bool {
+ switch s.ID {
+ case http2.SettingInitialWindowSize:
+ // Note: we don't acquire a lock here to read streamSendQuota
+ // because the same goroutine updates it later.
+ return s.Val < t.streamSendQuota
+ }
+ return false
+}
+
+func (t *http2Server) applySettings(ss []http2.Setting) {
+ for _, s := range ss {
+ if s.ID == http2.SettingInitialWindowSize {
+ t.mu.Lock()
+ for _, stream := range t.activeStreams {
+ stream.sendQuotaPool.addAndUpdate(int(s.Val) - int(t.streamSendQuota))
+ }
+ t.streamSendQuota = s.Val
+ t.mu.Unlock()
}
- return true
- }, &incomingSettings{
- ss: ss,
- })
+
+ }
}
const (
@@ -693,46 +690,33 @@ func (t *http2Server) handlePing(f *http2.PingFrame) {
}
func (t *http2Server) handleWindowUpdate(f *http2.WindowUpdateFrame) {
- t.controlBuf.put(&incomingWindowUpdate{
- streamID: f.Header().StreamID,
- increment: f.Increment,
- })
-}
-
-func appendHeaderFieldsFromMD(headerFields []hpack.HeaderField, md metadata.MD) []hpack.HeaderField {
- for k, vv := range md {
- if isReservedHeader(k) {
- // Clients don't tolerate reading restricted headers after some non restricted ones were sent.
- continue
- }
- for _, v := range vv {
- headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)})
- }
- }
- return headerFields
-}
-
-func (t *http2Server) checkForHeaderListSize(it interface{}) bool {
- if t.maxSendHeaderListSize == nil {
- return true
+ id := f.Header().StreamID
+ incr := f.Increment
+ if id == 0 {
+ t.sendQuotaPool.add(int(incr))
+ return
}
- hdrFrame := it.(*headerFrame)
- var sz int64
- for _, f := range hdrFrame.hf {
- if sz += int64(f.Size()); sz > int64(*t.maxSendHeaderListSize) {
- errorf("header list size to send violates the maximum size (%d bytes) set by client", *t.maxSendHeaderListSize)
- return false
- }
+ if s, ok := t.getStream(f); ok {
+ s.sendQuotaPool.add(int(incr))
}
- return true
}
// WriteHeader sends the header metadata md back to the client.
func (t *http2Server) WriteHeader(s *Stream, md metadata.MD) error {
- if s.updateHeaderSent() || s.getState() == streamDone {
+ select {
+ case <-s.ctx.Done():
+ return ContextErr(s.ctx.Err())
+ case <-t.ctx.Done():
+ return ErrConnClosing
+ default:
+ }
+
+ s.mu.Lock()
+ if s.headerOk || s.state == streamDone {
+ s.mu.Unlock()
return ErrIllegalHeaderWrite
}
- s.hdrMu.Lock()
+ s.headerOk = true
if md.Len() > 0 {
if s.header.Len() > 0 {
s.header = metadata.Join(s.header, md)
@@ -740,43 +724,34 @@ func (t *http2Server) WriteHeader(s *Stream, md metadata.MD) error {
s.header = md
}
}
- if err := t.writeHeaderLocked(s); err != nil {
- s.hdrMu.Unlock()
- return err
- }
- s.hdrMu.Unlock()
- return nil
-}
-
-func (t *http2Server) writeHeaderLocked(s *Stream) error {
+ md = s.header
+ s.mu.Unlock()
// TODO(mmukhi): Benchmark if the performance gets better if we count the metadata and other header fields
// first and create a slice of that exact size.
headerFields := make([]hpack.HeaderField, 0, 2) // at least :status, content-type will be there if none else.
headerFields = append(headerFields, hpack.HeaderField{Name: ":status", Value: "200"})
- headerFields = append(headerFields, hpack.HeaderField{Name: "content-type", Value: contentType(s.contentSubtype)})
+ headerFields = append(headerFields, hpack.HeaderField{Name: "content-type", Value: "application/grpc"})
if s.sendCompress != "" {
headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-encoding", Value: s.sendCompress})
}
- headerFields = appendHeaderFieldsFromMD(headerFields, s.header)
- success, err := t.controlBuf.executeAndPut(t.checkForHeaderListSize, &headerFrame{
+ for k, vv := range md {
+ if isReservedHeader(k) {
+ // Clients don't tolerate reading restricted headers after some non restricted ones were sent.
+ continue
+ }
+ for _, v := range vv {
+ headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)})
+ }
+ }
+ t.controlBuf.put(&headerFrame{
streamID: s.id,
hf: headerFields,
endStream: false,
- onWrite: func() {
- atomic.StoreUint32(&t.resetPingStrikes, 1)
- },
})
- if !success {
- if err != nil {
- return err
- }
- t.closeStream(s, true, http2.ErrCodeInternal, nil, false)
- return ErrHeaderListSizeLimitViolation
- }
if t.stats != nil {
- // Note: WireLength is not set in outHeader.
- // TODO(mmukhi): Revisit this later, if needed.
- outHeader := &stats.OutHeader{}
+ outHeader := &stats.OutHeader{
+ //WireLength: // TODO(mmukhi): Revisit this later, if needed.
+ }
t.stats.HandleRPC(s.Context(), outHeader)
}
return nil
@@ -787,23 +762,37 @@ func (t *http2Server) writeHeaderLocked(s *Stream) error {
// TODO(zhaoq): Now it indicates the end of entire stream. Revisit if early
// OK is adopted.
func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error {
- if s.getState() == streamDone {
+ select {
+ case <-t.ctx.Done():
+ return ErrConnClosing
+ default:
+ }
+
+ var headersSent, hasHeader bool
+ s.mu.Lock()
+ if s.state == streamDone {
+ s.mu.Unlock()
return nil
}
- s.hdrMu.Lock()
+ if s.headerOk {
+ headersSent = true
+ }
+ if s.header.Len() > 0 {
+ hasHeader = true
+ }
+ s.mu.Unlock()
+
+ if !headersSent && hasHeader {
+ t.WriteHeader(s, nil)
+ headersSent = true
+ }
+
// TODO(mmukhi): Benchmark if the performance gets better if count the metadata and other header fields
// first and create a slice of that exact size.
headerFields := make([]hpack.HeaderField, 0, 2) // grpc-status and grpc-message will be there if none else.
- if !s.updateHeaderSent() { // No headers have been sent.
- if len(s.header) > 0 { // Send a separate header frame.
- if err := t.writeHeaderLocked(s); err != nil {
- s.hdrMu.Unlock()
- return err
- }
- } else { // Send a trailer only response.
- headerFields = append(headerFields, hpack.HeaderField{Name: ":status", Value: "200"})
- headerFields = append(headerFields, hpack.HeaderField{Name: "content-type", Value: contentType(s.contentSubtype)})
- }
+ if !headersSent {
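+ // No HEADERS frame went out earlier, so this is a trailers-only response; include :status and content-type in the trailing frame.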
+ headerFields = append(headerFields, hpack.HeaderField{Name: ":status", Value: "200"})
+ headerFields = append(headerFields, hpack.HeaderField{Name: "content-type", Value: "application/grpc"})
}
headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-status", Value: strconv.Itoa(int(st.Code()))})
headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-message", Value: encodeGrpcMessage(st.Message())})
@@ -812,83 +801,129 @@ func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error {
stBytes, err := proto.Marshal(p)
if err != nil {
// TODO: return error instead, when callers are able to handle it.
- grpclog.Errorf("transport: failed to marshal rpc status: %v, error: %v", p, err)
- } else {
- headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-status-details-bin", Value: encodeBinHeader(stBytes)})
+ panic(err)
}
+
+ headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-status-details-bin", Value: encodeBinHeader(stBytes)})
}
// Attach the trailer metadata.
- headerFields = appendHeaderFieldsFromMD(headerFields, s.trailer)
- trailingHeader := &headerFrame{
+ for k, vv := range s.trailer {
+ // Clients don't tolerate reading restricted headers after some non-restricted ones were sent.
+ if isReservedHeader(k) {
+ continue
+ }
+ for _, v := range vv {
+ headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)})
+ }
+ }
+ t.controlBuf.put(&headerFrame{
streamID: s.id,
hf: headerFields,
endStream: true,
- onWrite: func() {
- atomic.StoreUint32(&t.resetPingStrikes, 1)
- },
- }
- s.hdrMu.Unlock()
- success, err := t.controlBuf.execute(t.checkForHeaderListSize, trailingHeader)
- if !success {
- if err != nil {
- return err
- }
- t.closeStream(s, true, http2.ErrCodeInternal, nil, false)
- return ErrHeaderListSizeLimitViolation
- }
- t.closeStream(s, false, 0, trailingHeader, true)
+ })
if t.stats != nil {
t.stats.HandleRPC(s.Context(), &stats.OutTrailer{})
}
+ t.closeStream(s)
return nil
}
// Write converts the data into an HTTP2 data frame and sends it out. A non-nil error
// is returned if it fails (e.g., framing error, transport error).
func (t *http2Server) Write(s *Stream, hdr []byte, data []byte, opts *Options) error {
- if !s.isHeaderSent() { // Headers haven't been written yet.
- if err := t.WriteHeader(s, nil); err != nil {
- // TODO(mmukhi, dfawley): Make sure this is the right code to return.
- return status.Errorf(codes.Internal, "transport: %v", err)
- }
- } else {
- // Writing headers checks for this condition.
- if s.getState() == streamDone {
- // TODO(mmukhi, dfawley): Should the server write also return io.EOF?
- s.cancel()
- select {
- case <-t.ctx.Done():
- return ErrConnClosing
- default:
- }
- return ContextErr(s.ctx.Err())
- }
+ select {
+ case <-s.ctx.Done():
+ return ContextErr(s.ctx.Err())
+ case <-t.ctx.Done():
+ return ErrConnClosing
+ default:
+ }
+
+ var writeHeaderFrame bool
+ s.mu.Lock()
+ if s.state == streamDone {
+ s.mu.Unlock()
+ return streamErrorf(codes.Unknown, "the stream has been done")
+ }
+ if !s.headerOk {
+ writeHeaderFrame = true
}
- // Add some data to header frame so that we can equally distribute bytes across frames.
+ s.mu.Unlock()
+ if writeHeaderFrame {
+ t.WriteHeader(s, nil)
+ }
+ // Add data to header frame so that we can equally distribute data across frames.
emptyLen := http2MaxFrameLen - len(hdr)
if emptyLen > len(data) {
emptyLen = len(data)
}
hdr = append(hdr, data[:emptyLen]...)
data = data[emptyLen:]
- df := &dataFrame{
- streamID: s.id,
- h: hdr,
- d: data,
- onEachWrite: func() {
+ var (
+ streamQuota int
+ streamQuotaVer uint32
+ err error
+ )
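+ // Each chunk below must clear three budgets before it is queued: the per-stream send quota, the connection-level send quota, and localSendQuota, a soft limit on how much is buffered locally.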
+ for _, r := range [][]byte{hdr, data} {
+ for len(r) > 0 {
+ size := http2MaxFrameLen
+ if size > len(r) {
+ size = len(r)
+ }
+ if streamQuota == 0 { // Used up all the locally cached stream quota.
+ // Get all the stream quota there is.
+ streamQuota, streamQuotaVer, err = s.sendQuotaPool.get(math.MaxInt32, s.waiters)
+ if err != nil {
+ return err
+ }
+ }
+ if size > streamQuota {
+ size = streamQuota
+ }
+ // Get size worth quota from transport.
+ tq, _, err := t.sendQuotaPool.get(size, s.waiters)
+ if err != nil {
+ return err
+ }
+ if tq < size {
+ size = tq
+ }
+ ltq, _, err := t.localSendQuota.get(size, s.waiters)
+ if err != nil {
+ return err
+ }
+ // Even if ltq is smaller than size we don't adjust size, since
+ // ltq is only a soft limit.
+ streamQuota -= size
+ p := r[:size]
+ // Reset ping strikes when sending data since this might cause
+ // the peer to send ping.
atomic.StoreUint32(&t.resetPingStrikes, 1)
- },
- }
- if err := s.wq.get(int32(len(hdr) + len(data))); err != nil {
- select {
- case <-t.ctx.Done():
- return ErrConnClosing
- default:
+ success := func() {
+ ltq := ltq
+ t.controlBuf.put(&dataFrame{streamID: s.id, endStream: false, d: p, f: func() {
+ t.localSendQuota.add(ltq)
+ }})
+ r = r[size:]
+ }
+ failure := func() { // The stream quota version must have changed.
+ // Our streamQuota cache is invalidated now, so give it back.
+ s.sendQuotaPool.lockedAdd(streamQuota + size)
+ }
+ if !s.sendQuotaPool.compareAndExecute(streamQuotaVer, success, failure) {
+ // Couldn't send this chunk out.
+ t.sendQuotaPool.add(size)
+ t.localSendQuota.add(ltq)
+ streamQuota = 0
+ }
}
- return ContextErr(s.ctx.Err())
}
- return t.controlBuf.put(df)
+ if streamQuota > 0 {
+ // Add the leftover quota back to the stream.
+ s.sendQuotaPool.add(streamQuota)
+ }
+ return nil
}
// keepalive running in a separate goroutine does the following:
@@ -933,7 +968,7 @@ func (t *http2Server) keepalive() {
// The connection has been idle for a duration of keepalive.MaxConnectionIdle or more.
// Gracefully close the connection.
t.drain(http2.ErrCodeNo, []byte{})
- // Resetting the timer so that the clean-up doesn't deadlock.
+ // Resetting the timer so that the clean-up doesn't deadlock.
maxIdle.Reset(infinity)
return
}
@@ -945,7 +980,7 @@ func (t *http2Server) keepalive() {
case <-maxAge.C:
// Close the connection after grace period.
t.Close()
- // Resetting the timer so that the clean-up doesn't deadlock.
+ // Resetting the timer so that the clean-up doesn't deadlock.
maxAge.Reset(infinity)
case <-t.ctx.Done():
}
@@ -958,14 +993,11 @@ func (t *http2Server) keepalive() {
}
if pingSent {
t.Close()
- // Resetting the timer so that the clean-up doesn't deadlock.
+ // Resetting the timer so that the clean-up doesn't deadlock.
keepalive.Reset(infinity)
return
}
pingSent = true
- if channelz.IsOn() {
- atomic.AddInt64(&t.czData.kpCount, 1)
- }
t.controlBuf.put(p)
keepalive.Reset(t.kp.Timeout)
case <-t.ctx.Done():
@@ -974,6 +1006,133 @@ func (t *http2Server) keepalive() {
}
}
+var goAwayPing = &ping{data: [8]byte{1, 6, 1, 8, 0, 3, 3, 9}}
+
+// TODO(mmukhi): A lot of this code (and code in other places in the transport layer)
+// is duplicated between the client and the server.
+// The transport layer needs to be refactored to take care of this.
+func (t *http2Server) itemHandler(i item) error {
+ switch i := i.(type) {
+ case *dataFrame:
+ if err := t.framer.fr.WriteData(i.streamID, i.endStream, i.d); err != nil {
+ return err
+ }
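+ // The frame has been handed to the framer; run the completion callback (Write uses it to return the borrowed localSendQuota).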
+ i.f()
+ return nil
+ case *headerFrame:
+ t.hBuf.Reset()
+ for _, f := range i.hf {
+ t.hEnc.WriteField(f)
+ }
+ first := true
+ endHeaders := false
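+ // Emit the header block as one HEADERS frame followed, if needed, by CONTINUATION frames of at most http2MaxFrameLen bytes each.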
+ for !endHeaders {
+ size := t.hBuf.Len()
+ if size > http2MaxFrameLen {
+ size = http2MaxFrameLen
+ } else {
+ endHeaders = true
+ }
+ var err error
+ if first {
+ first = false
+ err = t.framer.fr.WriteHeaders(http2.HeadersFrameParam{
+ StreamID: i.streamID,
+ BlockFragment: t.hBuf.Next(size),
+ EndStream: i.endStream,
+ EndHeaders: endHeaders,
+ })
+ } else {
+ err = t.framer.fr.WriteContinuation(
+ i.streamID,
+ endHeaders,
+ t.hBuf.Next(size),
+ )
+ }
+ if err != nil {
+ return err
+ }
+ }
+ atomic.StoreUint32(&t.resetPingStrikes, 1)
+ return nil
+ case *windowUpdate:
+ return t.framer.fr.WriteWindowUpdate(i.streamID, i.increment)
+ case *settings:
+ return t.framer.fr.WriteSettings(i.ss...)
+ case *settingsAck:
+ return t.framer.fr.WriteSettingsAck()
+ case *resetStream:
+ return t.framer.fr.WriteRSTStream(i.streamID, i.code)
+ case *goAway:
+ t.mu.Lock()
+ if t.state == closing {
+ t.mu.Unlock()
+ // The transport is closing.
+ return fmt.Errorf("transport: Connection closing")
+ }
+ sid := t.maxStreamID
+ if !i.headsUp {
+ // Stop accepting more streams now.
+ t.state = draining
+ if len(t.activeStreams) == 0 {
+ i.closeConn = true
+ }
+ t.mu.Unlock()
+ if err := t.framer.fr.WriteGoAway(sid, i.code, i.debugData); err != nil {
+ return err
+ }
+ if i.closeConn {
+ // Abruptly close the connection following the GoAway (via
+ // loopywriter). But flush out what's inside the buffer first.
+ t.controlBuf.put(&flushIO{closeTr: true})
+ }
+ return nil
+ }
+ t.mu.Unlock()
+ // For a graceful close, send out a GoAway with stream ID of MaxUInt32,
+ // Follow that with a ping and wait for the ack to come back or a timer
+ // to expire. During this time accept new streams since they might have
+ // originated before the GoAway reaches the client.
+ // After getting the ack or timer expiration send out another GoAway this
+ // time with an ID of the max stream server intends to process.
+ if err := t.framer.fr.WriteGoAway(math.MaxUint32, http2.ErrCodeNo, []byte{}); err != nil {
+ return err
+ }
+ if err := t.framer.fr.WritePing(false, goAwayPing.data); err != nil {
+ return err
+ }
+ go func() {
+ timer := time.NewTimer(time.Minute)
+ defer timer.Stop()
+ select {
+ case <-t.drainChan:
+ case <-timer.C:
+ case <-t.ctx.Done():
+ return
+ }
+ t.controlBuf.put(&goAway{code: i.code, debugData: i.debugData})
+ }()
+ return nil
+ case *flushIO:
+ if err := t.framer.writer.Flush(); err != nil {
+ return err
+ }
+ if i.closeTr {
+ return ErrConnClosing
+ }
+ return nil
+ case *ping:
+ if !i.ack {
+ t.bdpEst.timesnap(i.data)
+ }
+ return t.framer.fr.WritePing(i.ack, i.data)
+ default:
+ err := status.Errorf(codes.Internal, "transport: http2Server.controller got unexpected item type %t", i)
+ errorf("%v", err)
+ return err
+ }
+}
+
// Close starts shutting down the http2Server transport.
// TODO(zhaoq): Now the destruction is not blocked on any pending streams. This
// could cause some resource issue. Revisit this later.
@@ -987,12 +1146,8 @@ func (t *http2Server) Close() error {
streams := t.activeStreams
t.activeStreams = nil
t.mu.Unlock()
- t.controlBuf.finish()
t.cancel()
err := t.conn.Close()
- if channelz.IsOn() {
- channelz.RemoveEntry(t.channelzID)
- }
// Cancel all active streams.
for _, s := range streams {
s.cancel()
@@ -1006,43 +1161,27 @@ func (t *http2Server) Close() error {
// closeStream clears the footprint of a stream when the stream is not needed
// any more.
-func (t *http2Server) closeStream(s *Stream, rst bool, rstCode http2.ErrCode, hdr *headerFrame, eosReceived bool) {
- if s.swapState(streamDone) == streamDone {
- // If the stream was already done, return.
- return
+func (t *http2Server) closeStream(s *Stream) {
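+ // Drop the stream from the active set; if the transport is draining and no streams remain, queue a final flush that closes the connection.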
+ t.mu.Lock()
+ delete(t.activeStreams, s.id)
+ if len(t.activeStreams) == 0 {
+ t.idle = time.Now()
}
+ if t.state == draining && len(t.activeStreams) == 0 {
+ defer t.controlBuf.put(&flushIO{closeTr: true})
+ }
+ t.mu.Unlock()
// In case stream sending and receiving are invoked in separate
// goroutines (e.g., bi-directional streaming), cancel needs to be
// called to interrupt the potential blocking on other goroutines.
s.cancel()
- cleanup := &cleanupStream{
- streamID: s.id,
- rst: rst,
- rstCode: rstCode,
- onWrite: func() {
- t.mu.Lock()
- if t.activeStreams != nil {
- delete(t.activeStreams, s.id)
- if len(t.activeStreams) == 0 {
- t.idle = time.Now()
- }
- }
- t.mu.Unlock()
- if channelz.IsOn() {
- if eosReceived {
- atomic.AddInt64(&t.czData.streamsSucceeded, 1)
- } else {
- atomic.AddInt64(&t.czData.streamsFailed, 1)
- }
- }
- },
- }
- if hdr != nil {
- hdr.cleanup = cleanup
- t.controlBuf.put(hdr)
- } else {
- t.controlBuf.put(cleanup)
+ s.mu.Lock()
+ if s.state == streamDone {
+ s.mu.Unlock()
+ return
}
+ s.state = streamDone
+ s.mu.Unlock()
}
func (t *http2Server) RemoteAddr() net.Addr {
@@ -1063,111 +1202,7 @@ func (t *http2Server) drain(code http2.ErrCode, debugData []byte) {
t.controlBuf.put(&goAway{code: code, debugData: debugData, headsUp: true})
}
-var goAwayPing = &ping{data: [8]byte{1, 6, 1, 8, 0, 3, 3, 9}}
-
-// Handles outgoing GoAway and returns true if loopy needs to put itself
-// in draining mode.
-func (t *http2Server) outgoingGoAwayHandler(g *goAway) (bool, error) {
- t.mu.Lock()
- if t.state == closing { // TODO(mmukhi): This seems unnecessary.
- t.mu.Unlock()
- // The transport is closing.
- return false, ErrConnClosing
- }
- sid := t.maxStreamID
- if !g.headsUp {
- // Stop accepting more streams now.
- t.state = draining
- if len(t.activeStreams) == 0 {
- g.closeConn = true
- }
- t.mu.Unlock()
- if err := t.framer.fr.WriteGoAway(sid, g.code, g.debugData); err != nil {
- return false, err
- }
- if g.closeConn {
- // Abruptly close the connection following the GoAway (via
- // loopywriter). But flush out what's inside the buffer first.
- t.framer.writer.Flush()
- return false, fmt.Errorf("transport: Connection closing")
- }
- return true, nil
- }
- t.mu.Unlock()
- // For a graceful close, send out a GoAway with stream ID of MaxUInt32,
- // Follow that with a ping and wait for the ack to come back or a timer
- // to expire. During this time accept new streams since they might have
- // originated before the GoAway reaches the client.
- // After getting the ack or timer expiration send out another GoAway this
- // time with an ID of the max stream server intends to process.
- if err := t.framer.fr.WriteGoAway(math.MaxUint32, http2.ErrCodeNo, []byte{}); err != nil {
- return false, err
- }
- if err := t.framer.fr.WritePing(false, goAwayPing.data); err != nil {
- return false, err
- }
- go func() {
- timer := time.NewTimer(time.Minute)
- defer timer.Stop()
- select {
- case <-t.drainChan:
- case <-timer.C:
- case <-t.ctx.Done():
- return
- }
- t.controlBuf.put(&goAway{code: g.code, debugData: g.debugData})
- }()
- return false, nil
-}
-
-func (t *http2Server) ChannelzMetric() *channelz.SocketInternalMetric {
- s := channelz.SocketInternalMetric{
- StreamsStarted: atomic.LoadInt64(&t.czData.streamsStarted),
- StreamsSucceeded: atomic.LoadInt64(&t.czData.streamsSucceeded),
- StreamsFailed: atomic.LoadInt64(&t.czData.streamsFailed),
- MessagesSent: atomic.LoadInt64(&t.czData.msgSent),
- MessagesReceived: atomic.LoadInt64(&t.czData.msgRecv),
- KeepAlivesSent: atomic.LoadInt64(&t.czData.kpCount),
- LastRemoteStreamCreatedTimestamp: time.Unix(0, atomic.LoadInt64(&t.czData.lastStreamCreatedTime)),
- LastMessageSentTimestamp: time.Unix(0, atomic.LoadInt64(&t.czData.lastMsgSentTime)),
- LastMessageReceivedTimestamp: time.Unix(0, atomic.LoadInt64(&t.czData.lastMsgRecvTime)),
- LocalFlowControlWindow: int64(t.fc.getSize()),
- SocketOptions: channelz.GetSocketOption(t.conn),
- LocalAddr: t.localAddr,
- RemoteAddr: t.remoteAddr,
- // RemoteName :
- }
- if au, ok := t.authInfo.(credentials.ChannelzSecurityInfo); ok {
- s.Security = au.GetSecurityValue()
- }
- s.RemoteFlowControlWindow = t.getOutFlowWindow()
- return &s
-}
-
-func (t *http2Server) IncrMsgSent() {
- atomic.AddInt64(&t.czData.msgSent, 1)
- atomic.StoreInt64(&t.czData.lastMsgSentTime, time.Now().UnixNano())
-}
-
-func (t *http2Server) IncrMsgRecv() {
- atomic.AddInt64(&t.czData.msgRecv, 1)
- atomic.StoreInt64(&t.czData.lastMsgRecvTime, time.Now().UnixNano())
-}
-
-func (t *http2Server) getOutFlowWindow() int64 {
- resp := make(chan uint32)
- timer := time.NewTimer(time.Second)
- defer timer.Stop()
- t.controlBuf.put(&outFlowControlSizeRequest{resp})
- select {
- case sz := <-resp:
- return int64(sz)
- case <-t.ctxDone:
- return -1
- case <-timer.C:
- return -2
- }
-}
+var rgen = rand.New(rand.NewSource(time.Now().UnixNano()))
func getJitter(v time.Duration) time.Duration {
if v == infinity {
@@ -1175,6 +1210,6 @@ func getJitter(v time.Duration) time.Duration {
}
// Generate a jitter between +/- 10% of the value.
r := int64(v / 10)
- j := grpcrand.Int63n(2*r) - r
+ j := rgen.Int63n(2*r) - r
return time.Duration(j)
}
diff --git a/vendor/google.golang.org/grpc/internal/transport/http_util.go b/vendor/google.golang.org/grpc/transport/http_util.go
index 77a2cfaae..39f878cfd 100644
--- a/vendor/google.golang.org/grpc/internal/transport/http_util.go
+++ b/vendor/google.golang.org/grpc/transport/http_util.go
@@ -24,13 +24,11 @@ import (
"encoding/base64"
"fmt"
"io"
- "math"
"net"
"net/http"
"strconv"
"strings"
"time"
- "unicode/utf8"
"github.com/golang/protobuf/proto"
"golang.org/x/net/http2"
@@ -45,12 +43,9 @@ const (
http2MaxFrameLen = 16384 // 16KB frame
// http://http2.github.io/http2-spec/#SettingValues
http2InitHeaderTableSize = 4096
- // baseContentType is the base content-type for gRPC. This is a valid
- // content-type on it's own, but can also include a content-subtype such as
- // "proto" as a suffix after "+" or ";". See
- // https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests
- // for more details.
- baseContentType = "application/grpc"
+ // defaultWriteBufSize and defaultReadBufSize specify the default buffer sizes for writing and reading frames.
+ defaultWriteBufSize = 32 * 1024
+ defaultReadBufSize = 32 * 1024
)
var (
@@ -69,7 +64,7 @@ var (
http2.ErrCodeConnect: codes.Internal,
http2.ErrCodeEnhanceYourCalm: codes.ResourceExhausted,
http2.ErrCodeInadequateSecurity: codes.PermissionDenied,
- http2.ErrCodeHTTP11Required: codes.Internal,
+ http2.ErrCodeHTTP11Required: codes.FailedPrecondition,
}
statusCodeConvTab = map[codes.Code]http2.ErrCode{
codes.Internal: http2.ErrCodeInternal,
@@ -116,12 +111,9 @@ type decodeState struct {
timeout time.Duration
method string
// key-value metadata map from the peer.
- mdata map[string][]string
- statsTags []byte
- statsTrace []byte
- contentSubtype string
- // whether decoding on server side or not
- serverSide bool
+ mdata map[string][]string
+ statsTags []byte
+ statsTrace []byte
}
// isReservedHeader checks whether hdr belongs to HTTP2 headers
@@ -133,16 +125,12 @@ func isReservedHeader(hdr string) bool {
}
switch hdr {
case "content-type",
- "user-agent",
"grpc-message-type",
"grpc-encoding",
"grpc-message",
"grpc-status",
"grpc-timeout",
"grpc-status-details-bin",
- // Intentionally exclude grpc-previous-rpc-attempts and
- // grpc-retry-pushback-ms, which are "reserved", but their API
- // intentionally works via metadata.
"te":
return true
default:
@@ -150,55 +138,28 @@ func isReservedHeader(hdr string) bool {
}
}
-// isWhitelistedHeader checks whether hdr should be propagated into metadata
-// visible to users, even though it is classified as "reserved", above.
-func isWhitelistedHeader(hdr string) bool {
+// isWhitelistedPseudoHeader checks whether hdr belongs to HTTP2 pseudoheaders
+// that should be propagated into metadata visible to users.
+func isWhitelistedPseudoHeader(hdr string) bool {
switch hdr {
- case ":authority", "user-agent":
+ case ":authority":
return true
default:
return false
}
}
-// contentSubtype returns the content-subtype for the given content-type. The
-// given content-type must be a valid content-type that starts with
-// "application/grpc". A content-subtype will follow "application/grpc" after a
-// "+" or ";". See
-// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for
-// more details.
-//
-// If contentType is not a valid content-type for gRPC, the boolean
-// will be false, otherwise true. If content-type == "application/grpc",
-// "application/grpc+", or "application/grpc;", the boolean will be true,
-// but no content-subtype will be returned.
-//
-// contentType is assumed to be lowercase already.
-func contentSubtype(contentType string) (string, bool) {
- if contentType == baseContentType {
- return "", true
- }
- if !strings.HasPrefix(contentType, baseContentType) {
- return "", false
- }
- // guaranteed since != baseContentType and has baseContentType prefix
- switch contentType[len(baseContentType)] {
- case '+', ';':
- // this will return true for "application/grpc+" or "application/grpc;"
- // which the previous validContentType function tested to be valid, so we
- // just say that no content-subtype is specified in this case
- return contentType[len(baseContentType)+1:], true
- default:
- return "", false
+func validContentType(t string) bool {
+ e := "application/grpc"
+ if !strings.HasPrefix(t, e) {
+ return false
}
-}
-
-// contentSubtype is assumed to be lowercase
-func contentType(contentSubtype string) string {
- if contentSubtype == "" {
- return baseContentType
+ // Support variations on the content-type
+ // (e.g. "application/grpc+blah", "application/grpc;blah").
+ if len(t) > len(e) && t[len(e)] != '+' && t[len(e)] != ';' {
+ return false
}
- return baseContentType + "+" + contentSubtype
+ return true
}
func (d *decodeState) status() *status.Status {
@@ -238,22 +199,13 @@ func decodeMetadataHeader(k, v string) (string, error) {
return v, nil
}
-func (d *decodeState) decodeHeader(frame *http2.MetaHeadersFrame) error {
- // frame.Truncated is set to true when framer detects that the current header
- // list size hits MaxHeaderListSize limit.
- if frame.Truncated {
- return status.Error(codes.Internal, "peer header list size exceeded limit")
- }
+func (d *decodeState) decodeResponseHeader(frame *http2.MetaHeadersFrame) error {
for _, hf := range frame.Fields {
if err := d.processHeaderField(hf); err != nil {
return err
}
}
- if d.serverSide {
- return nil
- }
-
// If grpc status exists, no need to check further.
if d.rawStatusCode != nil || d.statusGen != nil {
return nil
@@ -262,7 +214,7 @@ func (d *decodeState) decodeHeader(frame *http2.MetaHeadersFrame) error {
// If grpc status doesn't exist and http status doesn't exist,
// then it's a malformed header.
if d.httpStatus == nil {
- return status.Error(codes.Internal, "malformed header: doesn't contain status(gRPC or HTTP)")
+ return streamErrorf(codes.Internal, "malformed header: doesn't contain status(gRPC or HTTP)")
}
if *(d.httpStatus) != http.StatusOK {
@@ -270,18 +222,19 @@ func (d *decodeState) decodeHeader(frame *http2.MetaHeadersFrame) error {
if !ok {
code = codes.Unknown
}
- return status.Error(code, http.StatusText(*(d.httpStatus)))
+ return streamErrorf(code, http.StatusText(*(d.httpStatus)))
}
// gRPC status doesn't exist and http status is OK.
// Set rawStatusCode to be unknown and return nil error.
// So that, if the stream has ended this Unknown status
- // will be propagated to the user.
+ // will be propagated to the user.
// Otherwise, it will be ignored. In which case, status from
- // a later trailer, that has StreamEnded flag set, is propagated.
+ // a later trailer, that has StreamEnded flag set, is propagated.
code := int(codes.Unknown)
d.rawStatusCode = &code
return nil
+
}
func (d *decodeState) addMetadata(k, v string) {
@@ -294,22 +247,15 @@ func (d *decodeState) addMetadata(k, v string) {
func (d *decodeState) processHeaderField(f hpack.HeaderField) error {
switch f.Name {
case "content-type":
- contentSubtype, validContentType := contentSubtype(f.Value)
- if !validContentType {
- return status.Errorf(codes.Internal, "transport: received the unexpected content-type %q", f.Value)
+ if !validContentType(f.Value) {
+ return streamErrorf(codes.FailedPrecondition, "transport: received the unexpected content-type %q", f.Value)
}
- d.contentSubtype = contentSubtype
- // TODO: do we want to propagate the whole content-type in the metadata,
- // or come up with a way to just propagate the content-subtype if it was set?
- // ie {"content-type": "application/grpc+proto"} or {"content-subtype": "proto"}
- // in the metadata?
- d.addMetadata(f.Name, f.Value)
case "grpc-encoding":
d.encoding = f.Value
case "grpc-status":
code, err := strconv.Atoi(f.Value)
if err != nil {
- return status.Errorf(codes.Internal, "transport: malformed grpc-status: %v", err)
+ return streamErrorf(codes.Internal, "transport: malformed grpc-status: %v", err)
}
d.rawStatusCode = &code
case "grpc-message":
@@ -317,43 +263,43 @@ func (d *decodeState) processHeaderField(f hpack.HeaderField) error {
case "grpc-status-details-bin":
v, err := decodeBinHeader(f.Value)
if err != nil {
- return status.Errorf(codes.Internal, "transport: malformed grpc-status-details-bin: %v", err)
+ return streamErrorf(codes.Internal, "transport: malformed grpc-status-details-bin: %v", err)
}
s := &spb.Status{}
if err := proto.Unmarshal(v, s); err != nil {
- return status.Errorf(codes.Internal, "transport: malformed grpc-status-details-bin: %v", err)
+ return streamErrorf(codes.Internal, "transport: malformed grpc-status-details-bin: %v", err)
}
d.statusGen = status.FromProto(s)
case "grpc-timeout":
d.timeoutSet = true
var err error
if d.timeout, err = decodeTimeout(f.Value); err != nil {
- return status.Errorf(codes.Internal, "transport: malformed time-out: %v", err)
+ return streamErrorf(codes.Internal, "transport: malformed time-out: %v", err)
}
case ":path":
d.method = f.Value
case ":status":
code, err := strconv.Atoi(f.Value)
if err != nil {
- return status.Errorf(codes.Internal, "transport: malformed http-status: %v", err)
+ return streamErrorf(codes.Internal, "transport: malformed http-status: %v", err)
}
d.httpStatus = &code
case "grpc-tags-bin":
v, err := decodeBinHeader(f.Value)
if err != nil {
- return status.Errorf(codes.Internal, "transport: malformed grpc-tags-bin: %v", err)
+ return streamErrorf(codes.Internal, "transport: malformed grpc-tags-bin: %v", err)
}
d.statsTags = v
d.addMetadata(f.Name, string(v))
case "grpc-trace-bin":
v, err := decodeBinHeader(f.Value)
if err != nil {
- return status.Errorf(codes.Internal, "transport: malformed grpc-trace-bin: %v", err)
+ return streamErrorf(codes.Internal, "transport: malformed grpc-trace-bin: %v", err)
}
d.statsTrace = v
d.addMetadata(f.Name, string(v))
default:
- if isReservedHeader(f.Name) && !isWhitelistedHeader(f.Name) {
+ if isReservedHeader(f.Name) && !isWhitelistedPseudoHeader(f.Name) {
break
}
v, err := decodeMetadataHeader(f.Name, f.Value)
@@ -361,7 +307,7 @@ func (d *decodeState) processHeaderField(f hpack.HeaderField) error {
errorf("Failed to decode metadata header (%q, %q): %v", f.Name, f.Value, err)
return nil
}
- d.addMetadata(f.Name, v)
+ d.addMetadata(f.Name, string(v))
}
return nil
}
@@ -436,10 +382,6 @@ func decodeTimeout(s string) (time.Duration, error) {
if size < 2 {
return 0, fmt.Errorf("transport: timeout string is too short: %q", s)
}
- if size > 9 {
- // Spec allows for 8 digits plus the unit.
- return 0, fmt.Errorf("transport: timeout string is too long: %q", s)
- }
unit := timeoutUnit(s[size-1])
d, ok := timeoutUnitToDuration(unit)
if !ok {
@@ -449,27 +391,21 @@ func decodeTimeout(s string) (time.Duration, error) {
if err != nil {
return 0, err
}
- const maxHours = math.MaxInt64 / int64(time.Hour)
- if d == time.Hour && t > maxHours {
- // This timeout would overflow math.MaxInt64; clamp it.
- return time.Duration(math.MaxInt64), nil
- }
return d * time.Duration(t), nil
}
const (
spaceByte = ' '
- tildeByte = '~'
+ tildaByte = '~'
percentByte = '%'
)
// encodeGrpcMessage is used to encode status code in header field
-// "grpc-message". It does percent encoding and also replaces invalid utf-8
-// characters with Unicode replacement character.
-//
-// It checks to see if each individual byte in msg is an allowable byte, and
-// then either percent encoding or passing it through. When percent encoding,
-// the byte is converted into hexadecimal notation with a '%' prepended.
+// "grpc-message".
+// It checks whether each individual byte in msg is an allowable byte and
+// then either percent-encodes it or passes it through.
+// When percent encoding, the byte is converted into hexadecimal notation
+// with a '%' prepended.
func encodeGrpcMessage(msg string) string {
if msg == "" {
return ""
@@ -477,7 +413,7 @@ func encodeGrpcMessage(msg string) string {
lenMsg := len(msg)
for i := 0; i < lenMsg; i++ {
c := msg[i]
- if !(c >= spaceByte && c <= tildeByte && c != percentByte) {
+ if !(c >= spaceByte && c < tildaByte && c != percentByte) {
return encodeGrpcMessageUnchecked(msg)
}
}
@@ -486,26 +422,14 @@ func encodeGrpcMessage(msg string) string {
func encodeGrpcMessageUnchecked(msg string) string {
var buf bytes.Buffer
- for len(msg) > 0 {
- r, size := utf8.DecodeRuneInString(msg)
- for _, b := range []byte(string(r)) {
- if size > 1 {
- // If size > 1, r is not ascii. Always do percent encoding.
- buf.WriteString(fmt.Sprintf("%%%02X", b))
- continue
- }
-
- // The for loop is necessary even if size == 1. r could be
- // utf8.RuneError.
- //
- // fmt.Sprintf("%%%02X", utf8.RuneError) gives "%FFFD".
- if b >= spaceByte && b <= tildeByte && b != percentByte {
- buf.WriteByte(b)
- } else {
- buf.WriteString(fmt.Sprintf("%%%02X", b))
- }
+ lenMsg := len(msg)
+ for i := 0; i < lenMsg; i++ {
+ c := msg[i]
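+ // Bytes from space up to (but not including) '~', except '%', pass through; anything else is hex-escaped, e.g. '\n' becomes "%0A".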
+ if c >= spaceByte && c < tildaByte && c != percentByte {
+ buf.WriteByte(c)
+ } else {
+ buf.WriteString(fmt.Sprintf("%%%02X", c))
}
- msg = msg[size:]
}
return buf.String()
}
@@ -544,80 +468,22 @@ func decodeGrpcMessageUnchecked(msg string) string {
return buf.String()
}
-type bufWriter struct {
- buf []byte
- offset int
- batchSize int
- conn net.Conn
- err error
-
- onFlush func()
-}
-
-func newBufWriter(conn net.Conn, batchSize int) *bufWriter {
- return &bufWriter{
- buf: make([]byte, batchSize*2),
- batchSize: batchSize,
- conn: conn,
- }
-}
-
-func (w *bufWriter) Write(b []byte) (n int, err error) {
- if w.err != nil {
- return 0, w.err
- }
- if w.batchSize == 0 { // Buffer has been disabled.
- return w.conn.Write(b)
- }
- for len(b) > 0 {
- nn := copy(w.buf[w.offset:], b)
- b = b[nn:]
- w.offset += nn
- n += nn
- if w.offset >= w.batchSize {
- err = w.Flush()
- }
- }
- return n, err
-}
-
-func (w *bufWriter) Flush() error {
- if w.err != nil {
- return w.err
- }
- if w.offset == 0 {
- return nil
- }
- if w.onFlush != nil {
- w.onFlush()
- }
- _, w.err = w.conn.Write(w.buf[:w.offset])
- w.offset = 0
- return w.err
-}
-
type framer struct {
- writer *bufWriter
- fr *http2.Framer
+ numWriters int32
+ reader io.Reader
+ writer *bufio.Writer
+ fr *http2.Framer
}
-func newFramer(conn net.Conn, writeBufferSize, readBufferSize int, maxHeaderListSize uint32) *framer {
- if writeBufferSize < 0 {
- writeBufferSize = 0
- }
- var r io.Reader = conn
- if readBufferSize > 0 {
- r = bufio.NewReaderSize(r, readBufferSize)
- }
- w := newBufWriter(conn, writeBufferSize)
+func newFramer(conn net.Conn, writeBufferSize, readBufferSize int) *framer {
f := &framer{
- writer: w,
- fr: http2.NewFramer(w, r),
+ reader: bufio.NewReaderSize(conn, readBufferSize),
+ writer: bufio.NewWriterSize(conn, writeBufferSize),
}
+ f.fr = http2.NewFramer(f.writer, f.reader)
// Opt-in to Frame reuse API on framer to reduce garbage.
// Frames aren't safe to read from after a subsequent call to ReadFrame.
f.fr.SetReuseFrames()
- f.fr.MaxHeaderListSize = maxHeaderListSize
f.fr.ReadMetaHeaders = hpack.NewDecoder(http2InitHeaderTableSize, nil)
return f
}
diff --git a/vendor/google.golang.org/grpc/internal/transport/log.go b/vendor/google.golang.org/grpc/transport/log.go
index 879df80c4..ac8e358c5 100644
--- a/vendor/google.golang.org/grpc/internal/transport/log.go
+++ b/vendor/google.golang.org/grpc/transport/log.go
@@ -42,3 +42,9 @@ func errorf(format string, args ...interface{}) {
grpclog.Errorf(format, args...)
}
}
+
+func fatalf(format string, args ...interface{}) {
+ if grpclog.V(logLevel) {
+ grpclog.Fatalf(format, args...)
+ }
+}
diff --git a/vendor/google.golang.org/grpc/internal/transport/transport.go b/vendor/google.golang.org/grpc/transport/transport.go
index 1be518a62..2e7bcaeaa 100644
--- a/vendor/google.golang.org/grpc/internal/transport/transport.go
+++ b/vendor/google.golang.org/grpc/transport/transport.go
@@ -19,17 +19,16 @@
// Package transport defines and implements message oriented communication
// channel to complete various transactions (e.g., an RPC). It is meant for
// grpc-internal usage and is not intended to be imported directly by users.
-package transport
+package transport // import "google.golang.org/grpc/transport"
import (
- "errors"
"fmt"
"io"
"net"
"sync"
- "sync/atomic"
"golang.org/x/net/context"
+ "golang.org/x/net/http2"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/credentials"
"google.golang.org/grpc/keepalive"
@@ -58,7 +57,6 @@ type recvBuffer struct {
c chan recvMsg
mu sync.Mutex
backlog []recvMsg
- err error
}
func newRecvBuffer() *recvBuffer {
@@ -70,13 +68,6 @@ func newRecvBuffer() *recvBuffer {
func (b *recvBuffer) put(r recvMsg) {
b.mu.Lock()
- if b.err != nil {
- b.mu.Unlock()
- // An error had occurred earlier, don't accept more
- // data or errors.
- return
- }
- b.err = r.err
if len(b.backlog) == 0 {
select {
case b.c <- r:
@@ -110,15 +101,14 @@ func (b *recvBuffer) get() <-chan recvMsg {
return b.c
}
-//
// recvBufferReader implements io.Reader interface to read the data from
// recvBuffer.
type recvBufferReader struct {
- ctx context.Context
- ctxDone <-chan struct{} // cache of ctx.Done() (for performance).
- recv *recvBuffer
- last []byte // Stores the remaining data in the previous calls.
- err error
+ ctx context.Context
+ goAway chan struct{}
+ recv *recvBuffer
+ last []byte // Stores the remaining data in the previous calls.
+ err error
}
// Read reads the next len(p) bytes from last. If last is drained, it tries to
@@ -140,8 +130,10 @@ func (r *recvBufferReader) read(p []byte) (n int, err error) {
return copied, nil
}
select {
- case <-r.ctxDone:
+ case <-r.ctx.Done():
return 0, ContextErr(r.ctx.Err())
+ case <-r.goAway:
+ return 0, errStreamDrain
case m := <-r.recv.get():
r.recv.load()
if m.err != nil {
@@ -153,7 +145,61 @@ func (r *recvBufferReader) read(p []byte) (n int, err error) {
}
}
-type streamState uint32
+// All items in an out of a controlBuffer should be the same type.
+type item interface {
+ item()
+}
+
+// controlBuffer is an unbounded channel of item.
+type controlBuffer struct {
+ c chan item
+ mu sync.Mutex
+ backlog []item
+}
+
+func newControlBuffer() *controlBuffer {
+ b := &controlBuffer{
+ c: make(chan item, 1),
+ }
+ return b
+}
+
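+// put adds an item to the buffer. The item is sent on the channel immediately when there
+// is no backlog and the channel has room; otherwise it is queued and delivered by a later load.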
+func (b *controlBuffer) put(r item) {
+ b.mu.Lock()
+ if len(b.backlog) == 0 {
+ select {
+ case b.c <- r:
+ b.mu.Unlock()
+ return
+ default:
+ }
+ }
+ b.backlog = append(b.backlog, r)
+ b.mu.Unlock()
+}
+
+func (b *controlBuffer) load() {
+ b.mu.Lock()
+ if len(b.backlog) > 0 {
+ select {
+ case b.c <- b.backlog[0]:
+ b.backlog[0] = nil
+ b.backlog = b.backlog[1:]
+ default:
+ }
+ }
+ b.mu.Unlock()
+}
+
+// get returns the channel that receives an item in the buffer.
+//
+// Upon receipt of an item, the caller should call load to send another
+// item onto the channel if there is any.
+func (b *controlBuffer) get() <-chan item {
+ return b.c
+}
+
+type streamState uint8
const (
streamActive streamState = iota
@@ -168,68 +214,38 @@ type Stream struct {
st ServerTransport // nil for client side Stream
ctx context.Context // the associated context of the stream
cancel context.CancelFunc // always nil for client side Stream
- done chan struct{} // closed at the end of stream to unblock writers. On the client side.
- ctxDone <-chan struct{} // same as done chan but for server side. Cache of ctx.Done() (for performance)
+ done chan struct{} // closed when the final status arrives
+ goAway chan struct{} // closed when a GOAWAY control message is received
method string // the associated RPC method of the stream
recvCompress string
sendCompress string
buf *recvBuffer
trReader io.Reader
fc *inFlow
- wq *writeQuota
+ recvQuota uint32
+ waiters waiters
// Callback to state application's intentions to read data. This
// is used to adjust flow control, if needed.
requestRead func(int)
- headerChan chan struct{} // closed to indicate the end of header metadata.
- headerDone uint32 // set when headerChan is closed. Used to avoid closing headerChan multiple times.
-
- // hdrMu protects header and trailer metadata on the server-side.
- hdrMu sync.Mutex
- header metadata.MD // the received header metadata.
- trailer metadata.MD // the key-value map of trailer metadata.
-
- noHeaders bool // set if the client never received headers (set only after the stream is done).
-
- // On the server-side, headerSent is atomically set to 1 when the headers are sent out.
- headerSent uint32
-
- state streamState
+ sendQuotaPool *quotaPool
+ headerChan chan struct{} // closed to indicate the end of header metadata.
+ headerDone bool // set when headerChan is closed. Used to avoid closing headerChan multiple times.
+ header metadata.MD // the received header metadata.
+ trailer metadata.MD // the key-value map of trailer metadata.
- // On client-side it is the status error received from the server.
- // On server-side it is unused.
- status *status.Status
+ mu sync.RWMutex // guard the following
+ headerOk bool // set to true when the first header is about to be sent
+ state streamState
- bytesReceived uint32 // indicates whether any bytes have been received on this stream
- unprocessed uint32 // set if the server sends a refused stream or GOAWAY including this stream
+ status *status.Status // the status error received from the server
- // contentSubtype is the content-subtype for requests.
- // this must be lowercase or the behavior is undefined.
- contentSubtype string
-}
-
-// isHeaderSent is only valid on the server-side.
-func (s *Stream) isHeaderSent() bool {
- return atomic.LoadUint32(&s.headerSent) == 1
-}
+ rstStream bool // indicates whether a RST_STREAM frame needs to be sent
+ rstError http2.ErrCode // the error that needs to be sent along with the RST_STREAM frame
-// updateHeaderSent updates headerSent and returns true
-// if it was alreay set. It is valid only on server-side.
-func (s *Stream) updateHeaderSent() bool {
- return atomic.SwapUint32(&s.headerSent, 1) == 1
-}
-
-func (s *Stream) swapState(st streamState) streamState {
- return streamState(atomic.SwapUint32((*uint32)(&s.state), uint32(st)))
-}
-
-func (s *Stream) compareAndSwapState(oldState, newState streamState) bool {
- return atomic.CompareAndSwapUint32((*uint32)(&s.state), uint32(oldState), uint32(newState))
-}
-
-func (s *Stream) getState() streamState {
- return streamState(atomic.LoadUint32((*uint32)(&s.state)))
+ bytesReceived bool // indicates whether any bytes have been received on this stream
+ unprocessed bool // set if the server sends a refused stream or GOAWAY including this stream
}
func (s *Stream) waitOnHeader() error {
@@ -238,9 +254,12 @@ func (s *Stream) waitOnHeader() error {
// only after having received headers.
return nil
}
+ wc := s.waiters
select {
- case <-s.ctx.Done():
- return ContextErr(s.ctx.Err())
+ case <-wc.ctx.Done():
+ return ContextErr(wc.ctx.Err())
+ case <-wc.goAway:
+ return errStreamDrain
case <-s.headerChan:
return nil
}
@@ -260,12 +279,18 @@ func (s *Stream) SetSendCompress(str string) {
s.sendCompress = str
}
-// Done returns a channel which is closed when it receives the final status
+// Done returns a channel which is closed when it receives the final status
// from the server.
func (s *Stream) Done() <-chan struct{} {
return s.done
}
+// GoAway returns a channel which is closed when the server sent a GoAway signal
+// before this stream was initiated.
+func (s *Stream) GoAway() <-chan struct{} {
+ return s.goAway
+}
+
// Header acquires the key-value pairs of header metadata once it
// is available. It blocks until i) the metadata is ready or ii) there is no
// header metadata or iii) the stream is canceled/expired.
@@ -274,45 +299,26 @@ func (s *Stream) Header() (metadata.MD, error) {
// Even if the stream is closed, header is returned if available.
select {
case <-s.headerChan:
- if s.header == nil {
- return nil, nil
- }
return s.header.Copy(), nil
default:
}
return nil, err
}
-// TrailersOnly blocks until a header or trailers-only frame is received and
-// then returns true if the stream was trailers-only. If the stream ends
-// before headers are received, returns true, nil. If a context error happens
-// first, returns it as a status error. Client-side only.
-func (s *Stream) TrailersOnly() (bool, error) {
- err := s.waitOnHeader()
- if err != nil {
- return false, err
- }
- // if !headerDone, some other connection error occurred.
- return s.noHeaders && atomic.LoadUint32(&s.headerDone) == 1, nil
-}
-
// Trailer returns the cached trailer metadata. Note that if it is not called
// after the entire stream is done, it could return an empty MD. Client
// side only.
-// It can be safely read only after stream has ended that is either read
-// or write have returned io.EOF.
func (s *Stream) Trailer() metadata.MD {
+ s.mu.RLock()
c := s.trailer.Copy()
+ s.mu.RUnlock()
return c
}
-// ContentSubtype returns the content-subtype for a request. For example, a
-// content-subtype of "proto" will result in a content-type of
-// "application/grpc+proto". This will always be lowercase. See
-// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for
-// more details.
-func (s *Stream) ContentSubtype() string {
- return s.contentSubtype
+// ServerTransport returns the underlying ServerTransport for the stream.
+// The client side stream always returns nil.
+func (s *Stream) ServerTransport() ServerTransport {
+ return s.st
}
// Context returns the context of the stream.
@@ -326,48 +332,36 @@ func (s *Stream) Method() string {
}
// Status returns the status received from the server.
-// Status can be read safely only after the stream has ended,
-// that is, after Done() is closed.
func (s *Stream) Status() *status.Status {
return s.status
}
// SetHeader sets the header metadata. This can be called multiple times.
// Server side only.
-// This should not be called in parallel to other data writes.
func (s *Stream) SetHeader(md metadata.MD) error {
+ s.mu.Lock()
+ if s.headerOk || s.state == streamDone {
+ s.mu.Unlock()
+ return ErrIllegalHeaderWrite
+ }
if md.Len() == 0 {
+ s.mu.Unlock()
return nil
}
- if s.isHeaderSent() || s.getState() == streamDone {
- return ErrIllegalHeaderWrite
- }
- s.hdrMu.Lock()
s.header = metadata.Join(s.header, md)
- s.hdrMu.Unlock()
+ s.mu.Unlock()
return nil
}
-// SendHeader sends the given header metadata. The given metadata is
-// combined with any metadata set by previous calls to SetHeader and
-// then written to the transport stream.
-func (s *Stream) SendHeader(md metadata.MD) error {
- return s.st.WriteHeader(s, md)
-}
-
// SetTrailer sets the trailer metadata which will be sent with the RPC status
// by the server. This can be called multiple times. Server side only.
-// This should not be called parallel to other data writes.
func (s *Stream) SetTrailer(md metadata.MD) error {
if md.Len() == 0 {
return nil
}
- if s.getState() == streamDone {
- return ErrIllegalHeaderWrite
- }
- s.hdrMu.Lock()
+ s.mu.Lock()
s.trailer = metadata.Join(s.trailer, md)
- s.hdrMu.Unlock()
+ s.mu.Unlock()
return nil
}
@@ -407,15 +401,29 @@ func (t *transportReader) Read(p []byte) (n int, err error) {
return
}
+// finish sets the stream's state and status, and closes the done channel.
+// s.mu must be held by the caller. st must always be non-nil.
+func (s *Stream) finish(st *status.Status) {
+ s.status = st
+ s.state = streamDone
+ close(s.done)
+}
+
// BytesReceived indicates whether any bytes have been received on this stream.
func (s *Stream) BytesReceived() bool {
- return atomic.LoadUint32(&s.bytesReceived) == 1
+ s.mu.Lock()
+ br := s.bytesReceived
+ s.mu.Unlock()
+ return br
}
// Unprocessed indicates whether the server did not process this stream --
// i.e. it sent a refused stream or GOAWAY including this stream ID.
func (s *Stream) Unprocessed() bool {
- return atomic.LoadUint32(&s.unprocessed) == 1
+ s.mu.Lock()
+ br := s.unprocessed
+ s.mu.Unlock()
+ return br
}
// GoString is implemented by Stream so context.String() won't
@@ -424,6 +432,21 @@ func (s *Stream) GoString() string {
return fmt.Sprintf("<stream: %p, %v>", s, s.method)
}
+// The key to save transport.Stream in the context.
+type streamKey struct{}
+
+// newContextWithStream creates a new context from ctx and attaches stream
+// to it.
+func newContextWithStream(ctx context.Context, stream *Stream) context.Context {
+ return context.WithValue(ctx, streamKey{}, stream)
+}
+
+// StreamFromContext returns the stream saved in ctx.
+func StreamFromContext(ctx context.Context) (s *Stream, ok bool) {
+ s, ok = ctx.Value(streamKey{}).(*Stream)
+ return
+}
+
// state of transport
type transportState int
@@ -445,8 +468,6 @@ type ServerConfig struct {
InitialConnWindowSize int32
WriteBufferSize int
ReadBufferSize int
- ChannelzParentID int64
- MaxHeaderListSize *uint32
}
// NewServerTransport creates a ServerTransport with conn or non-nil error
@@ -459,18 +480,17 @@ func NewServerTransport(protocol string, conn net.Conn, config *ServerConfig) (S
type ConnectOptions struct {
// UserAgent is the application user agent.
UserAgent string
+ // Authority is the :authority pseudo-header to use. This field has no effect if
+ // TransportCredentials is set.
+ Authority string
// Dialer specifies how to dial a network address.
Dialer func(context.Context, string) (net.Conn, error)
// FailOnNonTempDialError specifies if gRPC fails on non-temporary dial errors.
FailOnNonTempDialError bool
// PerRPCCredentials stores the PerRPCCredentials required to issue RPCs.
PerRPCCredentials []credentials.PerRPCCredentials
- // TransportCredentials stores the Authenticator required to setup a client
- // connection. Only one of TransportCredentials and CredsBundle is non-nil.
+ // TransportCredentials stores the Authenticator required to setup a client connection.
TransportCredentials credentials.TransportCredentials
- // CredsBundle is the credentials bundle to be used. Only one of
- // TransportCredentials and CredsBundle is non-nil.
- CredsBundle credentials.Bundle
// KeepaliveParams stores the keepalive parameters.
KeepaliveParams keepalive.ClientParameters
// StatsHandler stores the handler for stats.
@@ -483,10 +503,6 @@ type ConnectOptions struct {
WriteBufferSize int
// ReadBufferSize sets the size of read buffer, which in turn determines how much data can be read at most for one read syscall.
ReadBufferSize int
- // ChannelzParentID sets the addrConn id which initiate the creation of this client transport.
- ChannelzParentID int64
- // MaxHeaderListSize sets the max (uncompressed) size of header list that is prepared to be received.
- MaxHeaderListSize *uint32
}
// TargetInfo contains the information of the target such as network address and metadata.
@@ -498,8 +514,8 @@ type TargetInfo struct {
// NewClientTransport establishes the transport with the required ConnectOptions
// and returns it to the caller.
-func NewClientTransport(connectCtx, ctx context.Context, target TargetInfo, opts ConnectOptions, onSuccess func(), onGoAway func(GoAwayReason), onClose func()) (ClientTransport, error) {
- return newHTTP2Client(connectCtx, ctx, target, opts, onSuccess, onGoAway, onClose)
+func NewClientTransport(connectCtx, ctx context.Context, target TargetInfo, opts ConnectOptions, onSuccess func()) (ClientTransport, error) {
+ return newHTTP2Client(connectCtx, ctx, target, opts, onSuccess)
}
// Options provides additional hints and information for message
@@ -508,6 +524,11 @@ type Options struct {
// Last indicates whether this write is the last piece for
// this stream.
Last bool
+
+ // Delay is a hint to the transport implementation for whether
+ // the data could be buffered for a batching write. The
+ // transport implementation may ignore the hint.
+ Delay bool
}
// CallHdr carries the information of a particular RPC.
@@ -525,15 +546,13 @@ type CallHdr struct {
// Creds specifies credentials.PerRPCCredentials for a call.
Creds credentials.PerRPCCredentials
- // ContentSubtype specifies the content-subtype for a request. For example, a
- // content-subtype of "proto" will result in a content-type of
- // "application/grpc+proto". The value of ContentSubtype must be all
- // lowercase, otherwise the behavior is undefined. See
- // https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests
- // for more details.
- ContentSubtype string
-
- PreviousAttempts int // value of grpc-previous-rpc-attempts header to set
+ // Flush indicates whether a new stream command should be sent
+ // to the peer without waiting for the first data. This is
+ // only a hint.
+ // If it's true, the transport may modify the flush decision
+ // for performance purposes.
+ // If it's false, new stream will never be flushed.
+ Flush bool
}
// ClientTransport is the common interface for all gRPC client-side transport
@@ -575,12 +594,6 @@ type ClientTransport interface {
// GetGoAwayReason returns the reason why GoAway frame was received.
GetGoAwayReason() GoAwayReason
-
- // IncrMsgSent increments the number of message sent through this transport.
- IncrMsgSent()
-
- // IncrMsgRecv increments the number of message received through this transport.
- IncrMsgRecv()
}
// ServerTransport is the common interface for all gRPC server-side transport
@@ -614,12 +627,14 @@ type ServerTransport interface {
// Drain notifies the client this ServerTransport stops accepting new RPCs.
Drain()
+}
- // IncrMsgSent increments the number of message sent through this transport.
- IncrMsgSent()
-
- // IncrMsgRecv increments the number of message received through this transport.
- IncrMsgRecv()
+// streamErrorf creates a StreamError with the specified error code and description.
+func streamErrorf(c codes.Code, format string, a ...interface{}) StreamError {
+ return StreamError{
+ Code: c,
+ Desc: fmt.Sprintf(format, a...),
+ }
}
// connectionErrorf creates a ConnectionError with the specified error description.
@@ -661,18 +676,36 @@ func (e ConnectionError) Origin() error {
var (
// ErrConnClosing indicates that the transport is closing.
ErrConnClosing = connectionErrorf(true, nil, "transport is closing")
- // errStreamDrain indicates that the stream is rejected because the
- // connection is draining. This could be caused by goaway or balancer
- // removing the address.
- errStreamDrain = status.Error(codes.Unavailable, "the connection is draining")
- // errStreamDone is returned from write at the client side to indiacte application
- // layer of an error.
- errStreamDone = errors.New("the stream is done")
+ // errStreamDrain indicates that the stream is rejected by the server because
+ // the server stops accepting new RPCs.
+ // TODO: delete this error; it is no longer necessary.
+ errStreamDrain = streamErrorf(codes.Unavailable, "the server stops accepting new RPCs")
// StatusGoAway indicates that the server sent a GOAWAY that included this
// stream's ID in unprocessed RPCs.
- statusGoAway = status.New(codes.Unavailable, "the stream is rejected because server is draining the connection")
+ statusGoAway = status.New(codes.Unavailable, "the server stopped accepting new RPCs")
)
+// TODO: See if we can replace StreamError with status package errors.
+
+// StreamError is an error that only affects one stream within a connection.
+type StreamError struct {
+ Code codes.Code
+ Desc string
+}
+
+func (e StreamError) Error() string {
+ return fmt.Sprintf("stream error: code = %s desc = %q", e.Code, e.Desc)
+}
+
+// waiters are passed to quotaPool get methods to
+// wait on in addition to waiting on quota.
+type waiters struct {
+ ctx context.Context
+ tctx context.Context
+ done chan struct{}
+ goAway chan struct{}
+}
+
// GoAwayReason contains the reason for the GoAway frame received.
type GoAwayReason uint8
@@ -687,26 +720,38 @@ const (
GoAwayTooManyPings GoAwayReason = 2
)
-// channelzData is used to store channelz related data for http2Client and http2Server.
-// These fields cannot be embedded in the original structs (e.g. http2Client), since to do atomic
-// operation on int64 variable on 32-bit machine, user is responsible to enforce memory alignment.
-// Here, by grouping those int64 fields inside a struct, we are enforcing the alignment.
-type channelzData struct {
- kpCount int64
- // The number of streams that have started, including already finished ones.
- streamsStarted int64
- // Client side: The number of streams that have ended successfully by receiving
- // EoS bit set frame from server.
- // Server side: The number of streams that have ended successfully by sending
- // frame with EoS bit set.
- streamsSucceeded int64
- streamsFailed int64
- // lastStreamCreatedTime stores the timestamp that the last stream gets created. It is of int64 type
- // instead of time.Time since it's more costly to atomically update time.Time variable than int64
- // variable. The same goes for lastMsgSentTime and lastMsgRecvTime.
- lastStreamCreatedTime int64
- msgSent int64
- msgRecv int64
- lastMsgSentTime int64
- lastMsgRecvTime int64
+// loopyWriter is run in a separate goroutine. It is the single code path that will
+// write data on wire.
+func loopyWriter(ctx context.Context, cbuf *controlBuffer, handler func(item) error) {
+ for {
+ select {
+ case i := <-cbuf.get():
+ cbuf.load()
+ if err := handler(i); err != nil {
+ errorf("transport: Error while handling item. Err: %v", err)
+ return
+ }
+ case <-ctx.Done():
+ return
+ }
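+ // Drain whatever else is already queued without blocking; once the buffer is empty, flush the underlying writer and go back to waiting.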
+ hasData:
+ for {
+ select {
+ case i := <-cbuf.get():
+ cbuf.load()
+ if err := handler(i); err != nil {
+ errorf("transport: Error while handling item. Err: %v", err)
+ return
+ }
+ case <-ctx.Done():
+ return
+ default:
+ if err := handler(&flushIO{}); err != nil {
+ errorf("transport: Error while flushing. Err: %v", err)
+ return
+ }
+ break hasData
+ }
+ }
+ }
}
diff --git a/vendor/google.golang.org/grpc/version.go b/vendor/google.golang.org/grpc/version.go
deleted file mode 100644
index d8e0287ef..000000000
--- a/vendor/google.golang.org/grpc/version.go
+++ /dev/null
@@ -1,22 +0,0 @@
-/*
- *
- * Copyright 2018 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package grpc
-
-// Version is the current grpc version.
-const Version = "1.16.0"
diff --git a/vendor/google.golang.org/grpc/vet.sh b/vendor/google.golang.org/grpc/vet.sh
index eb3287036..2ad94fed9 100755
--- a/vendor/google.golang.org/grpc/vet.sh
+++ b/vendor/google.golang.org/grpc/vet.sh
@@ -1,10 +1,5 @@
#!/bin/bash
-if [[ `uname -a` = *"Darwin"* ]]; then
- echo "It seems you are running on Mac. This script does not work on Mac. See https://github.com/grpc/grpc-go/issues/2047"
- exit 1
-fi
-
set -ex # Exit on error; debugging enabled.
set -o pipefail # Fail a pipe if any sub-command fails.
@@ -13,46 +8,24 @@ die() {
exit 1
}
-# Check to make sure it's safe to modify the user's git repo.
-if git status --porcelain | read; then
- die "Uncommitted or untracked files found; commit changes first"
-fi
+PATH="$GOPATH/bin:$GOROOT/bin:$PATH"
-if [[ -d "${GOPATH}/src" ]]; then
- die "\${GOPATH}/src (${GOPATH}/src) exists; this script will delete it."
+# Check proto in manual runs or cron runs.
+if [[ "$TRAVIS" != "true" || "$TRAVIS_EVENT_TYPE" = "cron" ]]; then
+ check_proto="true"
fi
-# Undo any edits made by this script.
-cleanup() {
- rm -rf "${GOPATH}/src"
- git reset --hard HEAD
-}
-trap cleanup EXIT
-
-PATH="${GOPATH}/bin:${GOROOT}/bin:${PATH}"
-
-if [[ "$1" = "-install" ]]; then
- # Check for module support
- if go help mod >& /dev/null; then
- go install \
- github.com/golang/lint/golint \
- golang.org/x/tools/cmd/goimports \
- honnef.co/go/tools/cmd/staticcheck \
- github.com/client9/misspell/cmd/misspell \
- github.com/golang/protobuf/protoc-gen-go
- else
- # Ye olde `go get` incantation.
- # Note: this gets the latest version of all tools (vs. the pinned versions
- # with Go modules).
- go get -u \
- github.com/golang/lint/golint \
- golang.org/x/tools/cmd/goimports \
- honnef.co/go/tools/cmd/staticcheck \
- github.com/client9/misspell/cmd/misspell \
- github.com/golang/protobuf/protoc-gen-go
- fi
- if [[ -z "${VET_SKIP_PROTO}" ]]; then
- if [[ "${TRAVIS}" = "true" ]]; then
+if [ "$1" = "-install" ]; then
+ go get -d \
+ google.golang.org/grpc/...
+ go get -u \
+ github.com/golang/lint/golint \
+ golang.org/x/tools/cmd/goimports \
+ honnef.co/go/tools/cmd/staticcheck \
+ github.com/client9/misspell/cmd/misspell \
+ github.com/golang/protobuf/protoc-gen-go
+ if [[ "$check_proto" = "true" ]]; then
+ if [[ "$TRAVIS" = "true" ]]; then
PROTOBUF_VERSION=3.3.0
PROTOC_FILENAME=protoc-${PROTOBUF_VERSION}-linux-x86_64.zip
pushd /home/travis
@@ -69,61 +42,43 @@ elif [[ "$#" -ne 0 ]]; then
die "Unknown argument(s): $*"
fi
+# TODO: Remove this check and the mangling below once "context" is imported
+# directly.
+if git status --porcelain | read; then
+ die "Uncommitted or untracked files found; commit changes first"
+fi
+
git ls-files "*.go" | xargs grep -L "\(Copyright [0-9]\{4,\} gRPC authors\)\|DO NOT EDIT" 2>&1 | tee /dev/stderr | (! read)
-git ls-files "*.go" | xargs grep -l '"math/rand"' 2>&1 | (! grep -v '^examples\|^stress\|grpcrand') | tee /dev/stderr | (! read)
-git ls-files | xargs dirname | sort | uniq | xargs go run test/go_vet/vet.go | tee /dev/stderr | (! read)
gofmt -s -d -l . 2>&1 | tee /dev/stderr | (! read)
goimports -l . 2>&1 | tee /dev/stderr | (! read)
golint ./... 2>&1 | (grep -vE "(_mock|\.pb)\.go:" || true) | tee /dev/stderr | (! read)
+# Undo any edits made by this script.
+cleanup() {
+ git reset --hard HEAD
+}
+trap cleanup EXIT
+
# Rewrite golang.org/x/net/context -> context imports (see grpc/grpc-go#1484).
# TODO: Remove this mangling once "context" is imported directly (grpc/grpc-go#711).
git ls-files "*.go" | xargs sed -i 's:"golang.org/x/net/context":"context":'
-set +o pipefail # vet exits with non-zero error if issues are found
-
-# TODO(deklerk) remove when we drop Go 1.6 support
-go tool vet -all . 2>&1 | \
- grep -vE 'clientconn.go:.*cancel (function|var)' | \
- grep -vE '.*transport_test.go:.*cancel' | \
- tee /dev/stderr | \
- (! read)
-
+set +o pipefail
+# TODO: Stop filtering pb.go files once golang/protobuf#214 is fixed.
+go tool vet -all . 2>&1 | grep -vE '(clientconn|transport\/transport_test).go:.*cancel (function|var)' | grep -vF '.pb.go:' | tee /dev/stderr | (! read)
set -o pipefail
git reset --hard HEAD
-if [[ -z "${VET_SKIP_PROTO}" ]]; then
- PATH="/home/travis/bin:${PATH}" make proto && \
+if [[ "$check_proto" = "true" ]]; then
+ PATH="/home/travis/bin:$PATH" make proto && \
git status --porcelain 2>&1 | (! read) || \
(git status; git --no-pager diff; exit 1)
fi
-if go help mod >& /dev/null; then
- go mod tidy && \
- git status --porcelain 2>&1 | (! read) || \
- (git status; git --no-pager diff; exit 1)
-fi
-
-### HACK HACK HACK: Remove once staticcheck works with modules.
-# Make a symlink in ${GOPATH}/src to its ${GOPATH}/pkg/mod equivalent for every package we use.
-for x in $(find "${GOPATH}/pkg/mod" -name '*@*' | grep -v \/mod\/cache\/); do
- pkg="$(echo ${x#"${GOPATH}/pkg/mod/"} | cut -f1 -d@)";
- # If multiple versions exist, just use the existing one.
- if [[ -L "${GOPATH}/src/${pkg}" ]]; then continue; fi
- mkdir -p "$(dirname "${GOPATH}/src/${pkg}")";
- ln -s $x "${GOPATH}/src/${pkg}";
-done
-### END HACK HACK HACK
-
# TODO(menghanl): fix errors in transport_test.
staticcheck -ignore '
-internal/transport/transport_test.go:SA2002
-benchmark/benchmain/main.go:SA1019
-stats/stats_test.go:SA1019
-test/end2end_test.go:SA1019
-balancer_test.go:SA1019
-balancer.go:SA1019
-clientconn_test.go:SA1019
-internal/transport/handler_server_test.go:SA1019
-internal/transport/handler_server.go:SA1019
+google.golang.org/grpc/transport/transport_test.go:SA2002
+google.golang.org/grpc/benchmark/benchmain/main.go:SA1019
+google.golang.org/grpc/stats/stats_test.go:SA1019
+google.golang.org/grpc/test/end2end_test.go:SA1019
' ./...
misspell -error .
diff --git a/vendor/vendor.json b/vendor/vendor.json
index 64c2329e5..bf5312a38 100644
--- a/vendor/vendor.json
+++ b/vendor/vendor.json
@@ -107,52 +107,68 @@
"versionExact": "v1.2.0"
},
{
- "checksumSHA1": "ZRhE1BjkcaROD1NZMZwICtPemTs=",
+ "checksumSHA1": "5TKR3lamABvUhxkopYnphszS+Xc=",
"path": "github.com/grpc-ecosystem/go-grpc-middleware",
- "revision": "3304cc8863525cd0b328fbfd5bf745bbd38e7106",
- "revisionTime": "2018-11-12T10:25:10Z"
+ "revision": "c250d6563d4d4c20252cd865923440e829844f4e",
+ "revisionTime": "2018-05-02T09:16:42Z",
+ "version": "v1",
+ "versionExact": "v1.0.0"
},
{
"checksumSHA1": "tvOR7YKj51rBR+j5C/ZyZj6rvYc=",
"path": "github.com/grpc-ecosystem/go-grpc-middleware/auth",
- "revision": "3304cc8863525cd0b328fbfd5bf745bbd38e7106",
- "revisionTime": "2018-11-12T10:25:10Z"
+ "revision": "c250d6563d4d4c20252cd865923440e829844f4e",
+ "revisionTime": "2018-05-02T09:16:42Z",
+ "version": "v1",
+ "versionExact": "v1.0.0"
},
{
"checksumSHA1": "YdjXbik3OwU+YwQL7OAzwqAAUjo=",
"path": "github.com/grpc-ecosystem/go-grpc-middleware/logging",
- "revision": "3304cc8863525cd0b328fbfd5bf745bbd38e7106",
- "revisionTime": "2018-11-12T10:25:10Z"
+ "revision": "c250d6563d4d4c20252cd865923440e829844f4e",
+ "revisionTime": "2018-05-02T09:16:42Z",
+ "version": "v1",
+ "versionExact": "v1.0.0"
},
{
"checksumSHA1": "sRalD4xQkkeydbEmfGhdVC6iy94=",
"path": "github.com/grpc-ecosystem/go-grpc-middleware/logging/logrus",
- "revision": "3304cc8863525cd0b328fbfd5bf745bbd38e7106",
- "revisionTime": "2018-11-12T10:25:10Z"
+ "revision": "c250d6563d4d4c20252cd865923440e829844f4e",
+ "revisionTime": "2018-05-02T09:16:42Z",
+ "version": "v1",
+ "versionExact": "v1.0.0"
},
{
"checksumSHA1": "lyjaDmKU/dk/K4ciP5OUJEzmjfA=",
"path": "github.com/grpc-ecosystem/go-grpc-middleware/logging/logrus/ctxlogrus",
- "revision": "3304cc8863525cd0b328fbfd5bf745bbd38e7106",
- "revisionTime": "2018-11-12T10:25:10Z"
+ "revision": "c250d6563d4d4c20252cd865923440e829844f4e",
+ "revisionTime": "2018-05-02T09:16:42Z",
+ "version": "v1",
+ "versionExact": "v1.0.0"
},
{
"checksumSHA1": "Rf3QgJeAX2809t/DZvMjZbGHe9U=",
"path": "github.com/grpc-ecosystem/go-grpc-middleware/tags",
- "revision": "3304cc8863525cd0b328fbfd5bf745bbd38e7106",
- "revisionTime": "2018-11-12T10:25:10Z"
+ "revision": "c250d6563d4d4c20252cd865923440e829844f4e",
+ "revisionTime": "2018-05-02T09:16:42Z",
+ "version": "v1",
+ "versionExact": "v1.0.0"
},
{
"checksumSHA1": "g5zvkcjKi3oc+BXuqJfMfG2tP+c=",
"path": "github.com/grpc-ecosystem/go-grpc-middleware/tags/logrus",
- "revision": "3304cc8863525cd0b328fbfd5bf745bbd38e7106",
- "revisionTime": "2018-11-12T10:25:10Z"
+ "revision": "c250d6563d4d4c20252cd865923440e829844f4e",
+ "revisionTime": "2018-05-02T09:16:42Z",
+ "version": "v1",
+ "versionExact": "v1.0.0"
},
{
"checksumSHA1": "L5z1C445GhhQmDKSisTFv754LdU=",
"path": "github.com/grpc-ecosystem/go-grpc-middleware/util/metautils",
- "revision": "3304cc8863525cd0b328fbfd5bf745bbd38e7106",
- "revisionTime": "2018-11-12T10:25:10Z"
+ "revision": "c250d6563d4d4c20252cd865923440e829844f4e",
+ "revisionTime": "2018-05-02T09:16:42Z",
+ "version": "v1",
+ "versionExact": "v1.0.0"
},
{
"checksumSHA1": "3iVD2sJv4uYnA8YgkR8yzZiUF7o=",
@@ -421,276 +437,212 @@
"versionExact": "master"
},
{
- "checksumSHA1": "O6SQTcVdhL+4betKp/7ketCc/AU=",
+ "checksumSHA1": "LXTQppZOmpZb8/zNBzfXmq3GDEg=",
"path": "google.golang.org/grpc",
- "revision": "2e463a05d100327ca47ac218281906921038fd95",
- "revisionTime": "2018-10-23T17:37:47Z",
- "version": "v1.16.0",
- "versionExact": "v1.16.0"
+ "revision": "7cea4cc846bcf00cbb27595b07da5de875ef7de9",
+ "revisionTime": "2018-01-08T22:01:35Z",
+ "version": "v1.9.1",
+ "versionExact": "v1.9.1"
},
{
- "checksumSHA1": "9KEKKMRAdFnz2sMBXbb33ZLS8Oo=",
+ "checksumSHA1": "xBhmO0Vn4kzbmySioX+2gBImrkk=",
"path": "google.golang.org/grpc/balancer",
- "revision": "2e463a05d100327ca47ac218281906921038fd95",
- "revisionTime": "2018-10-23T17:37:47Z",
- "version": "v1.16.0",
- "versionExact": "v1.16.0"
+ "revision": "7cea4cc846bcf00cbb27595b07da5de875ef7de9",
+ "revisionTime": "2018-01-08T22:01:35Z",
+ "version": "v1.9.1",
+ "versionExact": "v1.9.1"
},
{
- "checksumSHA1": "lw+L836hLeH8+//le+C+ycddCCU=",
+ "checksumSHA1": "CPWX/IgaQSR3+78j4sPrvHNkW+U=",
"path": "google.golang.org/grpc/balancer/base",
- "revision": "2e463a05d100327ca47ac218281906921038fd95",
- "revisionTime": "2018-10-23T17:37:47Z",
- "version": "v1.16.0",
- "versionExact": "v1.16.0"
+ "revision": "7cea4cc846bcf00cbb27595b07da5de875ef7de9",
+ "revisionTime": "2018-01-08T22:01:35Z",
+ "version": "v1.9.1",
+ "versionExact": "v1.9.1"
},
{
"checksumSHA1": "DJ1AtOk4Pu7bqtUMob95Hw8HPNw=",
"path": "google.golang.org/grpc/balancer/roundrobin",
- "revision": "2e463a05d100327ca47ac218281906921038fd95",
- "revisionTime": "2018-10-23T17:37:47Z",
- "version": "v1.16.0",
- "versionExact": "v1.16.0"
- },
- {
- "checksumSHA1": "YyTUFAVju8wgb1s/3azC2CeSbfY=",
- "path": "google.golang.org/grpc/binarylog/grpc_binarylog_v1",
- "revision": "2e463a05d100327ca47ac218281906921038fd95",
- "revisionTime": "2018-10-23T17:37:47Z",
- "version": "v1.16.0",
- "versionExact": "v1.16.0"
+ "revision": "7cea4cc846bcf00cbb27595b07da5de875ef7de9",
+ "revisionTime": "2018-01-08T22:01:35Z",
+ "version": "v1.9.1",
+ "versionExact": "v1.9.1"
},
{
- "checksumSHA1": "R3tuACGAPyK4lr+oSNt1saUzC0M=",
+ "checksumSHA1": "bfmh2m3qW8bb6qpfS/D4Wcl4hZE=",
"path": "google.golang.org/grpc/codes",
- "revision": "2e463a05d100327ca47ac218281906921038fd95",
- "revisionTime": "2018-10-23T17:37:47Z",
- "version": "v1.16.0",
- "versionExact": "v1.16.0"
+ "revision": "7cea4cc846bcf00cbb27595b07da5de875ef7de9",
+ "revisionTime": "2018-01-08T22:01:35Z",
+ "version": "v1.9.1",
+ "versionExact": "v1.9.1"
},
{
"checksumSHA1": "XH2WYcDNwVO47zYShREJjcYXm0Y=",
"path": "google.golang.org/grpc/connectivity",
- "revision": "2e463a05d100327ca47ac218281906921038fd95",
- "revisionTime": "2018-10-23T17:37:47Z",
- "version": "v1.16.0",
- "versionExact": "v1.16.0"
+ "revision": "7cea4cc846bcf00cbb27595b07da5de875ef7de9",
+ "revisionTime": "2018-01-08T22:01:35Z",
+ "version": "v1.9.1",
+ "versionExact": "v1.9.1"
},
{
- "checksumSHA1": "5r6NIQY1c3NjwLtxUOo/BcUOqFo=",
+ "checksumSHA1": "4DnDX81AOSyVP3UJ5tQmlNcG1MI=",
"path": "google.golang.org/grpc/credentials",
- "revision": "2e463a05d100327ca47ac218281906921038fd95",
- "revisionTime": "2018-10-23T17:37:47Z",
- "version": "v1.16.0",
- "versionExact": "v1.16.0"
+ "revision": "7cea4cc846bcf00cbb27595b07da5de875ef7de9",
+ "revisionTime": "2018-01-08T22:01:35Z",
+ "version": "v1.9.1",
+ "versionExact": "v1.9.1"
},
{
- "checksumSHA1": "cfLb+pzWB+Glwp82rgfcEST1mv8=",
+ "checksumSHA1": "9DImIDqmAMPO24loHJ77UVJTDxQ=",
"path": "google.golang.org/grpc/encoding",
- "revision": "2e463a05d100327ca47ac218281906921038fd95",
- "revisionTime": "2018-10-23T17:37:47Z",
- "version": "v1.16.0",
- "versionExact": "v1.16.0"
+ "revision": "7cea4cc846bcf00cbb27595b07da5de875ef7de9",
+ "revisionTime": "2018-01-08T22:01:35Z",
+ "version": "v1.9.1",
+ "versionExact": "v1.9.1"
},
{
- "checksumSHA1": "LKKkn7EYA+Do9Qwb2/SUKLFNxoo=",
- "path": "google.golang.org/grpc/encoding/proto",
- "revision": "2e463a05d100327ca47ac218281906921038fd95",
- "revisionTime": "2018-10-23T17:37:47Z",
- "version": "v1.16.0",
- "versionExact": "v1.16.0"
+ "checksumSHA1": "H7SuPUqbPcdbNqgl+k3ohuwMAwE=",
+ "path": "google.golang.org/grpc/grpclb/grpc_lb_v1/messages",
+ "revision": "7cea4cc846bcf00cbb27595b07da5de875ef7de9",
+ "revisionTime": "2018-01-08T22:01:35Z",
+ "version": "v1.9.1",
+ "versionExact": "v1.9.1"
},
{
- "checksumSHA1": "ZPPSFisPDz2ANO4FBZIft+fRxyk=",
+ "checksumSHA1": "ntHev01vgZgeIh5VFRmbLx/BSTo=",
"path": "google.golang.org/grpc/grpclog",
- "revision": "2e463a05d100327ca47ac218281906921038fd95",
- "revisionTime": "2018-10-23T17:37:47Z",
- "version": "v1.16.0",
- "versionExact": "v1.16.0"
+ "revision": "7cea4cc846bcf00cbb27595b07da5de875ef7de9",
+ "revisionTime": "2018-01-08T22:01:35Z",
+ "version": "v1.9.1",
+ "versionExact": "v1.9.1"
},
{
- "checksumSHA1": "fVF/iXu9EnWIUmX9wrvBkzF0ydc=",
+ "checksumSHA1": "DyM0uqLtknaI4THSc3spn9XlL+g=",
"path": "google.golang.org/grpc/health",
- "revision": "2e463a05d100327ca47ac218281906921038fd95",
- "revisionTime": "2018-10-23T17:37:47Z",
- "version": "v1.16.0",
- "versionExact": "v1.16.0"
+ "revision": "7cea4cc846bcf00cbb27595b07da5de875ef7de9",
+ "revisionTime": "2018-01-08T22:01:35Z",
+ "version": "v1.9.1",
+ "versionExact": "v1.9.1"
},
{
- "checksumSHA1": "KfgIKMqGJ8FdFbWlGDsnmrCY7eE=",
+ "checksumSHA1": "6vY7tYjV84pnr3sDctzx53Bs8b0=",
"path": "google.golang.org/grpc/health/grpc_health_v1",
- "revision": "2e463a05d100327ca47ac218281906921038fd95",
- "revisionTime": "2018-10-23T17:37:47Z",
- "version": "v1.16.0",
- "versionExact": "v1.16.0"
+ "revision": "7cea4cc846bcf00cbb27595b07da5de875ef7de9",
+ "revisionTime": "2018-01-08T22:01:35Z",
+ "version": "v1.9.1",
+ "versionExact": "v1.9.1"
},
{
- "checksumSHA1": "LVvnj/+AVrdZMDw0DZ8D/vI24+M=",
+ "checksumSHA1": "Qvf3zdmRCSsiM/VoBv0qB/naHtU=",
"path": "google.golang.org/grpc/internal",
- "revision": "2e463a05d100327ca47ac218281906921038fd95",
- "revisionTime": "2018-10-23T17:37:47Z",
- "version": "v1.16.0",
- "versionExact": "v1.16.0"
- },
- {
- "checksumSHA1": "uDJA7QK2iGnEwbd9TPqkLaM+xuU=",
- "path": "google.golang.org/grpc/internal/backoff",
- "revision": "2e463a05d100327ca47ac218281906921038fd95",
- "revisionTime": "2018-10-23T17:37:47Z",
- "version": "v1.16.0",
- "versionExact": "v1.16.0"
- },
- {
- "checksumSHA1": "IfAvyAy406VPd5mY36DuN/+d8x8=",
- "path": "google.golang.org/grpc/internal/binarylog",
- "revision": "2e463a05d100327ca47ac218281906921038fd95",
- "revisionTime": "2018-10-23T17:37:47Z",
- "version": "v1.16.0",
- "versionExact": "v1.16.0"
- },
- {
- "checksumSHA1": "V6eyqZJfYh+cX+I/AxPVjkQLjTM=",
- "path": "google.golang.org/grpc/internal/channelz",
- "revision": "2e463a05d100327ca47ac218281906921038fd95",
- "revisionTime": "2018-10-23T17:37:47Z",
- "version": "v1.16.0",
- "versionExact": "v1.16.0"
- },
- {
- "checksumSHA1": "5dFUCEaPjKwza9kwKqgljp8ckU4=",
- "path": "google.golang.org/grpc/internal/envconfig",
- "revision": "2e463a05d100327ca47ac218281906921038fd95",
- "revisionTime": "2018-10-23T17:37:47Z",
- "version": "v1.16.0",
- "versionExact": "v1.16.0"
- },
- {
- "checksumSHA1": "70gndc/uHwyAl3D45zqp7vyHWlo=",
- "path": "google.golang.org/grpc/internal/grpcrand",
- "revision": "2e463a05d100327ca47ac218281906921038fd95",
- "revisionTime": "2018-10-23T17:37:47Z",
- "version": "v1.16.0",
- "versionExact": "v1.16.0"
- },
- {
- "checksumSHA1": "BG0q6ajST8+ns7FtzAYthNKgYLM=",
- "path": "google.golang.org/grpc/internal/grpcsync",
- "revision": "2e463a05d100327ca47ac218281906921038fd95",
- "revisionTime": "2018-10-23T17:37:47Z",
- "version": "v1.16.0",
- "versionExact": "v1.16.0"
- },
- {
- "checksumSHA1": "ziVpjOqXLZbsDnhW0gJLhteVIms=",
- "path": "google.golang.org/grpc/internal/syscall",
- "revision": "2e463a05d100327ca47ac218281906921038fd95",
- "revisionTime": "2018-10-23T17:37:47Z",
- "version": "v1.16.0",
- "versionExact": "v1.16.0"
- },
- {
- "checksumSHA1": "0r7S4jTgUIatKqL/8ra0J7Q5iO0=",
- "path": "google.golang.org/grpc/internal/transport",
- "revision": "2e463a05d100327ca47ac218281906921038fd95",
- "revisionTime": "2018-10-23T17:37:47Z",
- "version": "v1.16.0",
- "versionExact": "v1.16.0"
+ "revision": "7cea4cc846bcf00cbb27595b07da5de875ef7de9",
+ "revisionTime": "2018-01-08T22:01:35Z",
+ "version": "v1.9.1",
+ "versionExact": "v1.9.1"
},
{
- "checksumSHA1": "350+v+N+AuknxomqjND19nR969g=",
+ "checksumSHA1": "hcuHgKp8W0wIzoCnNfKI8NUss5o=",
"path": "google.golang.org/grpc/keepalive",
- "revision": "2e463a05d100327ca47ac218281906921038fd95",
- "revisionTime": "2018-10-23T17:37:47Z",
- "version": "v1.16.0",
- "versionExact": "v1.16.0"
+ "revision": "7cea4cc846bcf00cbb27595b07da5de875ef7de9",
+ "revisionTime": "2018-01-08T22:01:35Z",
+ "version": "v1.9.1",
+ "versionExact": "v1.9.1"
},
{
- "checksumSHA1": "OjIAi5AzqlQ7kLtdAyjvdgMf6hc=",
+ "checksumSHA1": "KeUmTZV+2X46C49cKyjp+xM7fvw=",
"path": "google.golang.org/grpc/metadata",
- "revision": "2e463a05d100327ca47ac218281906921038fd95",
- "revisionTime": "2018-10-23T17:37:47Z",
- "version": "v1.16.0",
- "versionExact": "v1.16.0"
+ "revision": "7cea4cc846bcf00cbb27595b07da5de875ef7de9",
+ "revisionTime": "2018-01-08T22:01:35Z",
+ "version": "v1.9.1",
+ "versionExact": "v1.9.1"
},
{
- "checksumSHA1": "VvGBoawND0urmYDy11FT+U1IHtU=",
+ "checksumSHA1": "5dwF592DPvhF2Wcex3m7iV6aGRQ=",
"path": "google.golang.org/grpc/naming",
- "revision": "2e463a05d100327ca47ac218281906921038fd95",
- "revisionTime": "2018-10-23T17:37:47Z",
- "version": "v1.16.0",
- "versionExact": "v1.16.0"
+ "revision": "7cea4cc846bcf00cbb27595b07da5de875ef7de9",
+ "revisionTime": "2018-01-08T22:01:35Z",
+ "version": "v1.9.1",
+ "versionExact": "v1.9.1"
},
{
"checksumSHA1": "n5EgDdBqFMa2KQFhtl+FF/4gIFo=",
"path": "google.golang.org/grpc/peer",
- "revision": "2e463a05d100327ca47ac218281906921038fd95",
- "revisionTime": "2018-10-23T17:37:47Z",
- "version": "v1.16.0",
- "versionExact": "v1.16.0"
+ "revision": "7cea4cc846bcf00cbb27595b07da5de875ef7de9",
+ "revisionTime": "2018-01-08T22:01:35Z",
+ "version": "v1.9.1",
+ "versionExact": "v1.9.1"
},
{
- "checksumSHA1": "780k7ZcT5M32PTx7AmxkxMlZ/Wk=",
+ "checksumSHA1": "JF/KBFCo5JwVtXfrZ2kJnFRC6W8=",
"path": "google.golang.org/grpc/reflection",
- "revision": "2e463a05d100327ca47ac218281906921038fd95",
- "revisionTime": "2018-10-23T17:37:47Z",
- "version": "v1.16.0",
- "versionExact": "v1.16.0"
+ "revision": "7cea4cc846bcf00cbb27595b07da5de875ef7de9",
+ "revisionTime": "2018-01-08T22:01:35Z",
+ "version": "v1.9.1",
+ "versionExact": "v1.9.1"
},
{
- "checksumSHA1": "wvvOSwLYzCHjvTTKFyG2HTD3V+o=",
+ "checksumSHA1": "7Ax2K0St9CIi1rkA9Ju+2ERfe9E=",
"path": "google.golang.org/grpc/reflection/grpc_reflection_v1alpha",
- "revision": "2e463a05d100327ca47ac218281906921038fd95",
- "revisionTime": "2018-10-23T17:37:47Z",
- "version": "v1.16.0",
- "versionExact": "v1.16.0"
+ "revision": "7cea4cc846bcf00cbb27595b07da5de875ef7de9",
+ "revisionTime": "2018-01-08T22:01:35Z",
+ "version": "v1.9.1",
+ "versionExact": "v1.9.1"
},
{
- "checksumSHA1": "GEq6wwE1qWLmkaM02SjxBmmnHDo=",
+ "checksumSHA1": "y8Ta+ctMP9CUTiPyPyxiD154d8w=",
"path": "google.golang.org/grpc/resolver",
- "revision": "2e463a05d100327ca47ac218281906921038fd95",
- "revisionTime": "2018-10-23T17:37:47Z",
- "version": "v1.16.0",
- "versionExact": "v1.16.0"
+ "revision": "7cea4cc846bcf00cbb27595b07da5de875ef7de9",
+ "revisionTime": "2018-01-08T22:01:35Z",
+ "version": "v1.9.1",
+ "versionExact": "v1.9.1"
},
{
- "checksumSHA1": "grHAHa6Fi3WBsXJpmlEOlRbWWVg=",
+ "checksumSHA1": "WpWF+bDzObsHf+bjoGpb/abeFxo=",
"path": "google.golang.org/grpc/resolver/dns",
- "revision": "2e463a05d100327ca47ac218281906921038fd95",
- "revisionTime": "2018-10-23T17:37:47Z",
- "version": "v1.16.0",
- "versionExact": "v1.16.0"
+ "revision": "7cea4cc846bcf00cbb27595b07da5de875ef7de9",
+ "revisionTime": "2018-01-08T22:01:35Z",
+ "version": "v1.9.1",
+ "versionExact": "v1.9.1"
},
{
"checksumSHA1": "zs9M4xE8Lyg4wvuYvR00XoBxmuw=",
"path": "google.golang.org/grpc/resolver/passthrough",
- "revision": "2e463a05d100327ca47ac218281906921038fd95",
- "revisionTime": "2018-10-23T17:37:47Z",
- "version": "v1.16.0",
- "versionExact": "v1.16.0"
+ "revision": "7cea4cc846bcf00cbb27595b07da5de875ef7de9",
+ "revisionTime": "2018-01-08T22:01:35Z",
+ "version": "v1.9.1",
+ "versionExact": "v1.9.1"
},
{
- "checksumSHA1": "YclPgme2gT3S0hTkHVdE1zAxJdo=",
+ "checksumSHA1": "G9lgXNi7qClo5sM2s6TbTHLFR3g=",
"path": "google.golang.org/grpc/stats",
- "revision": "2e463a05d100327ca47ac218281906921038fd95",
- "revisionTime": "2018-10-23T17:37:47Z",
- "version": "v1.16.0",
- "versionExact": "v1.16.0"
+ "revision": "7cea4cc846bcf00cbb27595b07da5de875ef7de9",
+ "revisionTime": "2018-01-08T22:01:35Z",
+ "version": "v1.9.1",
+ "versionExact": "v1.9.1"
},
{
- "checksumSHA1": "hFyBO5vgsMamKhUOSyPCqROk1vo=",
+ "checksumSHA1": "tUo+M0Cb0W9ZEIt5BH30wJz/Kjc=",
"path": "google.golang.org/grpc/status",
- "revision": "2e463a05d100327ca47ac218281906921038fd95",
- "revisionTime": "2018-10-23T17:37:47Z",
- "version": "v1.16.0",
- "versionExact": "v1.16.0"
+ "revision": "7cea4cc846bcf00cbb27595b07da5de875ef7de9",
+ "revisionTime": "2018-01-08T22:01:35Z",
+ "version": "v1.9.1",
+ "versionExact": "v1.9.1"
},
{
"checksumSHA1": "qvArRhlrww5WvRmbyMF2mUfbJew=",
"path": "google.golang.org/grpc/tap",
- "revision": "2e463a05d100327ca47ac218281906921038fd95",
- "revisionTime": "2018-10-23T17:37:47Z",
- "version": "v1.16.0",
- "versionExact": "v1.16.0"
+ "revision": "7cea4cc846bcf00cbb27595b07da5de875ef7de9",
+ "revisionTime": "2018-01-08T22:01:35Z",
+ "version": "v1.9.1",
+ "versionExact": "v1.9.1"
+ },
+ {
+ "checksumSHA1": "4PldZ/0JjX6SpJYaMByY1ozywnY=",
+ "path": "google.golang.org/grpc/transport",
+ "revision": "7cea4cc846bcf00cbb27595b07da5de875ef7de9",
+ "revisionTime": "2018-01-08T22:01:35Z",
+ "version": "v1.9.1",
+ "versionExact": "v1.9.1"
}
],
"rootPath": "gitlab.com/gitlab-org/gitaly"