gitlab.com/gitlab-org/gitlab-pages.git
author     Nick Thomas <nick@gitlab.com>  2018-08-23 15:58:57 +0300
committer  Nick Thomas <nick@gitlab.com>  2018-08-23 15:58:57 +0300
commit     dba950f3d7acb66ec14e366e96e4394453d4a948 (patch)
tree       f66e502d2849752c4080a339a0141843f76a2ff0
parent     75fbbcb9b0cc04caaf9b35ceee620a85ace82e51 (diff)
parent     dcc9d46164598e0008899ef94cca95d688c58af7 (diff)
Merge branch 'bjk/update_prom_vendor' into 'master'
Update Prometheus vendoring. See merge request gitlab-org/gitlab-pages!105
-rw-r--r--  vendor/github.com/beorn7/perks/quantile/stream.go | 34
-rw-r--r--  vendor/github.com/golang/protobuf/jsonpb/jsonpb.go | 188
-rw-r--r--  vendor/github.com/golang/protobuf/proto/Makefile | 43
-rw-r--r--  vendor/github.com/golang/protobuf/proto/clone.go | 46
-rw-r--r--  vendor/github.com/golang/protobuf/proto/decode.go | 780
-rw-r--r--  vendor/github.com/golang/protobuf/proto/discard.go | 350
-rw-r--r--  vendor/github.com/golang/protobuf/proto/encode.go | 1190
-rw-r--r--  vendor/github.com/golang/protobuf/proto/equal.go | 38
-rw-r--r--  vendor/github.com/golang/protobuf/proto/extensions.go | 205
-rw-r--r--  vendor/github.com/golang/protobuf/proto/lib.go | 71
-rw-r--r--  vendor/github.com/golang/protobuf/proto/message_set.go | 81
-rw-r--r--  vendor/github.com/golang/protobuf/proto/pointer_reflect.go | 595
-rw-r--r--  vendor/github.com/golang/protobuf/proto/pointer_unsafe.go | 366
-rw-r--r--  vendor/github.com/golang/protobuf/proto/properties.go | 434
-rw-r--r--  vendor/github.com/golang/protobuf/proto/table_marshal.go | 2681
-rw-r--r--  vendor/github.com/golang/protobuf/proto/table_merge.go | 654
-rw-r--r--  vendor/github.com/golang/protobuf/proto/table_unmarshal.go | 1967
-rw-r--r--  vendor/github.com/golang/protobuf/proto/text.go | 61
-rw-r--r--  vendor/github.com/golang/protobuf/proto/text_parser.go | 87
-rw-r--r--  vendor/github.com/golang/protobuf/ptypes/any/any.pb.go | 55
-rw-r--r--  vendor/github.com/golang/protobuf/ptypes/any/any.proto | 6
-rw-r--r--  vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go | 51
-rw-r--r--  vendor/github.com/golang/protobuf/ptypes/empty/empty.pb.go | 47
-rwxr-xr-x  vendor/github.com/golang/protobuf/ptypes/regen.sh | 43
-rw-r--r--  vendor/github.com/golang/protobuf/ptypes/struct/struct.pb.go | 140
-rw-r--r--  vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go | 53
-rw-r--r--  vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.proto | 2
-rw-r--r--  vendor/github.com/prometheus/client_golang/prometheus/collector.go | 39
-rw-r--r--  vendor/github.com/prometheus/client_golang/prometheus/counter.go | 191
-rw-r--r--  vendor/github.com/prometheus/client_golang/prometheus/desc.go | 37
-rw-r--r--  vendor/github.com/prometheus/client_golang/prometheus/doc.go | 94
-rw-r--r--  vendor/github.com/prometheus/client_golang/prometheus/gauge.go | 204
-rw-r--r--  vendor/github.com/prometheus/client_golang/prometheus/go_collector.go | 61
-rw-r--r--  vendor/github.com/prometheus/client_golang/prometheus/histogram.go | 147
-rw-r--r--  vendor/github.com/prometheus/client_golang/prometheus/http.go | 83
-rw-r--r--  vendor/github.com/prometheus/client_golang/prometheus/labels.go | 57
-rw-r--r--  vendor/github.com/prometheus/client_golang/prometheus/metric.go | 34
-rw-r--r--  vendor/github.com/prometheus/client_golang/prometheus/observer.go | 52
-rw-r--r--  vendor/github.com/prometheus/client_golang/prometheus/process_collector.go | 127
-rw-r--r--  vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go | 199
-rw-r--r--  vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator_1_8.go | 181
-rw-r--r--  vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator_pre_1_8.go | 44
-rw-r--r--  vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go | 162
-rw-r--r--  vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client.go | 97
-rw-r--r--  vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client_1_8.go | 144
-rw-r--r--  vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go | 447
-rw-r--r--  vendor/github.com/prometheus/client_golang/prometheus/registry.go | 579
-rw-r--r--  vendor/github.com/prometheus/client_golang/prometheus/summary.go | 185
-rw-r--r--  vendor/github.com/prometheus/client_golang/prometheus/timer.go | 51
-rw-r--r--  vendor/github.com/prometheus/client_golang/prometheus/untyped.go | 102
-rw-r--r--  vendor/github.com/prometheus/client_golang/prometheus/value.go | 82
-rw-r--r--  vendor/github.com/prometheus/client_golang/prometheus/vec.go | 494
-rw-r--r--  vendor/github.com/prometheus/client_model/go/metrics.pb.go | 381
-rw-r--r--  vendor/github.com/prometheus/common/expfmt/decode.go | 47
-rw-r--r--  vendor/github.com/prometheus/common/expfmt/expfmt.go | 12
-rw-r--r--  vendor/github.com/prometheus/common/expfmt/text_create.go | 7
-rw-r--r--  vendor/github.com/prometheus/common/expfmt/text_parse.go | 12
-rw-r--r--  vendor/github.com/prometheus/common/model/labels.go | 12
-rw-r--r--  vendor/github.com/prometheus/common/model/labelset.go | 2
-rw-r--r--  vendor/github.com/prometheus/common/model/metric.go | 11
-rw-r--r--  vendor/github.com/prometheus/common/model/silence.go | 4
-rw-r--r--  vendor/github.com/prometheus/common/model/time.go | 17
-rw-r--r--  vendor/github.com/prometheus/common/model/value.go | 25
-rw-r--r--  vendor/github.com/prometheus/procfs/AUTHORS.md | 20
-rw-r--r--  vendor/github.com/prometheus/procfs/CONTRIBUTING.md | 6
-rw-r--r--  vendor/github.com/prometheus/procfs/MAINTAINERS.md | 1
-rw-r--r--  vendor/github.com/prometheus/procfs/Makefile | 83
-rw-r--r--  vendor/github.com/prometheus/procfs/README.md | 1
-rw-r--r--  vendor/github.com/prometheus/procfs/buddyinfo.go | 95
-rw-r--r--  vendor/github.com/prometheus/procfs/fixtures.ttar | 446
-rw-r--r--  vendor/github.com/prometheus/procfs/fs.go | 49
-rw-r--r--  vendor/github.com/prometheus/procfs/internal/util/parse.go | 46
-rw-r--r--  vendor/github.com/prometheus/procfs/ipvs.go | 69
-rw-r--r--  vendor/github.com/prometheus/procfs/mdstat.go | 13
-rw-r--r--  vendor/github.com/prometheus/procfs/mountstats.go | 606
-rw-r--r--  vendor/github.com/prometheus/procfs/net_dev.go | 216
-rw-r--r--  vendor/github.com/prometheus/procfs/nfs/nfs.go | 263
-rw-r--r--  vendor/github.com/prometheus/procfs/nfs/parse.go | 317
-rw-r--r--  vendor/github.com/prometheus/procfs/nfs/parse_nfs.go | 67
-rw-r--r--  vendor/github.com/prometheus/procfs/nfs/parse_nfsd.go | 89
-rw-r--r--  vendor/github.com/prometheus/procfs/proc.go | 28
-rw-r--r--  vendor/github.com/prometheus/procfs/proc_io.go | 18
-rw-r--r--  vendor/github.com/prometheus/procfs/proc_limits.go | 51
-rw-r--r--  vendor/github.com/prometheus/procfs/proc_ns.go | 68
-rw-r--r--  vendor/github.com/prometheus/procfs/proc_stat.go | 13
-rw-r--r--  vendor/github.com/prometheus/procfs/stat.go | 206
-rwxr-xr-x  vendor/github.com/prometheus/procfs/ttar | 389
-rw-r--r--  vendor/github.com/prometheus/procfs/xfrm.go | 187
-rw-r--r--  vendor/github.com/prometheus/procfs/xfs/parse.go | 330
-rw-r--r--  vendor/github.com/prometheus/procfs/xfs/xfs.go | 163
-rw-r--r--  vendor/vendor.json | 127
91 files changed, 14200 insertions, 4458 deletions
diff --git a/vendor/github.com/beorn7/perks/quantile/stream.go b/vendor/github.com/beorn7/perks/quantile/stream.go
index f4cabd66..d7d14f8e 100644
--- a/vendor/github.com/beorn7/perks/quantile/stream.go
+++ b/vendor/github.com/beorn7/perks/quantile/stream.go
@@ -77,15 +77,20 @@ func NewHighBiased(epsilon float64) *Stream {
// is guaranteed to be within (Quantile±Epsilon).
//
// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error properties.
-func NewTargeted(targets map[float64]float64) *Stream {
+func NewTargeted(targetMap map[float64]float64) *Stream {
+ // Convert map to slice to avoid slow iterations on a map.
+ // ƒ is called on the hot path, so converting the map to a slice
+ // beforehand results in significant CPU savings.
+ targets := targetMapToSlice(targetMap)
+
ƒ := func(s *stream, r float64) float64 {
var m = math.MaxFloat64
var f float64
- for quantile, epsilon := range targets {
- if quantile*s.n <= r {
- f = (2 * epsilon * r) / quantile
+ for _, t := range targets {
+ if t.quantile*s.n <= r {
+ f = (2 * t.epsilon * r) / t.quantile
} else {
- f = (2 * epsilon * (s.n - r)) / (1 - quantile)
+ f = (2 * t.epsilon * (s.n - r)) / (1 - t.quantile)
}
if f < m {
m = f
@@ -96,6 +101,25 @@ func NewTargeted(targets map[float64]float64) *Stream {
return newStream(ƒ)
}
+type target struct {
+ quantile float64
+ epsilon float64
+}
+
+func targetMapToSlice(targetMap map[float64]float64) []target {
+ targets := make([]target, 0, len(targetMap))
+
+ for quantile, epsilon := range targetMap {
+ t := target{
+ quantile: quantile,
+ epsilon: epsilon,
+ }
+ targets = append(targets, t)
+ }
+
+ return targets
+}
+
// Stream computes quantiles for a stream of float64s. It is not thread-safe by
// design. Take care when using across multiple goroutines.
type Stream struct {
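The comment in the hunk above explains the change: the invariant function ƒ runs on every compression step, so NewTargeted now copies the target map into a slice once instead of ranging over the map on the hot path. For context, a minimal usage sketch of the package (not part of this change; the target values are illustrative):

    package main

    import (
        "fmt"

        "github.com/beorn7/perks/quantile"
    )

    func main() {
        // quantile -> allowed error; NewTargeted converts this map to a slice
        // internally so the invariant function never iterates a map.
        s := quantile.NewTargeted(map[float64]float64{
            0.50: 0.05,
            0.90: 0.01,
            0.99: 0.001,
        })
        for i := 1; i <= 1000; i++ {
            s.Insert(float64(i))
        }
        fmt.Println(s.Query(0.99)) // roughly the 99th percentile of the inserts
    }
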
diff --git a/vendor/github.com/golang/protobuf/jsonpb/jsonpb.go b/vendor/github.com/golang/protobuf/jsonpb/jsonpb.go
index 110ae138..ff368f33 100644
--- a/vendor/github.com/golang/protobuf/jsonpb/jsonpb.go
+++ b/vendor/github.com/golang/protobuf/jsonpb/jsonpb.go
@@ -56,6 +56,8 @@ import (
stpb "github.com/golang/protobuf/ptypes/struct"
)
+const secondInNanos = int64(time.Second / time.Nanosecond)
+
// Marshaler is a configurable object for converting between
// protocol buffer objects and a JSON representation for them.
type Marshaler struct {
@@ -118,6 +120,14 @@ type JSONPBUnmarshaler interface {
// Marshal marshals a protocol buffer into JSON.
func (m *Marshaler) Marshal(out io.Writer, pb proto.Message) error {
+ v := reflect.ValueOf(pb)
+ if pb == nil || (v.Kind() == reflect.Ptr && v.IsNil()) {
+ return errors.New("Marshal called with nil")
+ }
+ // Check for unset required fields first.
+ if err := checkRequiredFields(pb); err != nil {
+ return err
+ }
writer := &errWriter{writer: out}
return m.marshalObject(writer, pb, "", "")
}
@@ -190,13 +200,22 @@ func (m *Marshaler) marshalObject(out *errWriter, v proto.Message, indent, typeU
// Any is a bit more involved.
return m.marshalAny(out, v, indent)
case "Duration":
- // "Generated output always contains 3, 6, or 9 fractional digits,
+ // "Generated output always contains 0, 3, 6, or 9 fractional digits,
// depending on required precision."
s, ns := s.Field(0).Int(), s.Field(1).Int()
- d := time.Duration(s)*time.Second + time.Duration(ns)*time.Nanosecond
- x := fmt.Sprintf("%.9f", d.Seconds())
+ if ns <= -secondInNanos || ns >= secondInNanos {
+ return fmt.Errorf("ns out of range (%v, %v)", -secondInNanos, secondInNanos)
+ }
+ if (s > 0 && ns < 0) || (s < 0 && ns > 0) {
+ return errors.New("signs of seconds and nanos do not match")
+ }
+ if s < 0 {
+ ns = -ns
+ }
+ x := fmt.Sprintf("%d.%09d", s, ns)
x = strings.TrimSuffix(x, "000")
x = strings.TrimSuffix(x, "000")
+ x = strings.TrimSuffix(x, ".000")
out.write(`"`)
out.write(x)
out.write(`s"`)
@@ -207,13 +226,17 @@ func (m *Marshaler) marshalObject(out *errWriter, v proto.Message, indent, typeU
return m.marshalValue(out, &proto.Properties{}, s.Field(0), indent)
case "Timestamp":
// "RFC 3339, where generated output will always be Z-normalized
- // and uses 3, 6 or 9 fractional digits."
+ // and uses 0, 3, 6 or 9 fractional digits."
s, ns := s.Field(0).Int(), s.Field(1).Int()
+ if ns < 0 || ns >= secondInNanos {
+ return fmt.Errorf("ns out of range [0, %v)", secondInNanos)
+ }
t := time.Unix(s, ns).UTC()
// time.RFC3339Nano isn't exactly right (we need to get 3/6/9 fractional digits).
x := t.Format("2006-01-02T15:04:05.000000000")
x = strings.TrimSuffix(x, "000")
x = strings.TrimSuffix(x, "000")
+ x = strings.TrimSuffix(x, ".000")
out.write(`"`)
out.write(x)
out.write(`Z"`)
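Both the Duration and Timestamp branches above rely on the same trick: format with nine fractional digits, then strip trailing groups of zeros so the output ends up with 0, 3, 6, or 9 digits. A small standalone illustration of that trimming (not part of the patch):

    package main

    import (
        "fmt"
        "strings"
    )

    // trim applies the same TrimSuffix sequence used by the marshaling code above.
    func trim(x string) string {
        x = strings.TrimSuffix(x, "000")
        x = strings.TrimSuffix(x, "000")
        x = strings.TrimSuffix(x, ".000")
        return x
    }

    func main() {
        fmt.Println(trim("1.500000000")) // "1.500"       -> 3 fractional digits
        fmt.Println(trim("3.000000000")) // "3"           -> 0 fractional digits
        fmt.Println(trim("0.000000001")) // "0.000000001" -> all 9 digits kept
    }
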
@@ -632,7 +655,10 @@ func (u *Unmarshaler) UnmarshalNext(dec *json.Decoder, pb proto.Message) error {
if err := dec.Decode(&inputValue); err != nil {
return err
}
- return u.unmarshalValue(reflect.ValueOf(pb).Elem(), inputValue, nil)
+ if err := u.unmarshalValue(reflect.ValueOf(pb).Elem(), inputValue, nil); err != nil {
+ return err
+ }
+ return checkRequiredFields(pb)
}
// Unmarshal unmarshals a JSON object stream into a protocol
@@ -803,7 +829,7 @@ func (u *Unmarshaler) unmarshalValue(target reflect.Value, inputValue json.RawMe
return fmt.Errorf("bad ListValue: %v", err)
}
- target.Field(0).Set(reflect.ValueOf(make([]*stpb.Value, len(s), len(s))))
+ target.Field(0).Set(reflect.ValueOf(make([]*stpb.Value, len(s))))
for i, sv := range s {
if err := u.unmarshalValue(target.Field(0).Index(i), sv, prop); err != nil {
return err
@@ -973,13 +999,6 @@ func (u *Unmarshaler) unmarshalValue(target reflect.Value, inputValue json.RawMe
}
if mp != nil {
target.Set(reflect.MakeMap(targetType))
- var keyprop, valprop *proto.Properties
- if prop != nil {
- // These could still be nil if the protobuf metadata is broken somehow.
- // TODO: This won't work because the fields are unexported.
- // We should probably just reparse them.
- //keyprop, valprop = prop.mkeyprop, prop.mvalprop
- }
for ks, raw := range mp {
// Unmarshal map key. The core json library already decoded the key into a
// string, so we handle that specially. Other types were quoted post-serialization.
@@ -988,14 +1007,16 @@ func (u *Unmarshaler) unmarshalValue(target reflect.Value, inputValue json.RawMe
k = reflect.ValueOf(ks)
} else {
k = reflect.New(targetType.Key()).Elem()
- if err := u.unmarshalValue(k, json.RawMessage(ks), keyprop); err != nil {
+ // TODO: pass the correct Properties if needed.
+ if err := u.unmarshalValue(k, json.RawMessage(ks), nil); err != nil {
return err
}
}
// Unmarshal map value.
v := reflect.New(targetType.Elem()).Elem()
- if err := u.unmarshalValue(v, raw, valprop); err != nil {
+ // TODO: pass the correct Properties if needed.
+ if err := u.unmarshalValue(v, raw, nil); err != nil {
return err
}
target.SetMapIndex(k, v)
@@ -1081,3 +1102,140 @@ func (s mapKeys) Less(i, j int) bool {
}
return fmt.Sprint(s[i].Interface()) < fmt.Sprint(s[j].Interface())
}
+
+// checkRequiredFields returns an error if any required field in the given proto message is not set.
+// This function is used by both Marshal and Unmarshal. While required fields only exist in a
+// proto2 message, a proto3 message can contain proto2 message(s).
+func checkRequiredFields(pb proto.Message) error {
+ // Most well-known type messages do not contain required fields. The "Any" type may contain
+ // a message that has required fields.
+ //
+ // When an Any message is being marshaled, the code will invoke proto.Unmarshal on Any.Value
+ // field in order to transform that into JSON, and that should have returned an error if a
+ // required field is not set in the embedded message.
+ //
+ // When an Any message is being unmarshaled, the code will have invoked proto.Marshal on the
+ // embedded message to store the serialized message in Any.Value field, and that should have
+ // returned an error if a required field is not set.
+ if _, ok := pb.(wkt); ok {
+ return nil
+ }
+
+ v := reflect.ValueOf(pb)
+ // Skip message if it is not a struct pointer.
+ if v.Kind() != reflect.Ptr {
+ return nil
+ }
+ v = v.Elem()
+ if v.Kind() != reflect.Struct {
+ return nil
+ }
+
+ for i := 0; i < v.NumField(); i++ {
+ field := v.Field(i)
+ sfield := v.Type().Field(i)
+
+ if sfield.PkgPath != "" {
+ // blank PkgPath means the field is exported; skip if not exported
+ continue
+ }
+
+ if strings.HasPrefix(sfield.Name, "XXX_") {
+ continue
+ }
+
+ // Oneof field is an interface implemented by wrapper structs containing the actual oneof
+ // field, i.e. an interface containing &T{real_value}.
+ if sfield.Tag.Get("protobuf_oneof") != "" {
+ if field.Kind() != reflect.Interface {
+ continue
+ }
+ v := field.Elem()
+ if v.Kind() != reflect.Ptr || v.IsNil() {
+ continue
+ }
+ v = v.Elem()
+ if v.Kind() != reflect.Struct || v.NumField() < 1 {
+ continue
+ }
+ field = v.Field(0)
+ sfield = v.Type().Field(0)
+ }
+
+ protoTag := sfield.Tag.Get("protobuf")
+ if protoTag == "" {
+ continue
+ }
+ var prop proto.Properties
+ prop.Init(sfield.Type, sfield.Name, protoTag, &sfield)
+
+ switch field.Kind() {
+ case reflect.Map:
+ if field.IsNil() {
+ continue
+ }
+ // Check each map value.
+ keys := field.MapKeys()
+ for _, k := range keys {
+ v := field.MapIndex(k)
+ if err := checkRequiredFieldsInValue(v); err != nil {
+ return err
+ }
+ }
+ case reflect.Slice:
+ // Handle non-repeated type, e.g. bytes.
+ if !prop.Repeated {
+ if prop.Required && field.IsNil() {
+ return fmt.Errorf("required field %q is not set", prop.Name)
+ }
+ continue
+ }
+
+ // Handle repeated type.
+ if field.IsNil() {
+ continue
+ }
+ // Check each slice item.
+ for i := 0; i < field.Len(); i++ {
+ v := field.Index(i)
+ if err := checkRequiredFieldsInValue(v); err != nil {
+ return err
+ }
+ }
+ case reflect.Ptr:
+ if field.IsNil() {
+ if prop.Required {
+ return fmt.Errorf("required field %q is not set", prop.Name)
+ }
+ continue
+ }
+ if err := checkRequiredFieldsInValue(field); err != nil {
+ return err
+ }
+ }
+ }
+
+ // Handle proto2 extensions.
+ for _, ext := range proto.RegisteredExtensions(pb) {
+ if !proto.HasExtension(pb, ext) {
+ continue
+ }
+ ep, err := proto.GetExtension(pb, ext)
+ if err != nil {
+ return err
+ }
+ err = checkRequiredFieldsInValue(reflect.ValueOf(ep))
+ if err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func checkRequiredFieldsInValue(v reflect.Value) error {
+ if pm, ok := v.Interface().(proto.Message); ok {
+ return checkRequiredFields(pm)
+ }
+ return nil
+}
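checkRequiredFields walks the message through its protobuf struct tags, so Marshal and UnmarshalNext now fail early when a proto2 required field is unset. A hedged sketch of that behavior; the legacyMsg type below is hypothetical and hand-written purely to stand in for a proto2-generated message:

    package main

    import (
        "bytes"
        "fmt"

        "github.com/golang/protobuf/jsonpb"
    )

    // legacyMsg mimics a proto2-generated struct: "req" in the tag marks the
    // field required, which checkRequiredFields detects via reflection.
    type legacyMsg struct {
        Name             *string `protobuf:"bytes,1,req,name=name"`
        XXX_unrecognized []byte
    }

    func (m *legacyMsg) Reset()         { *m = legacyMsg{} }
    func (m *legacyMsg) String() string { return fmt.Sprint(*m) }
    func (*legacyMsg) ProtoMessage()    {}

    func main() {
        var buf bytes.Buffer
        m := &jsonpb.Marshaler{}
        // Name is nil, so this is expected to return a
        // "required field ... is not set" style error.
        err := m.Marshal(&buf, &legacyMsg{})
        fmt.Println(err)
    }
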
diff --git a/vendor/github.com/golang/protobuf/proto/Makefile b/vendor/github.com/golang/protobuf/proto/Makefile
deleted file mode 100644
index e2e0651a..00000000
--- a/vendor/github.com/golang/protobuf/proto/Makefile
+++ /dev/null
@@ -1,43 +0,0 @@
-# Go support for Protocol Buffers - Google's data interchange format
-#
-# Copyright 2010 The Go Authors. All rights reserved.
-# https://github.com/golang/protobuf
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following disclaimer
-# in the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived from
-# this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-install:
- go install
-
-test: install generate-test-pbs
- go test
-
-
-generate-test-pbs:
- make install
- make -C testdata
- protoc --go_out=Mtestdata/test.proto=github.com/golang/protobuf/proto/testdata,Mgoogle/protobuf/any.proto=github.com/golang/protobuf/ptypes/any:. proto3_proto/proto3.proto
- make
diff --git a/vendor/github.com/golang/protobuf/proto/clone.go b/vendor/github.com/golang/protobuf/proto/clone.go
index e392575b..3cd3249f 100644
--- a/vendor/github.com/golang/protobuf/proto/clone.go
+++ b/vendor/github.com/golang/protobuf/proto/clone.go
@@ -35,22 +35,39 @@
package proto
import (
+ "fmt"
"log"
"reflect"
"strings"
)
// Clone returns a deep copy of a protocol buffer.
-func Clone(pb Message) Message {
- in := reflect.ValueOf(pb)
+func Clone(src Message) Message {
+ in := reflect.ValueOf(src)
if in.IsNil() {
- return pb
+ return src
}
-
out := reflect.New(in.Type().Elem())
- // out is empty so a merge is a deep copy.
- mergeStruct(out.Elem(), in.Elem())
- return out.Interface().(Message)
+ dst := out.Interface().(Message)
+ Merge(dst, src)
+ return dst
+}
+
+// Merger is the interface representing objects that can merge messages of the same type.
+type Merger interface {
+ // Merge merges src into this message.
+ // Required and optional fields that are set in src will be set to that value in dst.
+ // Elements of repeated fields will be appended.
+ //
+ // Merge may panic if called with a different argument type than the receiver.
+ Merge(src Message)
+}
+
+// generatedMerger is the custom merge method that generated protos will have.
+// We must add this method since a generated Merge method will conflict with
+// many existing protos that have a Merge data field already defined.
+type generatedMerger interface {
+ XXX_Merge(src Message)
}
// Merge merges src into dst.
@@ -58,17 +75,24 @@ func Clone(pb Message) Message {
// Elements of repeated fields will be appended.
// Merge panics if src and dst are not the same type, or if dst is nil.
func Merge(dst, src Message) {
+ if m, ok := dst.(Merger); ok {
+ m.Merge(src)
+ return
+ }
+
in := reflect.ValueOf(src)
out := reflect.ValueOf(dst)
if out.IsNil() {
panic("proto: nil destination")
}
if in.Type() != out.Type() {
- // Explicit test prior to mergeStruct so that mistyped nils will fail
- panic("proto: type mismatch")
+ panic(fmt.Sprintf("proto.Merge(%T, %T) type mismatch", dst, src))
}
if in.IsNil() {
- // Merging nil into non-nil is a quiet no-op
+ return // Merge from nil src is a noop
+ }
+ if m, ok := dst.(generatedMerger); ok {
+ m.XXX_Merge(src)
return
}
mergeStruct(out.Elem(), in.Elem())
@@ -84,7 +108,7 @@ func mergeStruct(out, in reflect.Value) {
mergeAny(out.Field(i), in.Field(i), false, sprop.Prop[i])
}
- if emIn, ok := extendable(in.Addr().Interface()); ok {
+ if emIn, err := extendable(in.Addr().Interface()); err == nil {
emOut, _ := extendable(out.Addr().Interface())
mIn, muIn := emIn.extensionsRead()
if mIn != nil {
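Clone is now implemented in terms of Merge, and generated messages can take over merging through the new Merger/XXX_Merge hooks, but the observable semantics stay the same: fields set in src overwrite their counterparts in dst, and elements of repeated fields are appended. A small sketch using the vendored well-known ListValue type:

    package main

    import (
        "fmt"

        "github.com/golang/protobuf/proto"
        stpb "github.com/golang/protobuf/ptypes/struct"
    )

    func num(n float64) *stpb.Value {
        return &stpb.Value{Kind: &stpb.Value_NumberValue{NumberValue: n}}
    }

    func main() {
        dst := &stpb.ListValue{Values: []*stpb.Value{num(1)}}
        src := &stpb.ListValue{Values: []*stpb.Value{num(2), num(3)}}

        proto.Merge(dst, src)        // repeated field: src elements are appended
        fmt.Println(len(dst.Values)) // 3

        cp := proto.Clone(src).(*stpb.ListValue) // deep copy == Merge into a fresh message
        fmt.Println(len(cp.Values))              // 2
    }
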
diff --git a/vendor/github.com/golang/protobuf/proto/decode.go b/vendor/github.com/golang/protobuf/proto/decode.go
index 04dcb881..d9aa3c42 100644
--- a/vendor/github.com/golang/protobuf/proto/decode.go
+++ b/vendor/github.com/golang/protobuf/proto/decode.go
@@ -39,8 +39,6 @@ import (
"errors"
"fmt"
"io"
- "os"
- "reflect"
)
// errOverflow is returned when an integer is too large to be represented.
@@ -50,10 +48,6 @@ var errOverflow = errors.New("proto: integer overflow")
// wire type is encountered. It does not get returned to user code.
var ErrInternalBadWireType = errors.New("proto: internal error: bad wiretype for oneof")
-// The fundamental decoders that interpret bytes on the wire.
-// Those that take integer types all return uint64 and are
-// therefore of type valueDecoder.
-
// DecodeVarint reads a varint-encoded integer from the slice.
// It returns the integer and the number of bytes consumed, or
// zero if there is not enough.
@@ -61,7 +55,6 @@ var ErrInternalBadWireType = errors.New("proto: internal error: bad wiretype for
// int32, int64, uint32, uint64, bool, and enum
// protocol buffer types.
func DecodeVarint(buf []byte) (x uint64, n int) {
- // x, n already 0
for shift := uint(0); shift < 64; shift += 7 {
if n >= len(buf) {
return 0, 0
@@ -78,13 +71,7 @@ func DecodeVarint(buf []byte) (x uint64, n int) {
return 0, 0
}
-// DecodeVarint reads a varint-encoded integer from the Buffer.
-// This is the format for the
-// int32, int64, uint32, uint64, bool, and enum
-// protocol buffer types.
-func (p *Buffer) DecodeVarint() (x uint64, err error) {
- // x, err already 0
-
+func (p *Buffer) decodeVarintSlow() (x uint64, err error) {
i := p.index
l := len(p.buf)
@@ -107,6 +94,107 @@ func (p *Buffer) DecodeVarint() (x uint64, err error) {
return
}
+// DecodeVarint reads a varint-encoded integer from the Buffer.
+// This is the format for the
+// int32, int64, uint32, uint64, bool, and enum
+// protocol buffer types.
+func (p *Buffer) DecodeVarint() (x uint64, err error) {
+ i := p.index
+ buf := p.buf
+
+ if i >= len(buf) {
+ return 0, io.ErrUnexpectedEOF
+ } else if buf[i] < 0x80 {
+ p.index++
+ return uint64(buf[i]), nil
+ } else if len(buf)-i < 10 {
+ return p.decodeVarintSlow()
+ }
+
+ var b uint64
+ // we already checked the first byte
+ x = uint64(buf[i]) - 0x80
+ i++
+
+ b = uint64(buf[i])
+ i++
+ x += b << 7
+ if b&0x80 == 0 {
+ goto done
+ }
+ x -= 0x80 << 7
+
+ b = uint64(buf[i])
+ i++
+ x += b << 14
+ if b&0x80 == 0 {
+ goto done
+ }
+ x -= 0x80 << 14
+
+ b = uint64(buf[i])
+ i++
+ x += b << 21
+ if b&0x80 == 0 {
+ goto done
+ }
+ x -= 0x80 << 21
+
+ b = uint64(buf[i])
+ i++
+ x += b << 28
+ if b&0x80 == 0 {
+ goto done
+ }
+ x -= 0x80 << 28
+
+ b = uint64(buf[i])
+ i++
+ x += b << 35
+ if b&0x80 == 0 {
+ goto done
+ }
+ x -= 0x80 << 35
+
+ b = uint64(buf[i])
+ i++
+ x += b << 42
+ if b&0x80 == 0 {
+ goto done
+ }
+ x -= 0x80 << 42
+
+ b = uint64(buf[i])
+ i++
+ x += b << 49
+ if b&0x80 == 0 {
+ goto done
+ }
+ x -= 0x80 << 49
+
+ b = uint64(buf[i])
+ i++
+ x += b << 56
+ if b&0x80 == 0 {
+ goto done
+ }
+ x -= 0x80 << 56
+
+ b = uint64(buf[i])
+ i++
+ x += b << 63
+ if b&0x80 == 0 {
+ goto done
+ }
+ // x -= 0x80 << 63 // Always zero.
+
+ return 0, errOverflow
+
+done:
+ p.index = i
+ return x, nil
+}
+
// DecodeFixed64 reads a 64-bit integer from the Buffer.
// This is the format for the
// fixed64, sfixed64, and double protocol buffer types.
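The unrolled fast path above decodes up to ten bytes, seven payload bits per byte, with the high bit of each byte acting as a continuation flag. For example, 300 (binary 1_0010_1100) encodes as 0xAC 0x02, which the exported helpers round-trip:

    package main

    import (
        "fmt"

        "github.com/golang/protobuf/proto"
    )

    func main() {
        b := proto.EncodeVarint(300)
        fmt.Printf("% x\n", b) // ac 02

        x, n := proto.DecodeVarint(b)
        fmt.Println(x, n) // 300 2
    }
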
@@ -173,9 +261,6 @@ func (p *Buffer) DecodeZigzag32() (x uint64, err error) {
return
}
-// These are not ValueDecoders: they produce an array of bytes or a string.
-// bytes, embedded messages
-
// DecodeRawBytes reads a count-delimited byte buffer from the Buffer.
// This is the format used for the bytes protocol buffer
// type and for embedded messages.
@@ -217,81 +302,29 @@ func (p *Buffer) DecodeStringBytes() (s string, err error) {
return string(buf), nil
}
-// Skip the next item in the buffer. Its wire type is decoded and presented as an argument.
-// If the protocol buffer has extensions, and the field matches, add it as an extension.
-// Otherwise, if the XXX_unrecognized field exists, append the skipped data there.
-func (o *Buffer) skipAndSave(t reflect.Type, tag, wire int, base structPointer, unrecField field) error {
- oi := o.index
-
- err := o.skip(t, tag, wire)
- if err != nil {
- return err
- }
-
- if !unrecField.IsValid() {
- return nil
- }
-
- ptr := structPointer_Bytes(base, unrecField)
-
- // Add the skipped field to struct field
- obuf := o.buf
-
- o.buf = *ptr
- o.EncodeVarint(uint64(tag<<3 | wire))
- *ptr = append(o.buf, obuf[oi:o.index]...)
-
- o.buf = obuf
-
- return nil
-}
-
-// Skip the next item in the buffer. Its wire type is decoded and presented as an argument.
-func (o *Buffer) skip(t reflect.Type, tag, wire int) error {
-
- var u uint64
- var err error
-
- switch wire {
- case WireVarint:
- _, err = o.DecodeVarint()
- case WireFixed64:
- _, err = o.DecodeFixed64()
- case WireBytes:
- _, err = o.DecodeRawBytes(false)
- case WireFixed32:
- _, err = o.DecodeFixed32()
- case WireStartGroup:
- for {
- u, err = o.DecodeVarint()
- if err != nil {
- break
- }
- fwire := int(u & 0x7)
- if fwire == WireEndGroup {
- break
- }
- ftag := int(u >> 3)
- err = o.skip(t, ftag, fwire)
- if err != nil {
- break
- }
- }
- default:
- err = fmt.Errorf("proto: can't skip unknown wire type %d for %s", wire, t)
- }
- return err
-}
-
// Unmarshaler is the interface representing objects that can
-// unmarshal themselves. The method should reset the receiver before
-// decoding starts. The argument points to data that may be
+// unmarshal themselves. The argument points to data that may be
// overwritten, so implementations should not keep references to the
// buffer.
+// Unmarshal implementations should not clear the receiver.
+// Any unmarshaled data should be merged into the receiver.
+// Callers of Unmarshal that do not want to retain existing data
+// should Reset the receiver before calling Unmarshal.
type Unmarshaler interface {
Unmarshal([]byte) error
}
+// newUnmarshaler is the interface representing objects that can
+// unmarshal themselves. The semantics are identical to Unmarshaler.
+//
+// This exists to support protoc-gen-go generated messages.
+// The proto package will stop type-asserting to this interface in the future.
+//
+// DO NOT DEPEND ON THIS.
+type newUnmarshaler interface {
+ XXX_Unmarshal([]byte) error
+}
+
// Unmarshal parses the protocol buffer representation in buf and places the
// decoded result in pb. If the struct underlying pb does not match
// the data in buf, the results can be unpredictable.
@@ -301,7 +334,13 @@ type Unmarshaler interface {
// to preserve and append to existing data.
func Unmarshal(buf []byte, pb Message) error {
pb.Reset()
- return UnmarshalMerge(buf, pb)
+ if u, ok := pb.(newUnmarshaler); ok {
+ return u.XXX_Unmarshal(buf)
+ }
+ if u, ok := pb.(Unmarshaler); ok {
+ return u.Unmarshal(buf)
+ }
+ return NewBuffer(buf).Unmarshal(pb)
}
// UnmarshalMerge parses the protocol buffer representation in buf and
@@ -311,8 +350,16 @@ func Unmarshal(buf []byte, pb Message) error {
// UnmarshalMerge merges into existing data in pb.
// Most code should use Unmarshal instead.
func UnmarshalMerge(buf []byte, pb Message) error {
- // If the object can unmarshal itself, let it.
+ if u, ok := pb.(newUnmarshaler); ok {
+ return u.XXX_Unmarshal(buf)
+ }
if u, ok := pb.(Unmarshaler); ok {
+ // NOTE: The history of proto has unfortunately been inconsistent
+ // whether Unmarshaler should or should not implicitly clear itself.
+ // Some implementations do, most do not.
+ // Thus, calling this here may or may not do what people want.
+ //
+ // See https://github.com/golang/protobuf/issues/424
return u.Unmarshal(buf)
}
return NewBuffer(buf).Unmarshal(pb)
@@ -328,547 +375,54 @@ func (p *Buffer) DecodeMessage(pb Message) error {
}
// DecodeGroup reads a tag-delimited group from the Buffer.
+// StartGroup tag is already consumed. This function consumes
+// EndGroup tag.
func (p *Buffer) DecodeGroup(pb Message) error {
- typ, base, err := getbase(pb)
- if err != nil {
- return err
+ b := p.buf[p.index:]
+ x, y := findEndGroup(b)
+ if x < 0 {
+ return io.ErrUnexpectedEOF
}
- return p.unmarshalType(typ.Elem(), GetProperties(typ.Elem()), true, base)
+ err := Unmarshal(b[:x], pb)
+ p.index += y
+ return err
}
// Unmarshal parses the protocol buffer representation in the
// Buffer and places the decoded result in pb. If the struct
// underlying pb does not match the data in the buffer, the results can be
// unpredictable.
+//
+// Unlike proto.Unmarshal, this does not reset pb before starting to unmarshal.
func (p *Buffer) Unmarshal(pb Message) error {
// If the object can unmarshal itself, let it.
- if u, ok := pb.(Unmarshaler); ok {
- err := u.Unmarshal(p.buf[p.index:])
+ if u, ok := pb.(newUnmarshaler); ok {
+ err := u.XXX_Unmarshal(p.buf[p.index:])
p.index = len(p.buf)
return err
}
-
- typ, base, err := getbase(pb)
- if err != nil {
- return err
- }
-
- err = p.unmarshalType(typ.Elem(), GetProperties(typ.Elem()), false, base)
-
- if collectStats {
- stats.Decode++
- }
-
- return err
-}
-
-// unmarshalType does the work of unmarshaling a structure.
-func (o *Buffer) unmarshalType(st reflect.Type, prop *StructProperties, is_group bool, base structPointer) error {
- var state errorState
- required, reqFields := prop.reqCount, uint64(0)
-
- var err error
- for err == nil && o.index < len(o.buf) {
- oi := o.index
- var u uint64
- u, err = o.DecodeVarint()
- if err != nil {
- break
- }
- wire := int(u & 0x7)
- if wire == WireEndGroup {
- if is_group {
- if required > 0 {
- // Not enough information to determine the exact field.
- // (See below.)
- return &RequiredNotSetError{"{Unknown}"}
- }
- return nil // input is satisfied
- }
- return fmt.Errorf("proto: %s: wiretype end group for non-group", st)
- }
- tag := int(u >> 3)
- if tag <= 0 {
- return fmt.Errorf("proto: %s: illegal tag %d (wire type %d)", st, tag, wire)
- }
- fieldnum, ok := prop.decoderTags.get(tag)
- if !ok {
- // Maybe it's an extension?
- if prop.extendable {
- if e, _ := extendable(structPointer_Interface(base, st)); isExtensionField(e, int32(tag)) {
- if err = o.skip(st, tag, wire); err == nil {
- extmap := e.extensionsWrite()
- ext := extmap[int32(tag)] // may be missing
- ext.enc = append(ext.enc, o.buf[oi:o.index]...)
- extmap[int32(tag)] = ext
- }
- continue
- }
- }
- // Maybe it's a oneof?
- if prop.oneofUnmarshaler != nil {
- m := structPointer_Interface(base, st).(Message)
- // First return value indicates whether tag is a oneof field.
- ok, err = prop.oneofUnmarshaler(m, tag, wire, o)
- if err == ErrInternalBadWireType {
- // Map the error to something more descriptive.
- // Do the formatting here to save generated code space.
- err = fmt.Errorf("bad wiretype for oneof field in %T", m)
- }
- if ok {
- continue
- }
- }
- err = o.skipAndSave(st, tag, wire, base, prop.unrecField)
- continue
- }
- p := prop.Prop[fieldnum]
-
- if p.dec == nil {
- fmt.Fprintf(os.Stderr, "proto: no protobuf decoder for %s.%s\n", st, st.Field(fieldnum).Name)
- continue
- }
- dec := p.dec
- if wire != WireStartGroup && wire != p.WireType {
- if wire == WireBytes && p.packedDec != nil {
- // a packable field
- dec = p.packedDec
- } else {
- err = fmt.Errorf("proto: bad wiretype for field %s.%s: got wiretype %d, want %d", st, st.Field(fieldnum).Name, wire, p.WireType)
- continue
- }
- }
- decErr := dec(o, p, base)
- if decErr != nil && !state.shouldContinue(decErr, p) {
- err = decErr
- }
- if err == nil && p.Required {
- // Successfully decoded a required field.
- if tag <= 64 {
- // use bitmap for fields 1-64 to catch field reuse.
- var mask uint64 = 1 << uint64(tag-1)
- if reqFields&mask == 0 {
- // new required field
- reqFields |= mask
- required--
- }
- } else {
- // This is imprecise. It can be fooled by a required field
- // with a tag > 64 that is encoded twice; that's very rare.
- // A fully correct implementation would require allocating
- // a data structure, which we would like to avoid.
- required--
- }
- }
- }
- if err == nil {
- if is_group {
- return io.ErrUnexpectedEOF
- }
- if state.err != nil {
- return state.err
- }
- if required > 0 {
- // Not enough information to determine the exact field. If we use extra
- // CPU, we could determine the field only if the missing required field
- // has a tag <= 64 and we check reqFields.
- return &RequiredNotSetError{"{Unknown}"}
- }
- }
- return err
-}
-
-// Individual type decoders
-// For each,
-// u is the decoded value,
-// v is a pointer to the field (pointer) in the struct
-
-// Sizes of the pools to allocate inside the Buffer.
-// The goal is modest amortization and allocation
-// on at least 16-byte boundaries.
-const (
- boolPoolSize = 16
- uint32PoolSize = 8
- uint64PoolSize = 4
-)
-
-// Decode a bool.
-func (o *Buffer) dec_bool(p *Properties, base structPointer) error {
- u, err := p.valDec(o)
- if err != nil {
- return err
- }
- if len(o.bools) == 0 {
- o.bools = make([]bool, boolPoolSize)
- }
- o.bools[0] = u != 0
- *structPointer_Bool(base, p.field) = &o.bools[0]
- o.bools = o.bools[1:]
- return nil
-}
-
-func (o *Buffer) dec_proto3_bool(p *Properties, base structPointer) error {
- u, err := p.valDec(o)
- if err != nil {
- return err
- }
- *structPointer_BoolVal(base, p.field) = u != 0
- return nil
-}
-
-// Decode an int32.
-func (o *Buffer) dec_int32(p *Properties, base structPointer) error {
- u, err := p.valDec(o)
- if err != nil {
- return err
- }
- word32_Set(structPointer_Word32(base, p.field), o, uint32(u))
- return nil
-}
-
-func (o *Buffer) dec_proto3_int32(p *Properties, base structPointer) error {
- u, err := p.valDec(o)
- if err != nil {
- return err
- }
- word32Val_Set(structPointer_Word32Val(base, p.field), uint32(u))
- return nil
-}
-
-// Decode an int64.
-func (o *Buffer) dec_int64(p *Properties, base structPointer) error {
- u, err := p.valDec(o)
- if err != nil {
- return err
- }
- word64_Set(structPointer_Word64(base, p.field), o, u)
- return nil
-}
-
-func (o *Buffer) dec_proto3_int64(p *Properties, base structPointer) error {
- u, err := p.valDec(o)
- if err != nil {
- return err
- }
- word64Val_Set(structPointer_Word64Val(base, p.field), o, u)
- return nil
-}
-
-// Decode a string.
-func (o *Buffer) dec_string(p *Properties, base structPointer) error {
- s, err := o.DecodeStringBytes()
- if err != nil {
- return err
- }
- *structPointer_String(base, p.field) = &s
- return nil
-}
-
-func (o *Buffer) dec_proto3_string(p *Properties, base structPointer) error {
- s, err := o.DecodeStringBytes()
- if err != nil {
- return err
- }
- *structPointer_StringVal(base, p.field) = s
- return nil
-}
-
-// Decode a slice of bytes ([]byte).
-func (o *Buffer) dec_slice_byte(p *Properties, base structPointer) error {
- b, err := o.DecodeRawBytes(true)
- if err != nil {
- return err
- }
- *structPointer_Bytes(base, p.field) = b
- return nil
-}
-
-// Decode a slice of bools ([]bool).
-func (o *Buffer) dec_slice_bool(p *Properties, base structPointer) error {
- u, err := p.valDec(o)
- if err != nil {
- return err
- }
- v := structPointer_BoolSlice(base, p.field)
- *v = append(*v, u != 0)
- return nil
-}
-
-// Decode a slice of bools ([]bool) in packed format.
-func (o *Buffer) dec_slice_packed_bool(p *Properties, base structPointer) error {
- v := structPointer_BoolSlice(base, p.field)
-
- nn, err := o.DecodeVarint()
- if err != nil {
- return err
- }
- nb := int(nn) // number of bytes of encoded bools
- fin := o.index + nb
- if fin < o.index {
- return errOverflow
- }
-
- y := *v
- for o.index < fin {
- u, err := p.valDec(o)
- if err != nil {
- return err
- }
- y = append(y, u != 0)
- }
-
- *v = y
- return nil
-}
-
-// Decode a slice of int32s ([]int32).
-func (o *Buffer) dec_slice_int32(p *Properties, base structPointer) error {
- u, err := p.valDec(o)
- if err != nil {
- return err
- }
- structPointer_Word32Slice(base, p.field).Append(uint32(u))
- return nil
-}
-
-// Decode a slice of int32s ([]int32) in packed format.
-func (o *Buffer) dec_slice_packed_int32(p *Properties, base structPointer) error {
- v := structPointer_Word32Slice(base, p.field)
-
- nn, err := o.DecodeVarint()
- if err != nil {
- return err
- }
- nb := int(nn) // number of bytes of encoded int32s
-
- fin := o.index + nb
- if fin < o.index {
- return errOverflow
- }
- for o.index < fin {
- u, err := p.valDec(o)
- if err != nil {
- return err
- }
- v.Append(uint32(u))
- }
- return nil
-}
-
-// Decode a slice of int64s ([]int64).
-func (o *Buffer) dec_slice_int64(p *Properties, base structPointer) error {
- u, err := p.valDec(o)
- if err != nil {
- return err
- }
-
- structPointer_Word64Slice(base, p.field).Append(u)
- return nil
-}
-
-// Decode a slice of int64s ([]int64) in packed format.
-func (o *Buffer) dec_slice_packed_int64(p *Properties, base structPointer) error {
- v := structPointer_Word64Slice(base, p.field)
-
- nn, err := o.DecodeVarint()
- if err != nil {
- return err
- }
- nb := int(nn) // number of bytes of encoded int64s
-
- fin := o.index + nb
- if fin < o.index {
- return errOverflow
- }
- for o.index < fin {
- u, err := p.valDec(o)
- if err != nil {
- return err
- }
- v.Append(u)
- }
- return nil
-}
-
-// Decode a slice of strings ([]string).
-func (o *Buffer) dec_slice_string(p *Properties, base structPointer) error {
- s, err := o.DecodeStringBytes()
- if err != nil {
- return err
- }
- v := structPointer_StringSlice(base, p.field)
- *v = append(*v, s)
- return nil
-}
-
-// Decode a slice of slice of bytes ([][]byte).
-func (o *Buffer) dec_slice_slice_byte(p *Properties, base structPointer) error {
- b, err := o.DecodeRawBytes(true)
- if err != nil {
- return err
- }
- v := structPointer_BytesSlice(base, p.field)
- *v = append(*v, b)
- return nil
-}
-
-// Decode a map field.
-func (o *Buffer) dec_new_map(p *Properties, base structPointer) error {
- raw, err := o.DecodeRawBytes(false)
- if err != nil {
- return err
- }
- oi := o.index // index at the end of this map entry
- o.index -= len(raw) // move buffer back to start of map entry
-
- mptr := structPointer_NewAt(base, p.field, p.mtype) // *map[K]V
- if mptr.Elem().IsNil() {
- mptr.Elem().Set(reflect.MakeMap(mptr.Type().Elem()))
- }
- v := mptr.Elem() // map[K]V
-
- // Prepare addressable doubly-indirect placeholders for the key and value types.
- // See enc_new_map for why.
- keyptr := reflect.New(reflect.PtrTo(p.mtype.Key())).Elem() // addressable *K
- keybase := toStructPointer(keyptr.Addr()) // **K
-
- var valbase structPointer
- var valptr reflect.Value
- switch p.mtype.Elem().Kind() {
- case reflect.Slice:
- // []byte
- var dummy []byte
- valptr = reflect.ValueOf(&dummy) // *[]byte
- valbase = toStructPointer(valptr) // *[]byte
- case reflect.Ptr:
- // message; valptr is **Msg; need to allocate the intermediate pointer
- valptr = reflect.New(reflect.PtrTo(p.mtype.Elem())).Elem() // addressable *V
- valptr.Set(reflect.New(valptr.Type().Elem()))
- valbase = toStructPointer(valptr)
- default:
- // everything else
- valptr = reflect.New(reflect.PtrTo(p.mtype.Elem())).Elem() // addressable *V
- valbase = toStructPointer(valptr.Addr()) // **V
- }
-
- // Decode.
- // This parses a restricted wire format, namely the encoding of a message
- // with two fields. See enc_new_map for the format.
- for o.index < oi {
- // tagcode for key and value properties are always a single byte
- // because they have tags 1 and 2.
- tagcode := o.buf[o.index]
- o.index++
- switch tagcode {
- case p.mkeyprop.tagcode[0]:
- if err := p.mkeyprop.dec(o, p.mkeyprop, keybase); err != nil {
- return err
- }
- case p.mvalprop.tagcode[0]:
- if err := p.mvalprop.dec(o, p.mvalprop, valbase); err != nil {
- return err
- }
- default:
- // TODO: Should we silently skip this instead?
- return fmt.Errorf("proto: bad map data tag %d", raw[0])
- }
- }
- keyelem, valelem := keyptr.Elem(), valptr.Elem()
- if !keyelem.IsValid() {
- keyelem = reflect.Zero(p.mtype.Key())
- }
- if !valelem.IsValid() {
- valelem = reflect.Zero(p.mtype.Elem())
- }
-
- v.SetMapIndex(keyelem, valelem)
- return nil
-}
-
-// Decode a group.
-func (o *Buffer) dec_struct_group(p *Properties, base structPointer) error {
- bas := structPointer_GetStructPointer(base, p.field)
- if structPointer_IsNil(bas) {
- // allocate new nested message
- bas = toStructPointer(reflect.New(p.stype))
- structPointer_SetStructPointer(base, p.field, bas)
- }
- return o.unmarshalType(p.stype, p.sprop, true, bas)
-}
-
-// Decode an embedded message.
-func (o *Buffer) dec_struct_message(p *Properties, base structPointer) (err error) {
- raw, e := o.DecodeRawBytes(false)
- if e != nil {
- return e
- }
-
- bas := structPointer_GetStructPointer(base, p.field)
- if structPointer_IsNil(bas) {
- // allocate new nested message
- bas = toStructPointer(reflect.New(p.stype))
- structPointer_SetStructPointer(base, p.field, bas)
- }
-
- // If the object can unmarshal itself, let it.
- if p.isUnmarshaler {
- iv := structPointer_Interface(bas, p.stype)
- return iv.(Unmarshaler).Unmarshal(raw)
- }
-
- obuf := o.buf
- oi := o.index
- o.buf = raw
- o.index = 0
-
- err = o.unmarshalType(p.stype, p.sprop, false, bas)
- o.buf = obuf
- o.index = oi
-
- return err
-}
-
-// Decode a slice of embedded messages.
-func (o *Buffer) dec_slice_struct_message(p *Properties, base structPointer) error {
- return o.dec_slice_struct(p, false, base)
-}
-
-// Decode a slice of embedded groups.
-func (o *Buffer) dec_slice_struct_group(p *Properties, base structPointer) error {
- return o.dec_slice_struct(p, true, base)
-}
-
-// Decode a slice of structs ([]*struct).
-func (o *Buffer) dec_slice_struct(p *Properties, is_group bool, base structPointer) error {
- v := reflect.New(p.stype)
- bas := toStructPointer(v)
- structPointer_StructPointerSlice(base, p.field).Append(bas)
-
- if is_group {
- err := o.unmarshalType(p.stype, p.sprop, is_group, bas)
- return err
- }
-
- raw, err := o.DecodeRawBytes(false)
- if err != nil {
+ if u, ok := pb.(Unmarshaler); ok {
+ // NOTE: The history of proto has unfortunately been inconsistent
+ // whether Unmarshaler should or should not implicitly clear itself.
+ // Some implementations do, most do not.
+ // Thus, calling this here may or may not do what people want.
+ //
+ // See https://github.com/golang/protobuf/issues/424
+ err := u.Unmarshal(p.buf[p.index:])
+ p.index = len(p.buf)
return err
}
- // If the object can unmarshal itself, let it.
- if p.isUnmarshaler {
- iv := v.Interface()
- return iv.(Unmarshaler).Unmarshal(raw)
- }
-
- obuf := o.buf
- oi := o.index
- o.buf = raw
- o.index = 0
-
- err = o.unmarshalType(p.stype, p.sprop, is_group, bas)
-
- o.buf = obuf
- o.index = oi
-
+ // Slow workaround for messages that aren't Unmarshalers.
+ // This includes some hand-coded .pb.go files and
+ // bootstrap protos.
+ // TODO: fix all of those and then add Unmarshal to
+ // the Message interface. Then:
+ // The cast above and code below can be deleted.
+ // The old unmarshaler can be deleted.
+ // Clients can call Unmarshal directly (can already do that, actually).
+ var info InternalMessageInfo
+ err := info.Unmarshal(pb, p.buf[p.index:])
+ p.index = len(p.buf)
return err
}
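As the comments above spell out, proto.Unmarshal resets the destination before decoding, while UnmarshalMerge keeps existing data and merges into it. A short sketch of the difference, again using the vendored ListValue type:

    package main

    import (
        "fmt"

        "github.com/golang/protobuf/proto"
        stpb "github.com/golang/protobuf/ptypes/struct"
    )

    func main() {
        src := &stpb.ListValue{Values: []*stpb.Value{
            {Kind: &stpb.Value_NumberValue{NumberValue: 42}},
        }}
        data, err := proto.Marshal(src)
        if err != nil {
            panic(err)
        }

        dst := &stpb.ListValue{Values: []*stpb.Value{
            {Kind: &stpb.Value_NumberValue{NumberValue: 1}},
        }}

        _ = proto.UnmarshalMerge(data, dst) // keeps the existing element and appends
        fmt.Println(len(dst.Values))        // 2

        _ = proto.Unmarshal(data, dst) // calls Reset() first, then decodes
        fmt.Println(len(dst.Values))   // 1
    }
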
diff --git a/vendor/github.com/golang/protobuf/proto/discard.go b/vendor/github.com/golang/protobuf/proto/discard.go
new file mode 100644
index 00000000..dea2617c
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/discard.go
@@ -0,0 +1,350 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2017 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+import (
+ "fmt"
+ "reflect"
+ "strings"
+ "sync"
+ "sync/atomic"
+)
+
+type generatedDiscarder interface {
+ XXX_DiscardUnknown()
+}
+
+// DiscardUnknown recursively discards all unknown fields from this message
+// and all embedded messages.
+//
+// When unmarshaling a message with unrecognized fields, the tags and values
+// of such fields are preserved in the Message. This allows a later call to
+// marshal to be able to produce a message that continues to have those
+// unrecognized fields. To avoid this, DiscardUnknown is used to
+// explicitly clear the unknown fields after unmarshaling.
+//
+// For proto2 messages, the unknown fields of message extensions are only
+// discarded from messages that have been accessed via GetExtension.
+func DiscardUnknown(m Message) {
+ if m, ok := m.(generatedDiscarder); ok {
+ m.XXX_DiscardUnknown()
+ return
+ }
+ // TODO: Dynamically populate a InternalMessageInfo for legacy messages,
+ // but the master branch has no implementation for InternalMessageInfo,
+ // so it would be more work to replicate that approach.
+ discardLegacy(m)
+}
+
+// DiscardUnknown recursively discards all unknown fields.
+func (a *InternalMessageInfo) DiscardUnknown(m Message) {
+ di := atomicLoadDiscardInfo(&a.discard)
+ if di == nil {
+ di = getDiscardInfo(reflect.TypeOf(m).Elem())
+ atomicStoreDiscardInfo(&a.discard, di)
+ }
+ di.discard(toPointer(&m))
+}
+
+type discardInfo struct {
+ typ reflect.Type
+
+ initialized int32 // 0: only typ is valid, 1: everything is valid
+ lock sync.Mutex
+
+ fields []discardFieldInfo
+ unrecognized field
+}
+
+type discardFieldInfo struct {
+ field field // Offset of field, guaranteed to be valid
+ discard func(src pointer)
+}
+
+var (
+ discardInfoMap = map[reflect.Type]*discardInfo{}
+ discardInfoLock sync.Mutex
+)
+
+func getDiscardInfo(t reflect.Type) *discardInfo {
+ discardInfoLock.Lock()
+ defer discardInfoLock.Unlock()
+ di := discardInfoMap[t]
+ if di == nil {
+ di = &discardInfo{typ: t}
+ discardInfoMap[t] = di
+ }
+ return di
+}
+
+func (di *discardInfo) discard(src pointer) {
+ if src.isNil() {
+ return // Nothing to do.
+ }
+
+ if atomic.LoadInt32(&di.initialized) == 0 {
+ di.computeDiscardInfo()
+ }
+
+ for _, fi := range di.fields {
+ sfp := src.offset(fi.field)
+ fi.discard(sfp)
+ }
+
+ // For proto2 messages, only discard unknown fields in message extensions
+ // that have been accessed via GetExtension.
+ if em, err := extendable(src.asPointerTo(di.typ).Interface()); err == nil {
+ // Ignore lock since DiscardUnknown is not concurrency safe.
+ emm, _ := em.extensionsRead()
+ for _, mx := range emm {
+ if m, ok := mx.value.(Message); ok {
+ DiscardUnknown(m)
+ }
+ }
+ }
+
+ if di.unrecognized.IsValid() {
+ *src.offset(di.unrecognized).toBytes() = nil
+ }
+}
+
+func (di *discardInfo) computeDiscardInfo() {
+ di.lock.Lock()
+ defer di.lock.Unlock()
+ if di.initialized != 0 {
+ return
+ }
+ t := di.typ
+ n := t.NumField()
+
+ for i := 0; i < n; i++ {
+ f := t.Field(i)
+ if strings.HasPrefix(f.Name, "XXX_") {
+ continue
+ }
+
+ dfi := discardFieldInfo{field: toField(&f)}
+ tf := f.Type
+
+ // Unwrap tf to get its most basic type.
+ var isPointer, isSlice bool
+ if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 {
+ isSlice = true
+ tf = tf.Elem()
+ }
+ if tf.Kind() == reflect.Ptr {
+ isPointer = true
+ tf = tf.Elem()
+ }
+ if isPointer && isSlice && tf.Kind() != reflect.Struct {
+ panic(fmt.Sprintf("%v.%s cannot be a slice of pointers to primitive types", t, f.Name))
+ }
+
+ switch tf.Kind() {
+ case reflect.Struct:
+ switch {
+ case !isPointer:
+ panic(fmt.Sprintf("%v.%s cannot be a direct struct value", t, f.Name))
+ case isSlice: // E.g., []*pb.T
+ di := getDiscardInfo(tf)
+ dfi.discard = func(src pointer) {
+ sps := src.getPointerSlice()
+ for _, sp := range sps {
+ if !sp.isNil() {
+ di.discard(sp)
+ }
+ }
+ }
+ default: // E.g., *pb.T
+ di := getDiscardInfo(tf)
+ dfi.discard = func(src pointer) {
+ sp := src.getPointer()
+ if !sp.isNil() {
+ di.discard(sp)
+ }
+ }
+ }
+ case reflect.Map:
+ switch {
+ case isPointer || isSlice:
+ panic(fmt.Sprintf("%v.%s cannot be a pointer to a map or a slice of map values", t, f.Name))
+ default: // E.g., map[K]V
+ if tf.Elem().Kind() == reflect.Ptr { // Proto struct (e.g., *T)
+ dfi.discard = func(src pointer) {
+ sm := src.asPointerTo(tf).Elem()
+ if sm.Len() == 0 {
+ return
+ }
+ for _, key := range sm.MapKeys() {
+ val := sm.MapIndex(key)
+ DiscardUnknown(val.Interface().(Message))
+ }
+ }
+ } else {
+ dfi.discard = func(pointer) {} // Noop
+ }
+ }
+ case reflect.Interface:
+ // Must be oneof field.
+ switch {
+ case isPointer || isSlice:
+ panic(fmt.Sprintf("%v.%s cannot be a pointer to a interface or a slice of interface values", t, f.Name))
+ default: // E.g., interface{}
+ // TODO: Make this faster?
+ dfi.discard = func(src pointer) {
+ su := src.asPointerTo(tf).Elem()
+ if !su.IsNil() {
+ sv := su.Elem().Elem().Field(0)
+ if sv.Kind() == reflect.Ptr && sv.IsNil() {
+ return
+ }
+ switch sv.Type().Kind() {
+ case reflect.Ptr: // Proto struct (e.g., *T)
+ DiscardUnknown(sv.Interface().(Message))
+ }
+ }
+ }
+ }
+ default:
+ continue
+ }
+ di.fields = append(di.fields, dfi)
+ }
+
+ di.unrecognized = invalidField
+ if f, ok := t.FieldByName("XXX_unrecognized"); ok {
+ if f.Type != reflect.TypeOf([]byte{}) {
+ panic("expected XXX_unrecognized to be of type []byte")
+ }
+ di.unrecognized = toField(&f)
+ }
+
+ atomic.StoreInt32(&di.initialized, 1)
+}
+
+func discardLegacy(m Message) {
+ v := reflect.ValueOf(m)
+ if v.Kind() != reflect.Ptr || v.IsNil() {
+ return
+ }
+ v = v.Elem()
+ if v.Kind() != reflect.Struct {
+ return
+ }
+ t := v.Type()
+
+ for i := 0; i < v.NumField(); i++ {
+ f := t.Field(i)
+ if strings.HasPrefix(f.Name, "XXX_") {
+ continue
+ }
+ vf := v.Field(i)
+ tf := f.Type
+
+ // Unwrap tf to get its most basic type.
+ var isPointer, isSlice bool
+ if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 {
+ isSlice = true
+ tf = tf.Elem()
+ }
+ if tf.Kind() == reflect.Ptr {
+ isPointer = true
+ tf = tf.Elem()
+ }
+ if isPointer && isSlice && tf.Kind() != reflect.Struct {
+ panic(fmt.Sprintf("%T.%s cannot be a slice of pointers to primitive types", m, f.Name))
+ }
+
+ switch tf.Kind() {
+ case reflect.Struct:
+ switch {
+ case !isPointer:
+ panic(fmt.Sprintf("%T.%s cannot be a direct struct value", m, f.Name))
+ case isSlice: // E.g., []*pb.T
+ for j := 0; j < vf.Len(); j++ {
+ discardLegacy(vf.Index(j).Interface().(Message))
+ }
+ default: // E.g., *pb.T
+ discardLegacy(vf.Interface().(Message))
+ }
+ case reflect.Map:
+ switch {
+ case isPointer || isSlice:
+ panic(fmt.Sprintf("%T.%s cannot be a pointer to a map or a slice of map values", m, f.Name))
+ default: // E.g., map[K]V
+ tv := vf.Type().Elem()
+ if tv.Kind() == reflect.Ptr && tv.Implements(protoMessageType) { // Proto struct (e.g., *T)
+ for _, key := range vf.MapKeys() {
+ val := vf.MapIndex(key)
+ discardLegacy(val.Interface().(Message))
+ }
+ }
+ }
+ case reflect.Interface:
+ // Must be oneof field.
+ switch {
+ case isPointer || isSlice:
+ panic(fmt.Sprintf("%T.%s cannot be a pointer to a interface or a slice of interface values", m, f.Name))
+ default: // E.g., test_proto.isCommunique_Union interface
+ if !vf.IsNil() && f.Tag.Get("protobuf_oneof") != "" {
+ vf = vf.Elem() // E.g., *test_proto.Communique_Msg
+ if !vf.IsNil() {
+ vf = vf.Elem() // E.g., test_proto.Communique_Msg
+ vf = vf.Field(0) // E.g., Proto struct (e.g., *T) or primitive value
+ if vf.Kind() == reflect.Ptr {
+ discardLegacy(vf.Interface().(Message))
+ }
+ }
+ }
+ }
+ }
+ }
+
+ if vf := v.FieldByName("XXX_unrecognized"); vf.IsValid() {
+ if vf.Type() != reflect.TypeOf([]byte{}) {
+ panic("expected XXX_unrecognized to be of type []byte")
+ }
+ vf.Set(reflect.ValueOf([]byte(nil)))
+ }
+
+ // For proto2 messages, only discard unknown fields in message extensions
+ // that have been accessed via GetExtension.
+ if em, err := extendable(m); err == nil {
+ // Ignore lock since discardLegacy is not concurrency safe.
+ emm, _ := em.extensionsRead()
+ for _, mx := range emm {
+ if m, ok := mx.value.(Message); ok {
+ discardLegacy(m)
+ }
+ }
+ }
+}
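DiscardUnknown, and the discardLegacy fallback shown above, clear the preserved unknown-field bytes so a later re-marshal does not carry them forward. A minimal sketch of the legacy path; the oldMsg type is hypothetical and exists only for illustration:

    package main

    import (
        "fmt"

        "github.com/golang/protobuf/proto"
    )

    // oldMsg stands in for an older generated message: unparsed fields live in
    // XXX_unrecognized, which discardLegacy clears via reflection.
    type oldMsg struct {
        Name             *string `protobuf:"bytes,1,opt,name=name"`
        XXX_unrecognized []byte
    }

    func (m *oldMsg) Reset()         { *m = oldMsg{} }
    func (m *oldMsg) String() string { return fmt.Sprint(*m) }
    func (*oldMsg) ProtoMessage()    {}

    func main() {
        m := &oldMsg{XXX_unrecognized: []byte{0x10, 0x01}} // pretend leftover field 2
        proto.DiscardUnknown(m)
        fmt.Println(len(m.XXX_unrecognized)) // 0
    }
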
diff --git a/vendor/github.com/golang/protobuf/proto/encode.go b/vendor/github.com/golang/protobuf/proto/encode.go
index 8c1b8fd1..c27d35f8 100644
--- a/vendor/github.com/golang/protobuf/proto/encode.go
+++ b/vendor/github.com/golang/protobuf/proto/encode.go
@@ -39,7 +39,6 @@ import (
"errors"
"fmt"
"reflect"
- "sort"
)
// RequiredNotSetError is the error returned if Marshal is called with
@@ -82,10 +81,6 @@ var (
const maxVarintBytes = 10 // maximum length of a varint
-// maxMarshalSize is the largest allowed size of an encoded protobuf,
-// since C++ and Java use signed int32s for the size.
-const maxMarshalSize = 1<<31 - 1
-
// EncodeVarint returns the varint encoding of x.
// This is the format for the
// int32, int64, uint32, uint64, bool, and enum
@@ -119,18 +114,27 @@ func (p *Buffer) EncodeVarint(x uint64) error {
// SizeVarint returns the varint encoding size of an integer.
func SizeVarint(x uint64) int {
- return sizeVarint(x)
-}
-
-func sizeVarint(x uint64) (n int) {
- for {
- n++
- x >>= 7
- if x == 0 {
- break
- }
- }
- return n
+ switch {
+ case x < 1<<7:
+ return 1
+ case x < 1<<14:
+ return 2
+ case x < 1<<21:
+ return 3
+ case x < 1<<28:
+ return 4
+ case x < 1<<35:
+ return 5
+ case x < 1<<42:
+ return 6
+ case x < 1<<49:
+ return 7
+ case x < 1<<56:
+ return 8
+ case x < 1<<63:
+ return 9
+ }
+ return 10
}
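For reference, a small sketch of what the unrolled computation above returns at the 7-bit boundaries (a hypothetical main package; only SizeVarint is the API under discussion):

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
)

func main() {
	// Each varint byte carries 7 payload bits, so the size steps at 7-bit boundaries.
	fmt.Println(proto.SizeVarint(0))        // 1
	fmt.Println(proto.SizeVarint(1<<7 - 1)) // 1
	fmt.Println(proto.SizeVarint(1 << 7))   // 2
	fmt.Println(proto.SizeVarint(1 << 21))  // 4
	fmt.Println(proto.SizeVarint(1 << 63))  // 10
}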
// EncodeFixed64 writes a 64-bit integer to the Buffer.
@@ -149,10 +153,6 @@ func (p *Buffer) EncodeFixed64(x uint64) error {
return nil
}
-func sizeFixed64(x uint64) int {
- return 8
-}
-
// EncodeFixed32 writes a 32-bit integer to the Buffer.
// This is the format for the
// fixed32, sfixed32, and float protocol buffer types.
@@ -165,10 +165,6 @@ func (p *Buffer) EncodeFixed32(x uint64) error {
return nil
}
-func sizeFixed32(x uint64) int {
- return 4
-}
-
// EncodeZigzag64 writes a zigzag-encoded 64-bit integer
// to the Buffer.
// This is the format used for the sint64 protocol buffer type.
@@ -177,10 +173,6 @@ func (p *Buffer) EncodeZigzag64(x uint64) error {
return p.EncodeVarint(uint64((x << 1) ^ uint64((int64(x) >> 63))))
}
-func sizeZigzag64(x uint64) int {
- return sizeVarint(uint64((x << 1) ^ uint64((int64(x) >> 63))))
-}
-
// EncodeZigzag32 writes a zigzag-encoded 32-bit integer
// to the Buffer.
// This is the format used for the sint32 protocol buffer type.
@@ -189,10 +181,6 @@ func (p *Buffer) EncodeZigzag32(x uint64) error {
return p.EncodeVarint(uint64((uint32(x) << 1) ^ uint32((int32(x) >> 31))))
}
-func sizeZigzag32(x uint64) int {
- return sizeVarint(uint64((uint32(x) << 1) ^ uint32((int32(x) >> 31))))
-}
-
// EncodeRawBytes writes a count-delimited byte buffer to the Buffer.
// This is the format used for the bytes protocol buffer
// type and for embedded messages.
@@ -202,11 +190,6 @@ func (p *Buffer) EncodeRawBytes(b []byte) error {
return nil
}
-func sizeRawBytes(b []byte) int {
- return sizeVarint(uint64(len(b))) +
- len(b)
-}
-
// EncodeStringBytes writes an encoded string to the Buffer.
// This is the format used for the proto2 string type.
func (p *Buffer) EncodeStringBytes(s string) error {
@@ -215,326 +198,17 @@ func (p *Buffer) EncodeStringBytes(s string) error {
return nil
}
-func sizeStringBytes(s string) int {
- return sizeVarint(uint64(len(s))) +
- len(s)
-}
-
// Marshaler is the interface representing objects that can marshal themselves.
type Marshaler interface {
Marshal() ([]byte, error)
}
-// Marshal takes the protocol buffer
-// and encodes it into the wire format, returning the data.
-func Marshal(pb Message) ([]byte, error) {
- // Can the object marshal itself?
- if m, ok := pb.(Marshaler); ok {
- return m.Marshal()
- }
- p := NewBuffer(nil)
- err := p.Marshal(pb)
- var state errorState
- if err != nil && !state.shouldContinue(err, nil) {
- return nil, err
- }
- if p.buf == nil && err == nil {
- // Return a non-nil slice on success.
- return []byte{}, nil
- }
- return p.buf, err
-}
-
// EncodeMessage writes the protocol buffer to the Buffer,
// prefixed by a varint-encoded length.
func (p *Buffer) EncodeMessage(pb Message) error {
- t, base, err := getbase(pb)
- if structPointer_IsNil(base) {
- return ErrNil
- }
- if err == nil {
- var state errorState
- err = p.enc_len_struct(GetProperties(t.Elem()), base, &state)
- }
- return err
-}
-
-// Marshal takes the protocol buffer
-// and encodes it into the wire format, writing the result to the
-// Buffer.
-func (p *Buffer) Marshal(pb Message) error {
- // Can the object marshal itself?
- if m, ok := pb.(Marshaler); ok {
- data, err := m.Marshal()
- if err != nil {
- return err
- }
- p.buf = append(p.buf, data...)
- return nil
- }
-
- t, base, err := getbase(pb)
- if structPointer_IsNil(base) {
- return ErrNil
- }
- if err == nil {
- err = p.enc_struct(GetProperties(t.Elem()), base)
- }
-
- if collectStats {
- stats.Encode++
- }
-
- if len(p.buf) > maxMarshalSize {
- return ErrTooLarge
- }
- return err
-}
-
-// Size returns the encoded size of a protocol buffer.
-func Size(pb Message) (n int) {
- // Can the object marshal itself? If so, Size is slow.
- // TODO: add Size to Marshaler, or add a Sizer interface.
- if m, ok := pb.(Marshaler); ok {
- b, _ := m.Marshal()
- return len(b)
- }
-
- t, base, err := getbase(pb)
- if structPointer_IsNil(base) {
- return 0
- }
- if err == nil {
- n = size_struct(GetProperties(t.Elem()), base)
- }
-
- if collectStats {
- stats.Size++
- }
-
- return
-}
-
-// Individual type encoders.
-
-// Encode a bool.
-func (o *Buffer) enc_bool(p *Properties, base structPointer) error {
- v := *structPointer_Bool(base, p.field)
- if v == nil {
- return ErrNil
- }
- x := 0
- if *v {
- x = 1
- }
- o.buf = append(o.buf, p.tagcode...)
- p.valEnc(o, uint64(x))
- return nil
-}
-
-func (o *Buffer) enc_proto3_bool(p *Properties, base structPointer) error {
- v := *structPointer_BoolVal(base, p.field)
- if !v {
- return ErrNil
- }
- o.buf = append(o.buf, p.tagcode...)
- p.valEnc(o, 1)
- return nil
-}
-
-func size_bool(p *Properties, base structPointer) int {
- v := *structPointer_Bool(base, p.field)
- if v == nil {
- return 0
- }
- return len(p.tagcode) + 1 // each bool takes exactly one byte
-}
-
-func size_proto3_bool(p *Properties, base structPointer) int {
- v := *structPointer_BoolVal(base, p.field)
- if !v && !p.oneof {
- return 0
- }
- return len(p.tagcode) + 1 // each bool takes exactly one byte
-}
-
-// Encode an int32.
-func (o *Buffer) enc_int32(p *Properties, base structPointer) error {
- v := structPointer_Word32(base, p.field)
- if word32_IsNil(v) {
- return ErrNil
- }
- x := int32(word32_Get(v)) // permit sign extension to use full 64-bit range
- o.buf = append(o.buf, p.tagcode...)
- p.valEnc(o, uint64(x))
- return nil
-}
-
-func (o *Buffer) enc_proto3_int32(p *Properties, base structPointer) error {
- v := structPointer_Word32Val(base, p.field)
- x := int32(word32Val_Get(v)) // permit sign extension to use full 64-bit range
- if x == 0 {
- return ErrNil
- }
- o.buf = append(o.buf, p.tagcode...)
- p.valEnc(o, uint64(x))
- return nil
-}
-
-func size_int32(p *Properties, base structPointer) (n int) {
- v := structPointer_Word32(base, p.field)
- if word32_IsNil(v) {
- return 0
- }
- x := int32(word32_Get(v)) // permit sign extension to use full 64-bit range
- n += len(p.tagcode)
- n += p.valSize(uint64(x))
- return
-}
-
-func size_proto3_int32(p *Properties, base structPointer) (n int) {
- v := structPointer_Word32Val(base, p.field)
- x := int32(word32Val_Get(v)) // permit sign extension to use full 64-bit range
- if x == 0 && !p.oneof {
- return 0
- }
- n += len(p.tagcode)
- n += p.valSize(uint64(x))
- return
-}
-
-// Encode a uint32.
-// Exactly the same as int32, except for no sign extension.
-func (o *Buffer) enc_uint32(p *Properties, base structPointer) error {
- v := structPointer_Word32(base, p.field)
- if word32_IsNil(v) {
- return ErrNil
- }
- x := word32_Get(v)
- o.buf = append(o.buf, p.tagcode...)
- p.valEnc(o, uint64(x))
- return nil
-}
-
-func (o *Buffer) enc_proto3_uint32(p *Properties, base structPointer) error {
- v := structPointer_Word32Val(base, p.field)
- x := word32Val_Get(v)
- if x == 0 {
- return ErrNil
- }
- o.buf = append(o.buf, p.tagcode...)
- p.valEnc(o, uint64(x))
- return nil
-}
-
-func size_uint32(p *Properties, base structPointer) (n int) {
- v := structPointer_Word32(base, p.field)
- if word32_IsNil(v) {
- return 0
- }
- x := word32_Get(v)
- n += len(p.tagcode)
- n += p.valSize(uint64(x))
- return
-}
-
-func size_proto3_uint32(p *Properties, base structPointer) (n int) {
- v := structPointer_Word32Val(base, p.field)
- x := word32Val_Get(v)
- if x == 0 && !p.oneof {
- return 0
- }
- n += len(p.tagcode)
- n += p.valSize(uint64(x))
- return
-}
-
-// Encode an int64.
-func (o *Buffer) enc_int64(p *Properties, base structPointer) error {
- v := structPointer_Word64(base, p.field)
- if word64_IsNil(v) {
- return ErrNil
- }
- x := word64_Get(v)
- o.buf = append(o.buf, p.tagcode...)
- p.valEnc(o, x)
- return nil
-}
-
-func (o *Buffer) enc_proto3_int64(p *Properties, base structPointer) error {
- v := structPointer_Word64Val(base, p.field)
- x := word64Val_Get(v)
- if x == 0 {
- return ErrNil
- }
- o.buf = append(o.buf, p.tagcode...)
- p.valEnc(o, x)
- return nil
-}
-
-func size_int64(p *Properties, base structPointer) (n int) {
- v := structPointer_Word64(base, p.field)
- if word64_IsNil(v) {
- return 0
- }
- x := word64_Get(v)
- n += len(p.tagcode)
- n += p.valSize(x)
- return
-}
-
-func size_proto3_int64(p *Properties, base structPointer) (n int) {
- v := structPointer_Word64Val(base, p.field)
- x := word64Val_Get(v)
- if x == 0 && !p.oneof {
- return 0
- }
- n += len(p.tagcode)
- n += p.valSize(x)
- return
-}
-
-// Encode a string.
-func (o *Buffer) enc_string(p *Properties, base structPointer) error {
- v := *structPointer_String(base, p.field)
- if v == nil {
- return ErrNil
- }
- x := *v
- o.buf = append(o.buf, p.tagcode...)
- o.EncodeStringBytes(x)
- return nil
-}
-
-func (o *Buffer) enc_proto3_string(p *Properties, base structPointer) error {
- v := *structPointer_StringVal(base, p.field)
- if v == "" {
- return ErrNil
- }
- o.buf = append(o.buf, p.tagcode...)
- o.EncodeStringBytes(v)
- return nil
-}
-
-func size_string(p *Properties, base structPointer) (n int) {
- v := *structPointer_String(base, p.field)
- if v == nil {
- return 0
- }
- x := *v
- n += len(p.tagcode)
- n += sizeStringBytes(x)
- return
-}
-
-func size_proto3_string(p *Properties, base structPointer) (n int) {
- v := *structPointer_StringVal(base, p.field)
- if v == "" && !p.oneof {
- return 0
- }
- n += len(p.tagcode)
- n += sizeStringBytes(v)
- return
+ siz := Size(pb)
+ p.EncodeVarint(uint64(siz))
+ return p.Marshal(pb)
}
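The rewritten EncodeMessage now simply length-prefixes a regular Marshal. A minimal round-trip sketch, pairing it with Buffer.DecodeMessage from the same package; the vendored duration.Duration message is used only as a convenient concrete type, any generated message would do:

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	durpb "github.com/golang/protobuf/ptypes/duration"
)

func main() {
	in := &durpb.Duration{Seconds: 3, Nanos: 500000000}

	// Write the message prefixed by its varint-encoded length.
	buf := proto.NewBuffer(nil)
	if err := buf.EncodeMessage(in); err != nil {
		fmt.Println("encode failed:", err)
		return
	}

	// Read it back with the matching length-delimited decoder.
	out := &durpb.Duration{}
	if err := proto.NewBuffer(buf.Bytes()).DecodeMessage(out); err != nil {
		fmt.Println("decode failed:", err)
		return
	}
	fmt.Println(out.Seconds, out.Nanos) // 3 500000000
}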
// All protocol buffer fields are nillable, but be careful.
@@ -545,819 +219,3 @@ func isNil(v reflect.Value) bool {
}
return false
}
-
-// Encode a message struct.
-func (o *Buffer) enc_struct_message(p *Properties, base structPointer) error {
- var state errorState
- structp := structPointer_GetStructPointer(base, p.field)
- if structPointer_IsNil(structp) {
- return ErrNil
- }
-
- // Can the object marshal itself?
- if p.isMarshaler {
- m := structPointer_Interface(structp, p.stype).(Marshaler)
- data, err := m.Marshal()
- if err != nil && !state.shouldContinue(err, nil) {
- return err
- }
- o.buf = append(o.buf, p.tagcode...)
- o.EncodeRawBytes(data)
- return state.err
- }
-
- o.buf = append(o.buf, p.tagcode...)
- return o.enc_len_struct(p.sprop, structp, &state)
-}
-
-func size_struct_message(p *Properties, base structPointer) int {
- structp := structPointer_GetStructPointer(base, p.field)
- if structPointer_IsNil(structp) {
- return 0
- }
-
- // Can the object marshal itself?
- if p.isMarshaler {
- m := structPointer_Interface(structp, p.stype).(Marshaler)
- data, _ := m.Marshal()
- n0 := len(p.tagcode)
- n1 := sizeRawBytes(data)
- return n0 + n1
- }
-
- n0 := len(p.tagcode)
- n1 := size_struct(p.sprop, structp)
- n2 := sizeVarint(uint64(n1)) // size of encoded length
- return n0 + n1 + n2
-}
-
-// Encode a group struct.
-func (o *Buffer) enc_struct_group(p *Properties, base structPointer) error {
- var state errorState
- b := structPointer_GetStructPointer(base, p.field)
- if structPointer_IsNil(b) {
- return ErrNil
- }
-
- o.EncodeVarint(uint64((p.Tag << 3) | WireStartGroup))
- err := o.enc_struct(p.sprop, b)
- if err != nil && !state.shouldContinue(err, nil) {
- return err
- }
- o.EncodeVarint(uint64((p.Tag << 3) | WireEndGroup))
- return state.err
-}
-
-func size_struct_group(p *Properties, base structPointer) (n int) {
- b := structPointer_GetStructPointer(base, p.field)
- if structPointer_IsNil(b) {
- return 0
- }
-
- n += sizeVarint(uint64((p.Tag << 3) | WireStartGroup))
- n += size_struct(p.sprop, b)
- n += sizeVarint(uint64((p.Tag << 3) | WireEndGroup))
- return
-}
-
-// Encode a slice of bools ([]bool).
-func (o *Buffer) enc_slice_bool(p *Properties, base structPointer) error {
- s := *structPointer_BoolSlice(base, p.field)
- l := len(s)
- if l == 0 {
- return ErrNil
- }
- for _, x := range s {
- o.buf = append(o.buf, p.tagcode...)
- v := uint64(0)
- if x {
- v = 1
- }
- p.valEnc(o, v)
- }
- return nil
-}
-
-func size_slice_bool(p *Properties, base structPointer) int {
- s := *structPointer_BoolSlice(base, p.field)
- l := len(s)
- if l == 0 {
- return 0
- }
- return l * (len(p.tagcode) + 1) // each bool takes exactly one byte
-}
-
-// Encode a slice of bools ([]bool) in packed format.
-func (o *Buffer) enc_slice_packed_bool(p *Properties, base structPointer) error {
- s := *structPointer_BoolSlice(base, p.field)
- l := len(s)
- if l == 0 {
- return ErrNil
- }
- o.buf = append(o.buf, p.tagcode...)
- o.EncodeVarint(uint64(l)) // each bool takes exactly one byte
- for _, x := range s {
- v := uint64(0)
- if x {
- v = 1
- }
- p.valEnc(o, v)
- }
- return nil
-}
-
-func size_slice_packed_bool(p *Properties, base structPointer) (n int) {
- s := *structPointer_BoolSlice(base, p.field)
- l := len(s)
- if l == 0 {
- return 0
- }
- n += len(p.tagcode)
- n += sizeVarint(uint64(l))
- n += l // each bool takes exactly one byte
- return
-}
-
-// Encode a slice of bytes ([]byte).
-func (o *Buffer) enc_slice_byte(p *Properties, base structPointer) error {
- s := *structPointer_Bytes(base, p.field)
- if s == nil {
- return ErrNil
- }
- o.buf = append(o.buf, p.tagcode...)
- o.EncodeRawBytes(s)
- return nil
-}
-
-func (o *Buffer) enc_proto3_slice_byte(p *Properties, base structPointer) error {
- s := *structPointer_Bytes(base, p.field)
- if len(s) == 0 {
- return ErrNil
- }
- o.buf = append(o.buf, p.tagcode...)
- o.EncodeRawBytes(s)
- return nil
-}
-
-func size_slice_byte(p *Properties, base structPointer) (n int) {
- s := *structPointer_Bytes(base, p.field)
- if s == nil && !p.oneof {
- return 0
- }
- n += len(p.tagcode)
- n += sizeRawBytes(s)
- return
-}
-
-func size_proto3_slice_byte(p *Properties, base structPointer) (n int) {
- s := *structPointer_Bytes(base, p.field)
- if len(s) == 0 && !p.oneof {
- return 0
- }
- n += len(p.tagcode)
- n += sizeRawBytes(s)
- return
-}
-
-// Encode a slice of int32s ([]int32).
-func (o *Buffer) enc_slice_int32(p *Properties, base structPointer) error {
- s := structPointer_Word32Slice(base, p.field)
- l := s.Len()
- if l == 0 {
- return ErrNil
- }
- for i := 0; i < l; i++ {
- o.buf = append(o.buf, p.tagcode...)
- x := int32(s.Index(i)) // permit sign extension to use full 64-bit range
- p.valEnc(o, uint64(x))
- }
- return nil
-}
-
-func size_slice_int32(p *Properties, base structPointer) (n int) {
- s := structPointer_Word32Slice(base, p.field)
- l := s.Len()
- if l == 0 {
- return 0
- }
- for i := 0; i < l; i++ {
- n += len(p.tagcode)
- x := int32(s.Index(i)) // permit sign extension to use full 64-bit range
- n += p.valSize(uint64(x))
- }
- return
-}
-
-// Encode a slice of int32s ([]int32) in packed format.
-func (o *Buffer) enc_slice_packed_int32(p *Properties, base structPointer) error {
- s := structPointer_Word32Slice(base, p.field)
- l := s.Len()
- if l == 0 {
- return ErrNil
- }
- // TODO: Reuse a Buffer.
- buf := NewBuffer(nil)
- for i := 0; i < l; i++ {
- x := int32(s.Index(i)) // permit sign extension to use full 64-bit range
- p.valEnc(buf, uint64(x))
- }
-
- o.buf = append(o.buf, p.tagcode...)
- o.EncodeVarint(uint64(len(buf.buf)))
- o.buf = append(o.buf, buf.buf...)
- return nil
-}
-
-func size_slice_packed_int32(p *Properties, base structPointer) (n int) {
- s := structPointer_Word32Slice(base, p.field)
- l := s.Len()
- if l == 0 {
- return 0
- }
- var bufSize int
- for i := 0; i < l; i++ {
- x := int32(s.Index(i)) // permit sign extension to use full 64-bit range
- bufSize += p.valSize(uint64(x))
- }
-
- n += len(p.tagcode)
- n += sizeVarint(uint64(bufSize))
- n += bufSize
- return
-}
-
-// Encode a slice of uint32s ([]uint32).
-// Exactly the same as int32, except for no sign extension.
-func (o *Buffer) enc_slice_uint32(p *Properties, base structPointer) error {
- s := structPointer_Word32Slice(base, p.field)
- l := s.Len()
- if l == 0 {
- return ErrNil
- }
- for i := 0; i < l; i++ {
- o.buf = append(o.buf, p.tagcode...)
- x := s.Index(i)
- p.valEnc(o, uint64(x))
- }
- return nil
-}
-
-func size_slice_uint32(p *Properties, base structPointer) (n int) {
- s := structPointer_Word32Slice(base, p.field)
- l := s.Len()
- if l == 0 {
- return 0
- }
- for i := 0; i < l; i++ {
- n += len(p.tagcode)
- x := s.Index(i)
- n += p.valSize(uint64(x))
- }
- return
-}
-
-// Encode a slice of uint32s ([]uint32) in packed format.
-// Exactly the same as int32, except for no sign extension.
-func (o *Buffer) enc_slice_packed_uint32(p *Properties, base structPointer) error {
- s := structPointer_Word32Slice(base, p.field)
- l := s.Len()
- if l == 0 {
- return ErrNil
- }
- // TODO: Reuse a Buffer.
- buf := NewBuffer(nil)
- for i := 0; i < l; i++ {
- p.valEnc(buf, uint64(s.Index(i)))
- }
-
- o.buf = append(o.buf, p.tagcode...)
- o.EncodeVarint(uint64(len(buf.buf)))
- o.buf = append(o.buf, buf.buf...)
- return nil
-}
-
-func size_slice_packed_uint32(p *Properties, base structPointer) (n int) {
- s := structPointer_Word32Slice(base, p.field)
- l := s.Len()
- if l == 0 {
- return 0
- }
- var bufSize int
- for i := 0; i < l; i++ {
- bufSize += p.valSize(uint64(s.Index(i)))
- }
-
- n += len(p.tagcode)
- n += sizeVarint(uint64(bufSize))
- n += bufSize
- return
-}
-
-// Encode a slice of int64s ([]int64).
-func (o *Buffer) enc_slice_int64(p *Properties, base structPointer) error {
- s := structPointer_Word64Slice(base, p.field)
- l := s.Len()
- if l == 0 {
- return ErrNil
- }
- for i := 0; i < l; i++ {
- o.buf = append(o.buf, p.tagcode...)
- p.valEnc(o, s.Index(i))
- }
- return nil
-}
-
-func size_slice_int64(p *Properties, base structPointer) (n int) {
- s := structPointer_Word64Slice(base, p.field)
- l := s.Len()
- if l == 0 {
- return 0
- }
- for i := 0; i < l; i++ {
- n += len(p.tagcode)
- n += p.valSize(s.Index(i))
- }
- return
-}
-
-// Encode a slice of int64s ([]int64) in packed format.
-func (o *Buffer) enc_slice_packed_int64(p *Properties, base structPointer) error {
- s := structPointer_Word64Slice(base, p.field)
- l := s.Len()
- if l == 0 {
- return ErrNil
- }
- // TODO: Reuse a Buffer.
- buf := NewBuffer(nil)
- for i := 0; i < l; i++ {
- p.valEnc(buf, s.Index(i))
- }
-
- o.buf = append(o.buf, p.tagcode...)
- o.EncodeVarint(uint64(len(buf.buf)))
- o.buf = append(o.buf, buf.buf...)
- return nil
-}
-
-func size_slice_packed_int64(p *Properties, base structPointer) (n int) {
- s := structPointer_Word64Slice(base, p.field)
- l := s.Len()
- if l == 0 {
- return 0
- }
- var bufSize int
- for i := 0; i < l; i++ {
- bufSize += p.valSize(s.Index(i))
- }
-
- n += len(p.tagcode)
- n += sizeVarint(uint64(bufSize))
- n += bufSize
- return
-}
-
-// Encode a slice of slice of bytes ([][]byte).
-func (o *Buffer) enc_slice_slice_byte(p *Properties, base structPointer) error {
- ss := *structPointer_BytesSlice(base, p.field)
- l := len(ss)
- if l == 0 {
- return ErrNil
- }
- for i := 0; i < l; i++ {
- o.buf = append(o.buf, p.tagcode...)
- o.EncodeRawBytes(ss[i])
- }
- return nil
-}
-
-func size_slice_slice_byte(p *Properties, base structPointer) (n int) {
- ss := *structPointer_BytesSlice(base, p.field)
- l := len(ss)
- if l == 0 {
- return 0
- }
- n += l * len(p.tagcode)
- for i := 0; i < l; i++ {
- n += sizeRawBytes(ss[i])
- }
- return
-}
-
-// Encode a slice of strings ([]string).
-func (o *Buffer) enc_slice_string(p *Properties, base structPointer) error {
- ss := *structPointer_StringSlice(base, p.field)
- l := len(ss)
- for i := 0; i < l; i++ {
- o.buf = append(o.buf, p.tagcode...)
- o.EncodeStringBytes(ss[i])
- }
- return nil
-}
-
-func size_slice_string(p *Properties, base structPointer) (n int) {
- ss := *structPointer_StringSlice(base, p.field)
- l := len(ss)
- n += l * len(p.tagcode)
- for i := 0; i < l; i++ {
- n += sizeStringBytes(ss[i])
- }
- return
-}
-
-// Encode a slice of message structs ([]*struct).
-func (o *Buffer) enc_slice_struct_message(p *Properties, base structPointer) error {
- var state errorState
- s := structPointer_StructPointerSlice(base, p.field)
- l := s.Len()
-
- for i := 0; i < l; i++ {
- structp := s.Index(i)
- if structPointer_IsNil(structp) {
- return errRepeatedHasNil
- }
-
- // Can the object marshal itself?
- if p.isMarshaler {
- m := structPointer_Interface(structp, p.stype).(Marshaler)
- data, err := m.Marshal()
- if err != nil && !state.shouldContinue(err, nil) {
- return err
- }
- o.buf = append(o.buf, p.tagcode...)
- o.EncodeRawBytes(data)
- continue
- }
-
- o.buf = append(o.buf, p.tagcode...)
- err := o.enc_len_struct(p.sprop, structp, &state)
- if err != nil && !state.shouldContinue(err, nil) {
- if err == ErrNil {
- return errRepeatedHasNil
- }
- return err
- }
- }
- return state.err
-}
-
-func size_slice_struct_message(p *Properties, base structPointer) (n int) {
- s := structPointer_StructPointerSlice(base, p.field)
- l := s.Len()
- n += l * len(p.tagcode)
- for i := 0; i < l; i++ {
- structp := s.Index(i)
- if structPointer_IsNil(structp) {
- return // return the size up to this point
- }
-
- // Can the object marshal itself?
- if p.isMarshaler {
- m := structPointer_Interface(structp, p.stype).(Marshaler)
- data, _ := m.Marshal()
- n += len(p.tagcode)
- n += sizeRawBytes(data)
- continue
- }
-
- n0 := size_struct(p.sprop, structp)
- n1 := sizeVarint(uint64(n0)) // size of encoded length
- n += n0 + n1
- }
- return
-}
-
-// Encode a slice of group structs ([]*struct).
-func (o *Buffer) enc_slice_struct_group(p *Properties, base structPointer) error {
- var state errorState
- s := structPointer_StructPointerSlice(base, p.field)
- l := s.Len()
-
- for i := 0; i < l; i++ {
- b := s.Index(i)
- if structPointer_IsNil(b) {
- return errRepeatedHasNil
- }
-
- o.EncodeVarint(uint64((p.Tag << 3) | WireStartGroup))
-
- err := o.enc_struct(p.sprop, b)
-
- if err != nil && !state.shouldContinue(err, nil) {
- if err == ErrNil {
- return errRepeatedHasNil
- }
- return err
- }
-
- o.EncodeVarint(uint64((p.Tag << 3) | WireEndGroup))
- }
- return state.err
-}
-
-func size_slice_struct_group(p *Properties, base structPointer) (n int) {
- s := structPointer_StructPointerSlice(base, p.field)
- l := s.Len()
-
- n += l * sizeVarint(uint64((p.Tag<<3)|WireStartGroup))
- n += l * sizeVarint(uint64((p.Tag<<3)|WireEndGroup))
- for i := 0; i < l; i++ {
- b := s.Index(i)
- if structPointer_IsNil(b) {
- return // return size up to this point
- }
-
- n += size_struct(p.sprop, b)
- }
- return
-}
-
-// Encode an extension map.
-func (o *Buffer) enc_map(p *Properties, base structPointer) error {
- exts := structPointer_ExtMap(base, p.field)
- if err := encodeExtensionsMap(*exts); err != nil {
- return err
- }
-
- return o.enc_map_body(*exts)
-}
-
-func (o *Buffer) enc_exts(p *Properties, base structPointer) error {
- exts := structPointer_Extensions(base, p.field)
- if err := encodeExtensions(exts); err != nil {
- return err
- }
- v, _ := exts.extensionsRead()
-
- return o.enc_map_body(v)
-}
-
-func (o *Buffer) enc_map_body(v map[int32]Extension) error {
- // Fast-path for common cases: zero or one extensions.
- if len(v) <= 1 {
- for _, e := range v {
- o.buf = append(o.buf, e.enc...)
- }
- return nil
- }
-
- // Sort keys to provide a deterministic encoding.
- keys := make([]int, 0, len(v))
- for k := range v {
- keys = append(keys, int(k))
- }
- sort.Ints(keys)
-
- for _, k := range keys {
- o.buf = append(o.buf, v[int32(k)].enc...)
- }
- return nil
-}
-
-func size_map(p *Properties, base structPointer) int {
- v := structPointer_ExtMap(base, p.field)
- return extensionsMapSize(*v)
-}
-
-func size_exts(p *Properties, base structPointer) int {
- v := structPointer_Extensions(base, p.field)
- return extensionsSize(v)
-}
-
-// Encode a map field.
-func (o *Buffer) enc_new_map(p *Properties, base structPointer) error {
- var state errorState // XXX: or do we need to plumb this through?
-
- /*
- A map defined as
- map<key_type, value_type> map_field = N;
- is encoded in the same way as
- message MapFieldEntry {
- key_type key = 1;
- value_type value = 2;
- }
- repeated MapFieldEntry map_field = N;
- */
-
- v := structPointer_NewAt(base, p.field, p.mtype).Elem() // map[K]V
- if v.Len() == 0 {
- return nil
- }
-
- keycopy, valcopy, keybase, valbase := mapEncodeScratch(p.mtype)
-
- enc := func() error {
- if err := p.mkeyprop.enc(o, p.mkeyprop, keybase); err != nil {
- return err
- }
- if err := p.mvalprop.enc(o, p.mvalprop, valbase); err != nil && err != ErrNil {
- return err
- }
- return nil
- }
-
- // Don't sort map keys. It is not required by the spec, and C++ doesn't do it.
- for _, key := range v.MapKeys() {
- val := v.MapIndex(key)
-
- keycopy.Set(key)
- valcopy.Set(val)
-
- o.buf = append(o.buf, p.tagcode...)
- if err := o.enc_len_thing(enc, &state); err != nil {
- return err
- }
- }
- return nil
-}
-
-func size_new_map(p *Properties, base structPointer) int {
- v := structPointer_NewAt(base, p.field, p.mtype).Elem() // map[K]V
-
- keycopy, valcopy, keybase, valbase := mapEncodeScratch(p.mtype)
-
- n := 0
- for _, key := range v.MapKeys() {
- val := v.MapIndex(key)
- keycopy.Set(key)
- valcopy.Set(val)
-
- // Tag codes for key and val are the responsibility of the sub-sizer.
- keysize := p.mkeyprop.size(p.mkeyprop, keybase)
- valsize := p.mvalprop.size(p.mvalprop, valbase)
- entry := keysize + valsize
- // Add on tag code and length of map entry itself.
- n += len(p.tagcode) + sizeVarint(uint64(entry)) + entry
- }
- return n
-}
-
-// mapEncodeScratch returns a new reflect.Value matching the map's value type,
-// and a structPointer suitable for passing to an encoder or sizer.
-func mapEncodeScratch(mapType reflect.Type) (keycopy, valcopy reflect.Value, keybase, valbase structPointer) {
- // Prepare addressable doubly-indirect placeholders for the key and value types.
- // This is needed because the element-type encoders expect **T, but the map iteration produces T.
-
- keycopy = reflect.New(mapType.Key()).Elem() // addressable K
- keyptr := reflect.New(reflect.PtrTo(keycopy.Type())).Elem() // addressable *K
- keyptr.Set(keycopy.Addr()) //
- keybase = toStructPointer(keyptr.Addr()) // **K
-
- // Value types are more varied and require special handling.
- switch mapType.Elem().Kind() {
- case reflect.Slice:
- // []byte
- var dummy []byte
- valcopy = reflect.ValueOf(&dummy).Elem() // addressable []byte
- valbase = toStructPointer(valcopy.Addr())
- case reflect.Ptr:
- // message; the generated field type is map[K]*Msg (so V is *Msg),
- // so we only need one level of indirection.
- valcopy = reflect.New(mapType.Elem()).Elem() // addressable V
- valbase = toStructPointer(valcopy.Addr())
- default:
- // everything else
- valcopy = reflect.New(mapType.Elem()).Elem() // addressable V
- valptr := reflect.New(reflect.PtrTo(valcopy.Type())).Elem() // addressable *V
- valptr.Set(valcopy.Addr()) //
- valbase = toStructPointer(valptr.Addr()) // **V
- }
- return
-}
-
-// Encode a struct.
-func (o *Buffer) enc_struct(prop *StructProperties, base structPointer) error {
- var state errorState
- // Encode fields in tag order so that decoders may use optimizations
- // that depend on the ordering.
- // https://developers.google.com/protocol-buffers/docs/encoding#order
- for _, i := range prop.order {
- p := prop.Prop[i]
- if p.enc != nil {
- err := p.enc(o, p, base)
- if err != nil {
- if err == ErrNil {
- if p.Required && state.err == nil {
- state.err = &RequiredNotSetError{p.Name}
- }
- } else if err == errRepeatedHasNil {
- // Give more context to nil values in repeated fields.
- return errors.New("repeated field " + p.OrigName + " has nil element")
- } else if !state.shouldContinue(err, p) {
- return err
- }
- }
- if len(o.buf) > maxMarshalSize {
- return ErrTooLarge
- }
- }
- }
-
- // Do oneof fields.
- if prop.oneofMarshaler != nil {
- m := structPointer_Interface(base, prop.stype).(Message)
- if err := prop.oneofMarshaler(m, o); err == ErrNil {
- return errOneofHasNil
- } else if err != nil {
- return err
- }
- }
-
- // Add unrecognized fields at the end.
- if prop.unrecField.IsValid() {
- v := *structPointer_Bytes(base, prop.unrecField)
- if len(o.buf)+len(v) > maxMarshalSize {
- return ErrTooLarge
- }
- if len(v) > 0 {
- o.buf = append(o.buf, v...)
- }
- }
-
- return state.err
-}
-
-func size_struct(prop *StructProperties, base structPointer) (n int) {
- for _, i := range prop.order {
- p := prop.Prop[i]
- if p.size != nil {
- n += p.size(p, base)
- }
- }
-
- // Add unrecognized fields at the end.
- if prop.unrecField.IsValid() {
- v := *structPointer_Bytes(base, prop.unrecField)
- n += len(v)
- }
-
- // Factor in any oneof fields.
- if prop.oneofSizer != nil {
- m := structPointer_Interface(base, prop.stype).(Message)
- n += prop.oneofSizer(m)
- }
-
- return
-}
-
-var zeroes [20]byte // longer than any conceivable sizeVarint
-
-// Encode a struct, preceded by its encoded length (as a varint).
-func (o *Buffer) enc_len_struct(prop *StructProperties, base structPointer, state *errorState) error {
- return o.enc_len_thing(func() error { return o.enc_struct(prop, base) }, state)
-}
-
-// Encode something, preceded by its encoded length (as a varint).
-func (o *Buffer) enc_len_thing(enc func() error, state *errorState) error {
- iLen := len(o.buf)
- o.buf = append(o.buf, 0, 0, 0, 0) // reserve four bytes for length
- iMsg := len(o.buf)
- err := enc()
- if err != nil && !state.shouldContinue(err, nil) {
- return err
- }
- lMsg := len(o.buf) - iMsg
- lLen := sizeVarint(uint64(lMsg))
- switch x := lLen - (iMsg - iLen); {
- case x > 0: // actual length is x bytes larger than the space we reserved
- // Move msg x bytes right.
- o.buf = append(o.buf, zeroes[:x]...)
- copy(o.buf[iMsg+x:], o.buf[iMsg:iMsg+lMsg])
- case x < 0: // actual length is x bytes smaller than the space we reserved
- // Move msg x bytes left.
- copy(o.buf[iMsg+x:], o.buf[iMsg:iMsg+lMsg])
- o.buf = o.buf[:len(o.buf)+x] // x is negative
- }
- // Encode the length in the reserved space.
- o.buf = o.buf[:iLen]
- o.EncodeVarint(uint64(lMsg))
- o.buf = o.buf[:len(o.buf)+lMsg]
- return state.err
-}
-
-// errorState maintains the first error that occurs and updates that error
-// with additional context.
-type errorState struct {
- err error
-}
-
-// shouldContinue reports whether encoding should continue upon encountering the
-// given error. If the error is RequiredNotSetError, shouldContinue returns true
-// and, if this is the first appearance of that error, remembers it for future
-// reporting.
-//
-// If prop is not nil, it may update any error with additional context about the
-// field with the error.
-func (s *errorState) shouldContinue(err error, prop *Properties) bool {
- // Ignore unset required fields.
- reqNotSet, ok := err.(*RequiredNotSetError)
- if !ok {
- return false
- }
- if s.err == nil {
- if prop != nil {
- err = &RequiredNotSetError{prop.Name + "." + reqNotSet.field}
- }
- s.err = err
- }
- return true
-}
diff --git a/vendor/github.com/golang/protobuf/proto/equal.go b/vendor/github.com/golang/protobuf/proto/equal.go
index 8b16f951..d4db5a1c 100644
--- a/vendor/github.com/golang/protobuf/proto/equal.go
+++ b/vendor/github.com/golang/protobuf/proto/equal.go
@@ -54,13 +54,17 @@ Equality is defined in this way:
in a proto3 .proto file, fields are not "set"; specifically,
zero length proto3 "bytes" fields are equal (nil == {}).
- Two repeated fields are equal iff their lengths are the same,
- and their corresponding elements are equal (a "bytes" field,
- although represented by []byte, is not a repeated field)
+ and their corresponding elements are equal. Note that a "bytes" field,
+ although represented by []byte, is not a repeated field; the
+ rule for scalar fields described above applies to it.
- Two unset fields are equal.
- Two unknown field sets are equal if their current
encoded state is equal.
- Two extension sets are equal iff they have corresponding
elements that are pairwise equal.
+ - Two map fields are equal iff their lengths are the same,
+ and they contain the same set of elements. Zero-length map
+ fields are equal.
- Every other combination of things are not equal.
The return value is undefined if a and b are not protocol buffers.
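A brief sketch of the map-field rule added above, assuming proto.Equal from this package and the vendored structpb.Struct as a stand-in for any generated message with a map field:

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	structpb "github.com/golang/protobuf/ptypes/struct"
)

func main() {
	a := &structpb.Struct{Fields: map[string]*structpb.Value{
		"x": {Kind: &structpb.Value_NumberValue{NumberValue: 1}},
	}}
	b := &structpb.Struct{Fields: map[string]*structpb.Value{
		"x": {Kind: &structpb.Value_NumberValue{NumberValue: 1}},
	}}
	fmt.Println(proto.Equal(a, b)) // true: same set of map entries

	// Zero-length map fields compare equal, whether nil or empty.
	fmt.Println(proto.Equal(
		&structpb.Struct{},
		&structpb.Struct{Fields: map[string]*structpb.Value{}},
	)) // true
}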
@@ -105,15 +109,6 @@ func equalStruct(v1, v2 reflect.Value) bool {
// set/unset mismatch
return false
}
- b1, ok := f1.Interface().(raw)
- if ok {
- b2 := f2.Interface().(raw)
- // RawMessage
- if !bytes.Equal(b1.Bytes(), b2.Bytes()) {
- return false
- }
- continue
- }
f1, f2 = f1.Elem(), f2.Elem()
}
if !equalAny(f1, f2, sprop.Prop[i]) {
@@ -142,11 +137,7 @@ func equalStruct(v1, v2 reflect.Value) bool {
u1 := uf.Bytes()
u2 := v2.FieldByName("XXX_unrecognized").Bytes()
- if !bytes.Equal(u1, u2) {
- return false
- }
-
- return true
+ return bytes.Equal(u1, u2)
}
// v1 and v2 are known to have the same type.
@@ -257,6 +248,15 @@ func equalExtMap(base reflect.Type, em1, em2 map[int32]Extension) bool {
m1, m2 := e1.value, e2.value
+ if m1 == nil && m2 == nil {
+ // Both have only encoded form.
+ if bytes.Equal(e1.enc, e2.enc) {
+ continue
+ }
+ // The bytes are different, but the extensions might still be
+ // equal. We need to decode them to compare.
+ }
+
if m1 != nil && m2 != nil {
// Both are unencoded.
if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) {
@@ -272,8 +272,12 @@ func equalExtMap(base reflect.Type, em1, em2 map[int32]Extension) bool {
desc = m[extNum]
}
if desc == nil {
+ // If both have only encoded form and the bytes are the same,
+ // it is handled above. We get here when the bytes are different.
+ // We don't know how to decode it, so just compare them as byte
+ // slices.
log.Printf("proto: don't know how to compare extension %d of %v", extNum, base)
- continue
+ return false
}
var err error
if m1 == nil {
diff --git a/vendor/github.com/golang/protobuf/proto/extensions.go b/vendor/github.com/golang/protobuf/proto/extensions.go
index 6b9b3637..816a3b9d 100644
--- a/vendor/github.com/golang/protobuf/proto/extensions.go
+++ b/vendor/github.com/golang/protobuf/proto/extensions.go
@@ -38,6 +38,7 @@ package proto
import (
"errors"
"fmt"
+ "io"
"reflect"
"strconv"
"sync"
@@ -91,14 +92,29 @@ func (n notLocker) Unlock() {}
// extendable returns the extendableProto interface for the given generated proto message.
// If the proto message has the old extension format, it returns a wrapper that implements
// the extendableProto interface.
-func extendable(p interface{}) (extendableProto, bool) {
- if ep, ok := p.(extendableProto); ok {
- return ep, ok
- }
- if ep, ok := p.(extendableProtoV1); ok {
- return extensionAdapter{ep}, ok
+func extendable(p interface{}) (extendableProto, error) {
+ switch p := p.(type) {
+ case extendableProto:
+ if isNilPtr(p) {
+ return nil, fmt.Errorf("proto: nil %T is not extendable", p)
+ }
+ return p, nil
+ case extendableProtoV1:
+ if isNilPtr(p) {
+ return nil, fmt.Errorf("proto: nil %T is not extendable", p)
+ }
+ return extensionAdapter{p}, nil
}
- return nil, false
+ // Don't allocate a specific error containing %T:
+ // this is the hot path for Clone and MarshalText.
+ return nil, errNotExtendable
+}
+
+var errNotExtendable = errors.New("proto: not an extendable proto.Message")
+
+func isNilPtr(x interface{}) bool {
+ v := reflect.ValueOf(x)
+ return v.Kind() == reflect.Ptr && v.IsNil()
}
// XXX_InternalExtensions is an internal representation of proto extensions.
@@ -143,9 +159,6 @@ func (e *XXX_InternalExtensions) extensionsRead() (map[int32]Extension, sync.Loc
return e.p.extensionMap, &e.p.mu
}
-var extendableProtoType = reflect.TypeOf((*extendableProto)(nil)).Elem()
-var extendableProtoV1Type = reflect.TypeOf((*extendableProtoV1)(nil)).Elem()
-
// ExtensionDesc represents an extension specification.
// Used in generated code from the protocol compiler.
type ExtensionDesc struct {
@@ -154,6 +167,7 @@ type ExtensionDesc struct {
Field int32 // field number
Name string // fully-qualified name of extension, for text formatting
Tag string // protobuf tag style
+ Filename string // name of the file in which the extension is defined
}
func (ed *ExtensionDesc) repeated() bool {
@@ -178,8 +192,8 @@ type Extension struct {
// SetRawExtension is for testing only.
func SetRawExtension(base Message, id int32, b []byte) {
- epb, ok := extendable(base)
- if !ok {
+ epb, err := extendable(base)
+ if err != nil {
return
}
extmap := epb.extensionsWrite()
@@ -204,7 +218,7 @@ func checkExtensionTypes(pb extendableProto, extension *ExtensionDesc) error {
pbi = ea.extendableProtoV1
}
if a, b := reflect.TypeOf(pbi), reflect.TypeOf(extension.ExtendedType); a != b {
- return errors.New("proto: bad extended type; " + b.String() + " does not extend " + a.String())
+ return fmt.Errorf("proto: bad extended type; %v does not extend %v", b, a)
}
// Check the range.
if !isExtensionField(pb, extension.Field) {
@@ -249,85 +263,11 @@ func extensionProperties(ed *ExtensionDesc) *Properties {
return prop
}
-// encode encodes any unmarshaled (unencoded) extensions in e.
-func encodeExtensions(e *XXX_InternalExtensions) error {
- m, mu := e.extensionsRead()
- if m == nil {
- return nil // fast path
- }
- mu.Lock()
- defer mu.Unlock()
- return encodeExtensionsMap(m)
-}
-
-// encode encodes any unmarshaled (unencoded) extensions in e.
-func encodeExtensionsMap(m map[int32]Extension) error {
- for k, e := range m {
- if e.value == nil || e.desc == nil {
- // Extension is only in its encoded form.
- continue
- }
-
- // We don't skip extensions that have an encoded form set,
- // because the extension value may have been mutated after
- // the last time this function was called.
-
- et := reflect.TypeOf(e.desc.ExtensionType)
- props := extensionProperties(e.desc)
-
- p := NewBuffer(nil)
- // If e.value has type T, the encoder expects a *struct{ X T }.
- // Pass a *T with a zero field and hope it all works out.
- x := reflect.New(et)
- x.Elem().Set(reflect.ValueOf(e.value))
- if err := props.enc(p, props, toStructPointer(x)); err != nil {
- return err
- }
- e.enc = p.buf
- m[k] = e
- }
- return nil
-}
-
-func extensionsSize(e *XXX_InternalExtensions) (n int) {
- m, mu := e.extensionsRead()
- if m == nil {
- return 0
- }
- mu.Lock()
- defer mu.Unlock()
- return extensionsMapSize(m)
-}
-
-func extensionsMapSize(m map[int32]Extension) (n int) {
- for _, e := range m {
- if e.value == nil || e.desc == nil {
- // Extension is only in its encoded form.
- n += len(e.enc)
- continue
- }
-
- // We don't skip extensions that have an encoded form set,
- // because the extension value may have been mutated after
- // the last time this function was called.
-
- et := reflect.TypeOf(e.desc.ExtensionType)
- props := extensionProperties(e.desc)
-
- // If e.value has type T, the encoder expects a *struct{ X T }.
- // Pass a *T with a zero field and hope it all works out.
- x := reflect.New(et)
- x.Elem().Set(reflect.ValueOf(e.value))
- n += props.size(props, toStructPointer(x))
- }
- return
-}
-
// HasExtension returns whether the given extension is present in pb.
func HasExtension(pb Message, extension *ExtensionDesc) bool {
// TODO: Check types, field numbers, etc.?
- epb, ok := extendable(pb)
- if !ok {
+ epb, err := extendable(pb)
+ if err != nil {
return false
}
extmap, mu := epb.extensionsRead()
@@ -335,15 +275,15 @@ func HasExtension(pb Message, extension *ExtensionDesc) bool {
return false
}
mu.Lock()
- _, ok = extmap[extension.Field]
+ _, ok := extmap[extension.Field]
mu.Unlock()
return ok
}
// ClearExtension removes the given extension from pb.
func ClearExtension(pb Message, extension *ExtensionDesc) {
- epb, ok := extendable(pb)
- if !ok {
+ epb, err := extendable(pb)
+ if err != nil {
return
}
// TODO: Check types, field numbers, etc.?
@@ -351,16 +291,26 @@ func ClearExtension(pb Message, extension *ExtensionDesc) {
delete(extmap, extension.Field)
}
-// GetExtension parses and returns the given extension of pb.
-// If the extension is not present and has no default value it returns ErrMissingExtension.
+// GetExtension retrieves a proto2 extended field from pb.
+//
+// If the descriptor is type complete (i.e., ExtensionDesc.ExtensionType is non-nil),
+// then GetExtension parses the encoded field and returns a Go value of the specified type.
+// If the field is not present, then the default value is returned (if one is specified),
+// otherwise ErrMissingExtension is reported.
+//
+// If the descriptor is not type complete (i.e., ExtensionDesc.ExtensionType is nil),
+// then GetExtension returns the raw encoded bytes of the field extension.
func GetExtension(pb Message, extension *ExtensionDesc) (interface{}, error) {
- epb, ok := extendable(pb)
- if !ok {
- return nil, errors.New("proto: not an extendable proto")
+ epb, err := extendable(pb)
+ if err != nil {
+ return nil, err
}
- if err := checkExtensionTypes(epb, extension); err != nil {
- return nil, err
+ if extension.ExtendedType != nil {
+ // can only check type if this is a complete descriptor
+ if err := checkExtensionTypes(epb, extension); err != nil {
+ return nil, err
+ }
}
emap, mu := epb.extensionsRead()
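A hedged usage sketch of the behaviour documented above; the generated package pb, its message type pb.Item, and the extension descriptor pb.E_Note are hypothetical placeholders for real proto2 generated code:

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	pb "example.com/yourproject/pb" // hypothetical generated proto2 package
)

func main() {
	msg := &pb.Item{} // hypothetical extendable (proto2) message

	// pb.E_Note is a hypothetical *proto.ExtensionDesc with a string extension type.
	if err := proto.SetExtension(msg, pb.E_Note, proto.String("hello")); err != nil {
		fmt.Println("set failed:", err)
		return
	}
	val, err := proto.GetExtension(msg, pb.E_Note)
	if err != nil { // ErrMissingExtension if unset and no default
		fmt.Println("get failed:", err)
		return
	}
	fmt.Println(*val.(*string)) // "hello"
}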
@@ -387,6 +337,11 @@ func GetExtension(pb Message, extension *ExtensionDesc) (interface{}, error) {
return e.value, nil
}
+ if extension.ExtensionType == nil {
+ // incomplete descriptor
+ return e.enc, nil
+ }
+
v, err := decodeExtension(e.enc, extension)
if err != nil {
return nil, err
@@ -404,6 +359,11 @@ func GetExtension(pb Message, extension *ExtensionDesc) (interface{}, error) {
// defaultExtensionValue returns the default value for extension.
// If no default for an extension is defined ErrMissingExtension is returned.
func defaultExtensionValue(extension *ExtensionDesc) (interface{}, error) {
+ if extension.ExtensionType == nil {
+ // incomplete descriptor, so no default
+ return nil, ErrMissingExtension
+ }
+
t := reflect.TypeOf(extension.ExtensionType)
props := extensionProperties(extension)
@@ -438,31 +398,28 @@ func defaultExtensionValue(extension *ExtensionDesc) (interface{}, error) {
// decodeExtension decodes an extension encoded in b.
func decodeExtension(b []byte, extension *ExtensionDesc) (interface{}, error) {
- o := NewBuffer(b)
-
t := reflect.TypeOf(extension.ExtensionType)
-
- props := extensionProperties(extension)
+ unmarshal := typeUnmarshaler(t, extension.Tag)
// t is a pointer to a struct, pointer to basic type or a slice.
- // Allocate a "field" to store the pointer/slice itself; the
- // pointer/slice will be stored here. We pass
- // the address of this field to props.dec.
- // This passes a zero field and a *t and lets props.dec
- // interpret it as a *struct{ x t }.
+ // Allocate space to store the pointer/slice.
value := reflect.New(t).Elem()
+ var err error
for {
- // Discard wire type and field number varint. It isn't needed.
- if _, err := o.DecodeVarint(); err != nil {
- return nil, err
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
}
+ b = b[n:]
+ wire := int(x) & 7
- if err := props.dec(o, props, toStructPointer(value.Addr())); err != nil {
+ b, err = unmarshal(b, valToPointer(value.Addr()), wire)
+ if err != nil {
return nil, err
}
- if o.index >= len(o.buf) {
+ if len(b) == 0 {
break
}
}
@@ -472,9 +429,9 @@ func decodeExtension(b []byte, extension *ExtensionDesc) (interface{}, error) {
// GetExtensions returns a slice of the extensions present in pb that are also listed in es.
// The returned slice has the same length as es; missing extensions will appear as nil elements.
func GetExtensions(pb Message, es []*ExtensionDesc) (extensions []interface{}, err error) {
- epb, ok := extendable(pb)
- if !ok {
- return nil, errors.New("proto: not an extendable proto")
+ epb, err := extendable(pb)
+ if err != nil {
+ return nil, err
}
extensions = make([]interface{}, len(es))
for i, e := range es {
@@ -493,9 +450,9 @@ func GetExtensions(pb Message, es []*ExtensionDesc) (extensions []interface{}, e
// For non-registered extensions, ExtensionDescs returns an incomplete descriptor containing
// just the Field field, which defines the extension's field number.
func ExtensionDescs(pb Message) ([]*ExtensionDesc, error) {
- epb, ok := extendable(pb)
- if !ok {
- return nil, fmt.Errorf("proto: %T is not an extendable proto.Message", pb)
+ epb, err := extendable(pb)
+ if err != nil {
+ return nil, err
}
registeredExtensions := RegisteredExtensions(pb)
@@ -522,9 +479,9 @@ func ExtensionDescs(pb Message) ([]*ExtensionDesc, error) {
// SetExtension sets the specified extension of pb to the specified value.
func SetExtension(pb Message, extension *ExtensionDesc, value interface{}) error {
- epb, ok := extendable(pb)
- if !ok {
- return errors.New("proto: not an extendable proto")
+ epb, err := extendable(pb)
+ if err != nil {
+ return err
}
if err := checkExtensionTypes(epb, extension); err != nil {
return err
@@ -549,8 +506,8 @@ func SetExtension(pb Message, extension *ExtensionDesc, value interface{}) error
// ClearAllExtensions clears all extensions from pb.
func ClearAllExtensions(pb Message) {
- epb, ok := extendable(pb)
- if !ok {
+ epb, err := extendable(pb)
+ if err != nil {
return
}
m := epb.extensionsWrite()
diff --git a/vendor/github.com/golang/protobuf/proto/lib.go b/vendor/github.com/golang/protobuf/proto/lib.go
index ac4ddbc0..0e2191b8 100644
--- a/vendor/github.com/golang/protobuf/proto/lib.go
+++ b/vendor/github.com/golang/protobuf/proto/lib.go
@@ -73,7 +73,6 @@ for a protocol buffer variable v:
When the .proto file specifies `syntax="proto3"`, there are some differences:
- Non-repeated fields of non-message type are values instead of pointers.
- - Getters are only generated for message and oneof fields.
- Enum types do not get an Enum method.
The simplest way to describe this is to see an example.
@@ -266,6 +265,7 @@ package proto
import (
"encoding/json"
+ "errors"
"fmt"
"log"
"reflect"
@@ -274,6 +274,8 @@ import (
"sync"
)
+var errInvalidUTF8 = errors.New("proto: invalid UTF-8 string")
+
// Message is implemented by generated protocol buffer messages.
type Message interface {
Reset()
@@ -310,16 +312,7 @@ type Buffer struct {
buf []byte // encode/decode byte stream
index int // read point
- // pools of basic types to amortize allocation.
- bools []bool
- uint32s []uint32
- uint64s []uint64
-
- // extra pools, only used with pointer_reflect.go
- int32s []int32
- int64s []int64
- float32s []float32
- float64s []float64
+ deterministic bool
}
// NewBuffer allocates a new Buffer and initializes its internal data to
@@ -344,6 +337,30 @@ func (p *Buffer) SetBuf(s []byte) {
// Bytes returns the contents of the Buffer.
func (p *Buffer) Bytes() []byte { return p.buf }
+// SetDeterministic sets whether to use deterministic serialization.
+//
+// Deterministic serialization guarantees that for a given binary, equal
+// messages will always be serialized to the same bytes. This implies:
+//
+// - Repeated serialization of a message will return the same bytes.
+// - Different processes of the same binary (which may be executing on
+// different machines) will serialize equal messages to the same bytes.
+//
+// Note that the deterministic serialization is NOT canonical across
+// languages. It is not guaranteed to remain stable over time. It is unstable
+// across different builds with schema changes due to unknown fields.
+// Users who need canonical serialization (e.g., persistent storage in a
+// canonical form, fingerprinting, etc.) should define their own
+// canonicalization specification and implement their own serializer rather
+// than relying on this API.
+//
+// If deterministic serialization is requested, map entries will be sorted
+// by keys in lexicographic order. This is an implementation detail and
+// subject to change.
+func (p *Buffer) SetDeterministic(deterministic bool) {
+ p.deterministic = deterministic
+}
+
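A usage sketch for the new knob; structpb.Struct from the vendored ptypes is used only because its Fields map makes the key-ordering effect visible:

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	structpb "github.com/golang/protobuf/ptypes/struct"
)

func main() {
	msg := &structpb.Struct{Fields: map[string]*structpb.Value{
		"b": {Kind: &structpb.Value_NumberValue{NumberValue: 2}},
		"a": {Kind: &structpb.Value_NumberValue{NumberValue: 1}},
	}}

	buf := proto.NewBuffer(nil)
	buf.SetDeterministic(true) // map entries are emitted in sorted key order
	if err := buf.Marshal(msg); err != nil {
		fmt.Println("marshal failed:", err)
		return
	}
	fmt.Printf("% x\n", buf.Bytes())
}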
/*
* Helper routines for simplifying the creation of optional fields of basic type.
*/
@@ -832,22 +849,12 @@ func fieldDefault(ft reflect.Type, prop *Properties) (sf *scalarField, nestedMes
return sf, false, nil
}
+// mapKeys returns a sort.Interface to be used for sorting the map keys.
// Map fields may have key types of non-float scalars, strings and enums.
-// The easiest way to sort them in some deterministic order is to use fmt.
-// If this turns out to be inefficient we can always consider other options,
-// such as doing a Schwartzian transform.
-
func mapKeys(vs []reflect.Value) sort.Interface {
- s := mapKeySorter{
- vs: vs,
- // default Less function: textual comparison
- less: func(a, b reflect.Value) bool {
- return fmt.Sprint(a.Interface()) < fmt.Sprint(b.Interface())
- },
- }
+ s := mapKeySorter{vs: vs}
- // Type specialization per https://developers.google.com/protocol-buffers/docs/proto#maps;
- // numeric keys are sorted numerically.
+ // Type specialization per https://developers.google.com/protocol-buffers/docs/proto#maps.
if len(vs) == 0 {
return s
}
@@ -856,6 +863,12 @@ func mapKeys(vs []reflect.Value) sort.Interface {
s.less = func(a, b reflect.Value) bool { return a.Int() < b.Int() }
case reflect.Uint32, reflect.Uint64:
s.less = func(a, b reflect.Value) bool { return a.Uint() < b.Uint() }
+ case reflect.Bool:
+ s.less = func(a, b reflect.Value) bool { return !a.Bool() && b.Bool() } // false < true
+ case reflect.String:
+ s.less = func(a, b reflect.Value) bool { return a.String() < b.String() }
+ default:
+ panic(fmt.Sprintf("unsupported map key type: %v", vs[0].Kind()))
}
return s
@@ -896,3 +909,13 @@ const ProtoPackageIsVersion2 = true
// ProtoPackageIsVersion1 is referenced from generated protocol buffer files
// to assert that that code is compatible with this version of the proto package.
const ProtoPackageIsVersion1 = true
+
+// InternalMessageInfo is a type used internally by generated .pb.go files.
+// This type is not intended to be used by non-generated code.
+// This type is not subject to any compatibility guarantee.
+type InternalMessageInfo struct {
+ marshal *marshalInfo
+ unmarshal *unmarshalInfo
+ merge *mergeInfo
+ discard *discardInfo
+}
diff --git a/vendor/github.com/golang/protobuf/proto/message_set.go b/vendor/github.com/golang/protobuf/proto/message_set.go
index fd982dec..3b6ca41d 100644
--- a/vendor/github.com/golang/protobuf/proto/message_set.go
+++ b/vendor/github.com/golang/protobuf/proto/message_set.go
@@ -42,6 +42,7 @@ import (
"fmt"
"reflect"
"sort"
+ "sync"
)
// errNoMessageTypeID occurs when a protocol buffer does not have a message type ID.
@@ -94,10 +95,7 @@ func (ms *messageSet) find(pb Message) *_MessageSet_Item {
}
func (ms *messageSet) Has(pb Message) bool {
- if ms.find(pb) != nil {
- return true
- }
- return false
+ return ms.find(pb) != nil
}
func (ms *messageSet) Unmarshal(pb Message) error {
@@ -150,46 +148,42 @@ func skipVarint(buf []byte) []byte {
// MarshalMessageSet encodes the extension map represented by m in the message set wire format.
// It is called by generated Marshal methods on protocol buffer messages with the message_set_wire_format option.
func MarshalMessageSet(exts interface{}) ([]byte, error) {
- var m map[int32]Extension
+ return marshalMessageSet(exts, false)
+}
+
+// marshalMessageSet implements the above function, with an option to turn deterministic marshaling on or off.
+func marshalMessageSet(exts interface{}, deterministic bool) ([]byte, error) {
switch exts := exts.(type) {
case *XXX_InternalExtensions:
- if err := encodeExtensions(exts); err != nil {
- return nil, err
- }
- m, _ = exts.extensionsRead()
+ var u marshalInfo
+ siz := u.sizeMessageSet(exts)
+ b := make([]byte, 0, siz)
+ return u.appendMessageSet(b, exts, deterministic)
+
case map[int32]Extension:
- if err := encodeExtensionsMap(exts); err != nil {
- return nil, err
+ // This is an old-style extension map.
+ // Wrap it in a new-style XXX_InternalExtensions.
+ ie := XXX_InternalExtensions{
+ p: &struct {
+ mu sync.Mutex
+ extensionMap map[int32]Extension
+ }{
+ extensionMap: exts,
+ },
}
- m = exts
+
+ var u marshalInfo
+ siz := u.sizeMessageSet(&ie)
+ b := make([]byte, 0, siz)
+ return u.appendMessageSet(b, &ie, deterministic)
+
default:
return nil, errors.New("proto: not an extension map")
}
-
- // Sort extension IDs to provide a deterministic encoding.
- // See also enc_map in encode.go.
- ids := make([]int, 0, len(m))
- for id := range m {
- ids = append(ids, int(id))
- }
- sort.Ints(ids)
-
- ms := &messageSet{Item: make([]*_MessageSet_Item, 0, len(m))}
- for _, id := range ids {
- e := m[int32(id)]
- // Remove the wire type and field number varint, as well as the length varint.
- msg := skipVarint(skipVarint(e.enc))
-
- ms.Item = append(ms.Item, &_MessageSet_Item{
- TypeId: Int32(int32(id)),
- Message: msg,
- })
- }
- return Marshal(ms)
}
// UnmarshalMessageSet decodes the extension map encoded in buf in the message set wire format.
-// It is called by generated Unmarshal methods on protocol buffer messages with the message_set_wire_format option.
+// It is called by Unmarshal methods on protocol buffer messages with the message_set_wire_format option.
func UnmarshalMessageSet(buf []byte, exts interface{}) error {
var m map[int32]Extension
switch exts := exts.(type) {
@@ -235,7 +229,15 @@ func MarshalMessageSetJSON(exts interface{}) ([]byte, error) {
var m map[int32]Extension
switch exts := exts.(type) {
case *XXX_InternalExtensions:
- m, _ = exts.extensionsRead()
+ var mu sync.Locker
+ m, mu = exts.extensionsRead()
+ if m != nil {
+ // Keep the extensions map locked until we're done marshaling to prevent
+ // races between marshaling and unmarshaling the lazily-{en,de}coded
+ // values.
+ mu.Lock()
+ defer mu.Unlock()
+ }
case map[int32]Extension:
m = exts
default:
@@ -253,15 +255,16 @@ func MarshalMessageSetJSON(exts interface{}) ([]byte, error) {
for i, id := range ids {
ext := m[id]
- if i > 0 {
- b.WriteByte(',')
- }
-
msd, ok := messageSetMap[id]
if !ok {
// Unknown type; we can't render it, so skip it.
continue
}
+
+ if i > 0 && b.Len() > 1 {
+ b.WriteByte(',')
+ }
+
fmt.Fprintf(&b, `"[%s]":`, msd.name)
x := ext.value
diff --git a/vendor/github.com/golang/protobuf/proto/pointer_reflect.go b/vendor/github.com/golang/protobuf/proto/pointer_reflect.go
index fb512e2e..b6cad908 100644
--- a/vendor/github.com/golang/protobuf/proto/pointer_reflect.go
+++ b/vendor/github.com/golang/protobuf/proto/pointer_reflect.go
@@ -29,7 +29,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// +build appengine js
+// +build purego appengine js
// This file contains an implementation of proto field accesses using package reflect.
// It is slower than the code in pointer_unsafe.go but it avoids package unsafe and can
@@ -38,32 +38,13 @@
package proto
import (
- "math"
"reflect"
+ "sync"
)
-// A structPointer is a pointer to a struct.
-type structPointer struct {
- v reflect.Value
-}
-
-// toStructPointer returns a structPointer equivalent to the given reflect value.
-// The reflect value must itself be a pointer to a struct.
-func toStructPointer(v reflect.Value) structPointer {
- return structPointer{v}
-}
-
-// IsNil reports whether p is nil.
-func structPointer_IsNil(p structPointer) bool {
- return p.v.IsNil()
-}
+const unsafeAllowed = false
-// Interface returns the struct pointer as an interface value.
-func structPointer_Interface(p structPointer, _ reflect.Type) interface{} {
- return p.v.Interface()
-}
-
-// A field identifies a field in a struct, accessible from a structPointer.
+// A field identifies a field in a struct, accessible from a pointer.
// In this implementation, a field is identified by the sequence of field indices
// passed to reflect's FieldByIndex.
type field []int
@@ -76,409 +57,301 @@ func toField(f *reflect.StructField) field {
// invalidField is an invalid field identifier.
var invalidField = field(nil)
+// zeroField is a noop when calling pointer.offset.
+var zeroField = field([]int{})
+
// IsValid reports whether the field identifier is valid.
func (f field) IsValid() bool { return f != nil }
-// field returns the given field in the struct as a reflect value.
-func structPointer_field(p structPointer, f field) reflect.Value {
- // Special case: an extension map entry with a value of type T
- // passes a *T to the struct-handling code with a zero field,
- // expecting that it will be treated as equivalent to *struct{ X T },
- // which has the same memory layout. We have to handle that case
- // specially, because reflect will panic if we call FieldByIndex on a
- // non-struct.
- if f == nil {
- return p.v.Elem()
- }
-
- return p.v.Elem().FieldByIndex(f)
+// The pointer type is for the table-driven decoder.
+// The implementation here uses a reflect.Value of pointer type to
+// create a generic pointer. In pointer_unsafe.go we use unsafe
+// instead of reflect to implement the same (but faster) interface.
+type pointer struct {
+ v reflect.Value
}
-// ifield returns the given field in the struct as an interface value.
-func structPointer_ifield(p structPointer, f field) interface{} {
- return structPointer_field(p, f).Addr().Interface()
+// toPointer converts an interface of pointer type to a pointer
+// that points to the same target.
+func toPointer(i *Message) pointer {
+ return pointer{v: reflect.ValueOf(*i)}
}
-// Bytes returns the address of a []byte field in the struct.
-func structPointer_Bytes(p structPointer, f field) *[]byte {
- return structPointer_ifield(p, f).(*[]byte)
+// toAddrPointer converts an interface to a pointer that points to
+// the interface data.
+func toAddrPointer(i *interface{}, isptr bool) pointer {
+ v := reflect.ValueOf(*i)
+ u := reflect.New(v.Type())
+ u.Elem().Set(v)
+ return pointer{v: u}
}
-// BytesSlice returns the address of a [][]byte field in the struct.
-func structPointer_BytesSlice(p structPointer, f field) *[][]byte {
- return structPointer_ifield(p, f).(*[][]byte)
+// valToPointer converts v to a pointer. v must be of pointer type.
+func valToPointer(v reflect.Value) pointer {
+ return pointer{v: v}
}
-// Bool returns the address of a *bool field in the struct.
-func structPointer_Bool(p structPointer, f field) **bool {
- return structPointer_ifield(p, f).(**bool)
+// offset converts from a pointer to a structure to a pointer to
+// one of its fields.
+func (p pointer) offset(f field) pointer {
+ return pointer{v: p.v.Elem().FieldByIndex(f).Addr()}
}
-// BoolVal returns the address of a bool field in the struct.
-func structPointer_BoolVal(p structPointer, f field) *bool {
- return structPointer_ifield(p, f).(*bool)
+func (p pointer) isNil() bool {
+ return p.v.IsNil()
}
-// BoolSlice returns the address of a []bool field in the struct.
-func structPointer_BoolSlice(p structPointer, f field) *[]bool {
- return structPointer_ifield(p, f).(*[]bool)
+// grow updates the slice s in place to make it one element longer.
+// s must be addressable.
+// Returns the (addressable) new element.
+func grow(s reflect.Value) reflect.Value {
+ n, m := s.Len(), s.Cap()
+ if n < m {
+ s.SetLen(n + 1)
+ } else {
+ s.Set(reflect.Append(s, reflect.Zero(s.Type().Elem())))
+ }
+ return s.Index(n)
}
-// String returns the address of a *string field in the struct.
-func structPointer_String(p structPointer, f field) **string {
- return structPointer_ifield(p, f).(**string)
+func (p pointer) toInt64() *int64 {
+ return p.v.Interface().(*int64)
}
-
-// StringVal returns the address of a string field in the struct.
-func structPointer_StringVal(p structPointer, f field) *string {
- return structPointer_ifield(p, f).(*string)
+func (p pointer) toInt64Ptr() **int64 {
+ return p.v.Interface().(**int64)
}
-
-// StringSlice returns the address of a []string field in the struct.
-func structPointer_StringSlice(p structPointer, f field) *[]string {
- return structPointer_ifield(p, f).(*[]string)
+func (p pointer) toInt64Slice() *[]int64 {
+ return p.v.Interface().(*[]int64)
}
-// Extensions returns the address of an extension map field in the struct.
-func structPointer_Extensions(p structPointer, f field) *XXX_InternalExtensions {
- return structPointer_ifield(p, f).(*XXX_InternalExtensions)
-}
+var int32ptr = reflect.TypeOf((*int32)(nil))
-// ExtMap returns the address of an extension map field in the struct.
-func structPointer_ExtMap(p structPointer, f field) *map[int32]Extension {
- return structPointer_ifield(p, f).(*map[int32]Extension)
+func (p pointer) toInt32() *int32 {
+ return p.v.Convert(int32ptr).Interface().(*int32)
}
-// NewAt returns the reflect.Value for a pointer to a field in the struct.
-func structPointer_NewAt(p structPointer, f field, typ reflect.Type) reflect.Value {
- return structPointer_field(p, f).Addr()
+// The toInt32Ptr/Slice methods don't work because of enums.
+// Instead, we must use set/get methods for the int32ptr/slice case.
+/*
+ func (p pointer) toInt32Ptr() **int32 {
+ return p.v.Interface().(**int32)
}
-
-// SetStructPointer writes a *struct field in the struct.
-func structPointer_SetStructPointer(p structPointer, f field, q structPointer) {
- structPointer_field(p, f).Set(q.v)
+ func (p pointer) toInt32Slice() *[]int32 {
+ return p.v.Interface().(*[]int32)
}
-
-// GetStructPointer reads a *struct field in the struct.
-func structPointer_GetStructPointer(p structPointer, f field) structPointer {
- return structPointer{structPointer_field(p, f)}
+*/
+func (p pointer) getInt32Ptr() *int32 {
+ if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) {
+ // raw int32 type
+ return p.v.Elem().Interface().(*int32)
+ }
+ // an enum
+ return p.v.Elem().Convert(int32PtrType).Interface().(*int32)
+}
+func (p pointer) setInt32Ptr(v int32) {
+ // Allocate value in a *int32. Possibly convert that to a *enum.
+ // Then assign it to a **int32 or **enum.
+ // Note: we can convert *int32 to *enum, but we can't convert
+ // **int32 to **enum!
+ p.v.Elem().Set(reflect.ValueOf(&v).Convert(p.v.Type().Elem()))
+}
+
+// getInt32Slice copies []int32 from p as a new slice.
+// This behavior differs from the implementation in pointer_unsafe.go.
+func (p pointer) getInt32Slice() []int32 {
+ if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) {
+ // raw int32 type
+ return p.v.Elem().Interface().([]int32)
+ }
+ // an enum
+ // Allocate a []int32, then assign []enum's values into it.
+ // Note: we can't convert []enum to []int32.
+ slice := p.v.Elem()
+ s := make([]int32, slice.Len())
+ for i := 0; i < slice.Len(); i++ {
+ s[i] = int32(slice.Index(i).Int())
+ }
+ return s
}
-// StructPointerSlice the address of a []*struct field in the struct.
-func structPointer_StructPointerSlice(p structPointer, f field) structPointerSlice {
- return structPointerSlice{structPointer_field(p, f)}
+// setInt32Slice copies []int32 into p as a new slice.
+// This behavior differs from the implementation in pointer_unsafe.go.
+func (p pointer) setInt32Slice(v []int32) {
+ if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) {
+ // raw int32 type
+ p.v.Elem().Set(reflect.ValueOf(v))
+ return
+ }
+ // an enum
+ // Allocate a []enum, then assign []int32's values into it.
+ // Note: we can't convert []enum to []int32.
+ slice := reflect.MakeSlice(p.v.Type().Elem(), len(v), cap(v))
+ for i, x := range v {
+ slice.Index(i).SetInt(int64(x))
+ }
+ p.v.Elem().Set(slice)
}
-
-// A structPointerSlice represents the address of a slice of pointers to structs
-// (themselves messages or groups). That is, v.Type() is *[]*struct{...}.
-type structPointerSlice struct {
- v reflect.Value
+func (p pointer) appendInt32Slice(v int32) {
+ grow(p.v.Elem()).SetInt(int64(v))
}
-func (p structPointerSlice) Len() int { return p.v.Len() }
-func (p structPointerSlice) Index(i int) structPointer { return structPointer{p.v.Index(i)} }
-func (p structPointerSlice) Append(q structPointer) {
- p.v.Set(reflect.Append(p.v, q.v))
+func (p pointer) toUint64() *uint64 {
+ return p.v.Interface().(*uint64)
}
-
-var (
- int32Type = reflect.TypeOf(int32(0))
- uint32Type = reflect.TypeOf(uint32(0))
- float32Type = reflect.TypeOf(float32(0))
- int64Type = reflect.TypeOf(int64(0))
- uint64Type = reflect.TypeOf(uint64(0))
- float64Type = reflect.TypeOf(float64(0))
-)
-
-// A word32 represents a field of type *int32, *uint32, *float32, or *enum.
-// That is, v.Type() is *int32, *uint32, *float32, or *enum and v is assignable.
-type word32 struct {
- v reflect.Value
+func (p pointer) toUint64Ptr() **uint64 {
+ return p.v.Interface().(**uint64)
}
-
-// IsNil reports whether p is nil.
-func word32_IsNil(p word32) bool {
- return p.v.IsNil()
+func (p pointer) toUint64Slice() *[]uint64 {
+ return p.v.Interface().(*[]uint64)
}
-
-// Set sets p to point at a newly allocated word with bits set to x.
-func word32_Set(p word32, o *Buffer, x uint32) {
- t := p.v.Type().Elem()
- switch t {
- case int32Type:
- if len(o.int32s) == 0 {
- o.int32s = make([]int32, uint32PoolSize)
- }
- o.int32s[0] = int32(x)
- p.v.Set(reflect.ValueOf(&o.int32s[0]))
- o.int32s = o.int32s[1:]
- return
- case uint32Type:
- if len(o.uint32s) == 0 {
- o.uint32s = make([]uint32, uint32PoolSize)
- }
- o.uint32s[0] = x
- p.v.Set(reflect.ValueOf(&o.uint32s[0]))
- o.uint32s = o.uint32s[1:]
- return
- case float32Type:
- if len(o.float32s) == 0 {
- o.float32s = make([]float32, uint32PoolSize)
- }
- o.float32s[0] = math.Float32frombits(x)
- p.v.Set(reflect.ValueOf(&o.float32s[0]))
- o.float32s = o.float32s[1:]
- return
- }
-
- // must be enum
- p.v.Set(reflect.New(t))
- p.v.Elem().SetInt(int64(int32(x)))
+func (p pointer) toUint32() *uint32 {
+ return p.v.Interface().(*uint32)
}
-
-// Get gets the bits pointed at by p, as a uint32.
-func word32_Get(p word32) uint32 {
- elem := p.v.Elem()
- switch elem.Kind() {
- case reflect.Int32:
- return uint32(elem.Int())
- case reflect.Uint32:
- return uint32(elem.Uint())
- case reflect.Float32:
- return math.Float32bits(float32(elem.Float()))
- }
- panic("unreachable")
+func (p pointer) toUint32Ptr() **uint32 {
+ return p.v.Interface().(**uint32)
}
-
-// Word32 returns a reference to a *int32, *uint32, *float32, or *enum field in the struct.
-func structPointer_Word32(p structPointer, f field) word32 {
- return word32{structPointer_field(p, f)}
+func (p pointer) toUint32Slice() *[]uint32 {
+ return p.v.Interface().(*[]uint32)
}
-
-// A word32Val represents a field of type int32, uint32, float32, or enum.
-// That is, v.Type() is int32, uint32, float32, or enum and v is assignable.
-type word32Val struct {
- v reflect.Value
+func (p pointer) toBool() *bool {
+ return p.v.Interface().(*bool)
}
-
-// Set sets *p to x.
-func word32Val_Set(p word32Val, x uint32) {
- switch p.v.Type() {
- case int32Type:
- p.v.SetInt(int64(x))
- return
- case uint32Type:
- p.v.SetUint(uint64(x))
- return
- case float32Type:
- p.v.SetFloat(float64(math.Float32frombits(x)))
- return
- }
-
- // must be enum
- p.v.SetInt(int64(int32(x)))
+func (p pointer) toBoolPtr() **bool {
+ return p.v.Interface().(**bool)
}
-
-// Get gets the bits pointed at by p, as a uint32.
-func word32Val_Get(p word32Val) uint32 {
- elem := p.v
- switch elem.Kind() {
- case reflect.Int32:
- return uint32(elem.Int())
- case reflect.Uint32:
- return uint32(elem.Uint())
- case reflect.Float32:
- return math.Float32bits(float32(elem.Float()))
- }
- panic("unreachable")
+func (p pointer) toBoolSlice() *[]bool {
+ return p.v.Interface().(*[]bool)
}
-
-// Word32Val returns a reference to a int32, uint32, float32, or enum field in the struct.
-func structPointer_Word32Val(p structPointer, f field) word32Val {
- return word32Val{structPointer_field(p, f)}
+func (p pointer) toFloat64() *float64 {
+ return p.v.Interface().(*float64)
}
-
-// A word32Slice is a slice of 32-bit values.
-// That is, v.Type() is []int32, []uint32, []float32, or []enum.
-type word32Slice struct {
- v reflect.Value
+func (p pointer) toFloat64Ptr() **float64 {
+ return p.v.Interface().(**float64)
}
-
-func (p word32Slice) Append(x uint32) {
- n, m := p.v.Len(), p.v.Cap()
- if n < m {
- p.v.SetLen(n + 1)
- } else {
- t := p.v.Type().Elem()
- p.v.Set(reflect.Append(p.v, reflect.Zero(t)))
- }
- elem := p.v.Index(n)
- switch elem.Kind() {
- case reflect.Int32:
- elem.SetInt(int64(int32(x)))
- case reflect.Uint32:
- elem.SetUint(uint64(x))
- case reflect.Float32:
- elem.SetFloat(float64(math.Float32frombits(x)))
- }
+func (p pointer) toFloat64Slice() *[]float64 {
+ return p.v.Interface().(*[]float64)
}
-
-func (p word32Slice) Len() int {
- return p.v.Len()
+func (p pointer) toFloat32() *float32 {
+ return p.v.Interface().(*float32)
}
-
-func (p word32Slice) Index(i int) uint32 {
- elem := p.v.Index(i)
- switch elem.Kind() {
- case reflect.Int32:
- return uint32(elem.Int())
- case reflect.Uint32:
- return uint32(elem.Uint())
- case reflect.Float32:
- return math.Float32bits(float32(elem.Float()))
- }
- panic("unreachable")
+func (p pointer) toFloat32Ptr() **float32 {
+ return p.v.Interface().(**float32)
}
-
-// Word32Slice returns a reference to a []int32, []uint32, []float32, or []enum field in the struct.
-func structPointer_Word32Slice(p structPointer, f field) word32Slice {
- return word32Slice{structPointer_field(p, f)}
+func (p pointer) toFloat32Slice() *[]float32 {
+ return p.v.Interface().(*[]float32)
}
-
-// word64 is like word32 but for 64-bit values.
-type word64 struct {
- v reflect.Value
+func (p pointer) toString() *string {
+ return p.v.Interface().(*string)
}
-
-func word64_Set(p word64, o *Buffer, x uint64) {
- t := p.v.Type().Elem()
- switch t {
- case int64Type:
- if len(o.int64s) == 0 {
- o.int64s = make([]int64, uint64PoolSize)
- }
- o.int64s[0] = int64(x)
- p.v.Set(reflect.ValueOf(&o.int64s[0]))
- o.int64s = o.int64s[1:]
- return
- case uint64Type:
- if len(o.uint64s) == 0 {
- o.uint64s = make([]uint64, uint64PoolSize)
- }
- o.uint64s[0] = x
- p.v.Set(reflect.ValueOf(&o.uint64s[0]))
- o.uint64s = o.uint64s[1:]
- return
- case float64Type:
- if len(o.float64s) == 0 {
- o.float64s = make([]float64, uint64PoolSize)
- }
- o.float64s[0] = math.Float64frombits(x)
- p.v.Set(reflect.ValueOf(&o.float64s[0]))
- o.float64s = o.float64s[1:]
- return
- }
- panic("unreachable")
+func (p pointer) toStringPtr() **string {
+ return p.v.Interface().(**string)
}
-
-func word64_IsNil(p word64) bool {
- return p.v.IsNil()
+func (p pointer) toStringSlice() *[]string {
+ return p.v.Interface().(*[]string)
}
-
-func word64_Get(p word64) uint64 {
- elem := p.v.Elem()
- switch elem.Kind() {
- case reflect.Int64:
- return uint64(elem.Int())
- case reflect.Uint64:
- return elem.Uint()
- case reflect.Float64:
- return math.Float64bits(elem.Float())
- }
- panic("unreachable")
+func (p pointer) toBytes() *[]byte {
+ return p.v.Interface().(*[]byte)
}
-
-func structPointer_Word64(p structPointer, f field) word64 {
- return word64{structPointer_field(p, f)}
+func (p pointer) toBytesSlice() *[][]byte {
+ return p.v.Interface().(*[][]byte)
+}
+func (p pointer) toExtensions() *XXX_InternalExtensions {
+ return p.v.Interface().(*XXX_InternalExtensions)
+}
+func (p pointer) toOldExtensions() *map[int32]Extension {
+ return p.v.Interface().(*map[int32]Extension)
+}
+func (p pointer) getPointer() pointer {
+ return pointer{v: p.v.Elem()}
+}
+func (p pointer) setPointer(q pointer) {
+ p.v.Elem().Set(q.v)
+}
+func (p pointer) appendPointer(q pointer) {
+ grow(p.v.Elem()).Set(q.v)
}
-// word64Val is like word32Val but for 64-bit values.
-type word64Val struct {
- v reflect.Value
+// getPointerSlice copies []*T from p as a new []pointer.
+// This behavior differs from the implementation in pointer_unsafe.go.
+func (p pointer) getPointerSlice() []pointer {
+ if p.v.IsNil() {
+ return nil
+ }
+ n := p.v.Elem().Len()
+ s := make([]pointer, n)
+ for i := 0; i < n; i++ {
+ s[i] = pointer{v: p.v.Elem().Index(i)}
+ }
+ return s
}
-func word64Val_Set(p word64Val, o *Buffer, x uint64) {
- switch p.v.Type() {
- case int64Type:
- p.v.SetInt(int64(x))
- return
- case uint64Type:
- p.v.SetUint(x)
- return
- case float64Type:
- p.v.SetFloat(math.Float64frombits(x))
+// setPointerSlice copies []pointer into p as a new []*T.
+// This behavior differs from the implementation in pointer_unsafe.go.
+func (p pointer) setPointerSlice(v []pointer) {
+ if v == nil {
+ p.v.Elem().Set(reflect.New(p.v.Elem().Type()).Elem())
return
}
- panic("unreachable")
+ s := reflect.MakeSlice(p.v.Elem().Type(), 0, len(v))
+ for _, p := range v {
+ s = reflect.Append(s, p.v)
+ }
+ p.v.Elem().Set(s)
}
-func word64Val_Get(p word64Val) uint64 {
- elem := p.v
- switch elem.Kind() {
- case reflect.Int64:
- return uint64(elem.Int())
- case reflect.Uint64:
- return elem.Uint()
- case reflect.Float64:
- return math.Float64bits(elem.Float())
+// getInterfacePointer returns a pointer that points to the
+// interface data of the interface pointed by p.
+func (p pointer) getInterfacePointer() pointer {
+ if p.v.Elem().IsNil() {
+ return pointer{v: p.v.Elem()}
}
- panic("unreachable")
+ return pointer{v: p.v.Elem().Elem().Elem().Field(0).Addr()} // *interface -> interface -> *struct -> struct
}
-func structPointer_Word64Val(p structPointer, f field) word64Val {
- return word64Val{structPointer_field(p, f)}
+func (p pointer) asPointerTo(t reflect.Type) reflect.Value {
+ // TODO: check that p.v.Type().Elem() == t?
+ return p.v
}
-type word64Slice struct {
- v reflect.Value
+func atomicLoadUnmarshalInfo(p **unmarshalInfo) *unmarshalInfo {
+ atomicLock.Lock()
+ defer atomicLock.Unlock()
+ return *p
}
-
-func (p word64Slice) Append(x uint64) {
- n, m := p.v.Len(), p.v.Cap()
- if n < m {
- p.v.SetLen(n + 1)
- } else {
- t := p.v.Type().Elem()
- p.v.Set(reflect.Append(p.v, reflect.Zero(t)))
- }
- elem := p.v.Index(n)
- switch elem.Kind() {
- case reflect.Int64:
- elem.SetInt(int64(int64(x)))
- case reflect.Uint64:
- elem.SetUint(uint64(x))
- case reflect.Float64:
- elem.SetFloat(float64(math.Float64frombits(x)))
- }
+func atomicStoreUnmarshalInfo(p **unmarshalInfo, v *unmarshalInfo) {
+ atomicLock.Lock()
+ defer atomicLock.Unlock()
+ *p = v
}
-
-func (p word64Slice) Len() int {
- return p.v.Len()
+func atomicLoadMarshalInfo(p **marshalInfo) *marshalInfo {
+ atomicLock.Lock()
+ defer atomicLock.Unlock()
+ return *p
}
-
-func (p word64Slice) Index(i int) uint64 {
- elem := p.v.Index(i)
- switch elem.Kind() {
- case reflect.Int64:
- return uint64(elem.Int())
- case reflect.Uint64:
- return uint64(elem.Uint())
- case reflect.Float64:
- return math.Float64bits(float64(elem.Float()))
- }
- panic("unreachable")
+func atomicStoreMarshalInfo(p **marshalInfo, v *marshalInfo) {
+ atomicLock.Lock()
+ defer atomicLock.Unlock()
+ *p = v
}
-
-func structPointer_Word64Slice(p structPointer, f field) word64Slice {
- return word64Slice{structPointer_field(p, f)}
+func atomicLoadMergeInfo(p **mergeInfo) *mergeInfo {
+ atomicLock.Lock()
+ defer atomicLock.Unlock()
+ return *p
+}
+func atomicStoreMergeInfo(p **mergeInfo, v *mergeInfo) {
+ atomicLock.Lock()
+ defer atomicLock.Unlock()
+ *p = v
}
+func atomicLoadDiscardInfo(p **discardInfo) *discardInfo {
+ atomicLock.Lock()
+ defer atomicLock.Unlock()
+ return *p
+}
+func atomicStoreDiscardInfo(p **discardInfo, v *discardInfo) {
+ atomicLock.Lock()
+ defer atomicLock.Unlock()
+ *p = v
+}
+
+var atomicLock sync.Mutex
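Illustrative note (not part of the vendored patch): with the build tag widened to purego appengine js, this reflect-only implementation can now also be selected on ordinary platforms by building the consuming project with go build -tags purego. A minimal, hypothetical sketch of the core technique it relies on, turning a field index path into an addressable field pointer without package unsafe (the msg type and its fields are invented for illustration):

package main

import (
	"fmt"
	"reflect"
)

type msg struct {
	Name  string
	Count int64
}

// fieldPtr returns an addressable pointer to the field of *m selected by the
// index path idx, roughly what pointer.offset does in the purego variant.
func fieldPtr(m interface{}, idx []int) reflect.Value {
	v := reflect.ValueOf(m) // m is expected to be a pointer to a struct
	return v.Elem().FieldByIndex(idx).Addr()
}

func main() {
	m := &msg{Name: "a"}
	p := fieldPtr(m, []int{1}).Interface().(*int64) // field 1 is Count
	*p = 42
	fmt.Println(m.Count) // 42
}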
diff --git a/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go b/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go
index 6b5567d4..d55a335d 100644
--- a/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go
+++ b/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go
@@ -29,7 +29,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// +build !appengine,!js
+// +build !purego,!appengine,!js
// This file contains the implementation of the proto field accesses using package unsafe.
@@ -37,38 +37,13 @@ package proto
import (
"reflect"
+ "sync/atomic"
"unsafe"
)
-// NOTE: These type_Foo functions would more idiomatically be methods,
-// but Go does not allow methods on pointer types, and we must preserve
-// some pointer type for the garbage collector. We use these
-// funcs with clunky names as our poor approximation to methods.
-//
-// An alternative would be
-// type structPointer struct { p unsafe.Pointer }
-// but that does not registerize as well.
-
-// A structPointer is a pointer to a struct.
-type structPointer unsafe.Pointer
-
-// toStructPointer returns a structPointer equivalent to the given reflect value.
-func toStructPointer(v reflect.Value) structPointer {
- return structPointer(unsafe.Pointer(v.Pointer()))
-}
-
-// IsNil reports whether p is nil.
-func structPointer_IsNil(p structPointer) bool {
- return p == nil
-}
-
-// Interface returns the struct pointer, assumed to have element type t,
-// as an interface value.
-func structPointer_Interface(p structPointer, t reflect.Type) interface{} {
- return reflect.NewAt(t, unsafe.Pointer(p)).Interface()
-}
+const unsafeAllowed = true
-// A field identifies a field in a struct, accessible from a structPointer.
+// A field identifies a field in a struct, accessible from a pointer.
// In this implementation, a field is identified by its byte offset from the start of the struct.
type field uintptr
@@ -80,191 +55,254 @@ func toField(f *reflect.StructField) field {
// invalidField is an invalid field identifier.
const invalidField = ^field(0)
+// zeroField is a noop when calling pointer.offset.
+const zeroField = field(0)
+
// IsValid reports whether the field identifier is valid.
func (f field) IsValid() bool {
- return f != ^field(0)
+ return f != invalidField
}
-// Bytes returns the address of a []byte field in the struct.
-func structPointer_Bytes(p structPointer, f field) *[]byte {
- return (*[]byte)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+// The pointer type below is for the new table-driven encoder/decoder.
+// The implementation here uses unsafe.Pointer to create a generic pointer.
+// In pointer_reflect.go we use reflect instead of unsafe to implement
+// the same (but slower) interface.
+type pointer struct {
+ p unsafe.Pointer
}
-// BytesSlice returns the address of a [][]byte field in the struct.
-func structPointer_BytesSlice(p structPointer, f field) *[][]byte {
- return (*[][]byte)(unsafe.Pointer(uintptr(p) + uintptr(f)))
-}
+// size of pointer
+var ptrSize = unsafe.Sizeof(uintptr(0))
-// Bool returns the address of a *bool field in the struct.
-func structPointer_Bool(p structPointer, f field) **bool {
- return (**bool)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+// toPointer converts an interface of pointer type to a pointer
+// that points to the same target.
+func toPointer(i *Message) pointer {
+ // Super-tricky - read pointer out of data word of interface value.
+ // Saves ~25ns over the equivalent:
+ // return valToPointer(reflect.ValueOf(*i))
+ return pointer{p: (*[2]unsafe.Pointer)(unsafe.Pointer(i))[1]}
}
-// BoolVal returns the address of a bool field in the struct.
-func structPointer_BoolVal(p structPointer, f field) *bool {
- return (*bool)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+// toAddrPointer converts an interface to a pointer that points to
+// the interface data.
+func toAddrPointer(i *interface{}, isptr bool) pointer {
+ // Super-tricky - read or get the address of data word of interface value.
+ if isptr {
+ // The interface is of pointer type, thus it is a direct interface.
+ // The data word is the pointer data itself. We take its address.
+ return pointer{p: unsafe.Pointer(uintptr(unsafe.Pointer(i)) + ptrSize)}
+ }
+ // The interface is not of pointer type. The data word is the pointer
+ // to the data.
+ return pointer{p: (*[2]unsafe.Pointer)(unsafe.Pointer(i))[1]}
}
-// BoolSlice returns the address of a []bool field in the struct.
-func structPointer_BoolSlice(p structPointer, f field) *[]bool {
- return (*[]bool)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+// valToPointer converts v to a pointer. v must be of pointer type.
+func valToPointer(v reflect.Value) pointer {
+ return pointer{p: unsafe.Pointer(v.Pointer())}
}
-// String returns the address of a *string field in the struct.
-func structPointer_String(p structPointer, f field) **string {
- return (**string)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+// offset converts from a pointer to a structure to a pointer to
+// one of its fields.
+func (p pointer) offset(f field) pointer {
+ // For safety, we should panic if !f.IsValid, however calling panic causes
+ // this to no longer be inlineable, which is a serious performance cost.
+ /*
+ if !f.IsValid() {
+ panic("invalid field")
+ }
+ */
+ return pointer{p: unsafe.Pointer(uintptr(p.p) + uintptr(f))}
}
-// StringVal returns the address of a string field in the struct.
-func structPointer_StringVal(p structPointer, f field) *string {
- return (*string)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+func (p pointer) isNil() bool {
+ return p.p == nil
}
-// StringSlice returns the address of a []string field in the struct.
-func structPointer_StringSlice(p structPointer, f field) *[]string {
- return (*[]string)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+func (p pointer) toInt64() *int64 {
+ return (*int64)(p.p)
}
-
-// ExtMap returns the address of an extension map field in the struct.
-func structPointer_Extensions(p structPointer, f field) *XXX_InternalExtensions {
- return (*XXX_InternalExtensions)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+func (p pointer) toInt64Ptr() **int64 {
+ return (**int64)(p.p)
}
-
-func structPointer_ExtMap(p structPointer, f field) *map[int32]Extension {
- return (*map[int32]Extension)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+func (p pointer) toInt64Slice() *[]int64 {
+ return (*[]int64)(p.p)
}
-
-// NewAt returns the reflect.Value for a pointer to a field in the struct.
-func structPointer_NewAt(p structPointer, f field, typ reflect.Type) reflect.Value {
- return reflect.NewAt(typ, unsafe.Pointer(uintptr(p)+uintptr(f)))
+func (p pointer) toInt32() *int32 {
+ return (*int32)(p.p)
}
-// SetStructPointer writes a *struct field in the struct.
-func structPointer_SetStructPointer(p structPointer, f field, q structPointer) {
- *(*structPointer)(unsafe.Pointer(uintptr(p) + uintptr(f))) = q
+// See pointer_reflect.go for why toInt32Ptr/Slice doesn't exist.
+/*
+ func (p pointer) toInt32Ptr() **int32 {
+ return (**int32)(p.p)
+ }
+ func (p pointer) toInt32Slice() *[]int32 {
+ return (*[]int32)(p.p)
+ }
+*/
+func (p pointer) getInt32Ptr() *int32 {
+ return *(**int32)(p.p)
}
-
-// GetStructPointer reads a *struct field in the struct.
-func structPointer_GetStructPointer(p structPointer, f field) structPointer {
- return *(*structPointer)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+func (p pointer) setInt32Ptr(v int32) {
+ *(**int32)(p.p) = &v
}
-// StructPointerSlice the address of a []*struct field in the struct.
-func structPointer_StructPointerSlice(p structPointer, f field) *structPointerSlice {
- return (*structPointerSlice)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+// getInt32Slice loads a []int32 from p.
+// The value returned is aliased with the original slice.
+// This behavior differs from the implementation in pointer_reflect.go.
+func (p pointer) getInt32Slice() []int32 {
+ return *(*[]int32)(p.p)
}
-// A structPointerSlice represents a slice of pointers to structs (themselves submessages or groups).
-type structPointerSlice []structPointer
-
-func (v *structPointerSlice) Len() int { return len(*v) }
-func (v *structPointerSlice) Index(i int) structPointer { return (*v)[i] }
-func (v *structPointerSlice) Append(p structPointer) { *v = append(*v, p) }
-
-// A word32 is the address of a "pointer to 32-bit value" field.
-type word32 **uint32
-
-// IsNil reports whether *v is nil.
-func word32_IsNil(p word32) bool {
- return *p == nil
+// setInt32Slice stores a []int32 to p.
+// The value set is aliased with the input slice.
+// This behavior differs from the implementation in pointer_reflect.go.
+func (p pointer) setInt32Slice(v []int32) {
+ *(*[]int32)(p.p) = v
}
-// Set sets *v to point at a newly allocated word set to x.
-func word32_Set(p word32, o *Buffer, x uint32) {
- if len(o.uint32s) == 0 {
- o.uint32s = make([]uint32, uint32PoolSize)
- }
- o.uint32s[0] = x
- *p = &o.uint32s[0]
- o.uint32s = o.uint32s[1:]
+// TODO: Can we get rid of appendInt32Slice and use setInt32Slice instead?
+func (p pointer) appendInt32Slice(v int32) {
+ s := (*[]int32)(p.p)
+ *s = append(*s, v)
}
-// Get gets the value pointed at by *v.
-func word32_Get(p word32) uint32 {
- return **p
+func (p pointer) toUint64() *uint64 {
+ return (*uint64)(p.p)
}
-
-// Word32 returns the address of a *int32, *uint32, *float32, or *enum field in the struct.
-func structPointer_Word32(p structPointer, f field) word32 {
- return word32((**uint32)(unsafe.Pointer(uintptr(p) + uintptr(f))))
+func (p pointer) toUint64Ptr() **uint64 {
+ return (**uint64)(p.p)
}
-
-// A word32Val is the address of a 32-bit value field.
-type word32Val *uint32
-
-// Set sets *p to x.
-func word32Val_Set(p word32Val, x uint32) {
- *p = x
+func (p pointer) toUint64Slice() *[]uint64 {
+ return (*[]uint64)(p.p)
}
-
-// Get gets the value pointed at by p.
-func word32Val_Get(p word32Val) uint32 {
- return *p
+func (p pointer) toUint32() *uint32 {
+ return (*uint32)(p.p)
}
-
-// Word32Val returns the address of a *int32, *uint32, *float32, or *enum field in the struct.
-func structPointer_Word32Val(p structPointer, f field) word32Val {
- return word32Val((*uint32)(unsafe.Pointer(uintptr(p) + uintptr(f))))
+func (p pointer) toUint32Ptr() **uint32 {
+ return (**uint32)(p.p)
}
-
-// A word32Slice is a slice of 32-bit values.
-type word32Slice []uint32
-
-func (v *word32Slice) Append(x uint32) { *v = append(*v, x) }
-func (v *word32Slice) Len() int { return len(*v) }
-func (v *word32Slice) Index(i int) uint32 { return (*v)[i] }
-
-// Word32Slice returns the address of a []int32, []uint32, []float32, or []enum field in the struct.
-func structPointer_Word32Slice(p structPointer, f field) *word32Slice {
- return (*word32Slice)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+func (p pointer) toUint32Slice() *[]uint32 {
+ return (*[]uint32)(p.p)
}
-
-// word64 is like word32 but for 64-bit values.
-type word64 **uint64
-
-func word64_Set(p word64, o *Buffer, x uint64) {
- if len(o.uint64s) == 0 {
- o.uint64s = make([]uint64, uint64PoolSize)
- }
- o.uint64s[0] = x
- *p = &o.uint64s[0]
- o.uint64s = o.uint64s[1:]
+func (p pointer) toBool() *bool {
+ return (*bool)(p.p)
}
-
-func word64_IsNil(p word64) bool {
- return *p == nil
+func (p pointer) toBoolPtr() **bool {
+ return (**bool)(p.p)
}
-
-func word64_Get(p word64) uint64 {
- return **p
+func (p pointer) toBoolSlice() *[]bool {
+ return (*[]bool)(p.p)
+}
+func (p pointer) toFloat64() *float64 {
+ return (*float64)(p.p)
+}
+func (p pointer) toFloat64Ptr() **float64 {
+ return (**float64)(p.p)
+}
+func (p pointer) toFloat64Slice() *[]float64 {
+ return (*[]float64)(p.p)
+}
+func (p pointer) toFloat32() *float32 {
+ return (*float32)(p.p)
+}
+func (p pointer) toFloat32Ptr() **float32 {
+ return (**float32)(p.p)
+}
+func (p pointer) toFloat32Slice() *[]float32 {
+ return (*[]float32)(p.p)
+}
+func (p pointer) toString() *string {
+ return (*string)(p.p)
+}
+func (p pointer) toStringPtr() **string {
+ return (**string)(p.p)
+}
+func (p pointer) toStringSlice() *[]string {
+ return (*[]string)(p.p)
+}
+func (p pointer) toBytes() *[]byte {
+ return (*[]byte)(p.p)
+}
+func (p pointer) toBytesSlice() *[][]byte {
+ return (*[][]byte)(p.p)
+}
+func (p pointer) toExtensions() *XXX_InternalExtensions {
+ return (*XXX_InternalExtensions)(p.p)
+}
+func (p pointer) toOldExtensions() *map[int32]Extension {
+ return (*map[int32]Extension)(p.p)
}
-func structPointer_Word64(p structPointer, f field) word64 {
- return word64((**uint64)(unsafe.Pointer(uintptr(p) + uintptr(f))))
+// getPointerSlice loads []*T from p as a []pointer.
+// The value returned is aliased with the original slice.
+// This behavior differs from the implementation in pointer_reflect.go.
+func (p pointer) getPointerSlice() []pointer {
+ // Super-tricky - p should point to a []*T where T is a
+ // message type. We load it as []pointer.
+ return *(*[]pointer)(p.p)
}
-// word64Val is like word32Val but for 64-bit values.
-type word64Val *uint64
+// setPointerSlice stores []pointer into p as a []*T.
+// The value set is aliased with the input slice.
+// This behavior differs from the implementation in pointer_reflect.go.
+func (p pointer) setPointerSlice(v []pointer) {
+ // Super-tricky - p should point to a []*T where T is a
+ // message type. We store it as []pointer.
+ *(*[]pointer)(p.p) = v
+}
-func word64Val_Set(p word64Val, o *Buffer, x uint64) {
- *p = x
+// getPointer loads the pointer at p and returns it.
+func (p pointer) getPointer() pointer {
+ return pointer{p: *(*unsafe.Pointer)(p.p)}
}
-func word64Val_Get(p word64Val) uint64 {
- return *p
+// setPointer stores the pointer q at p.
+func (p pointer) setPointer(q pointer) {
+ *(*unsafe.Pointer)(p.p) = q.p
}
-func structPointer_Word64Val(p structPointer, f field) word64Val {
- return word64Val((*uint64)(unsafe.Pointer(uintptr(p) + uintptr(f))))
+// append q to the slice pointed to by p.
+func (p pointer) appendPointer(q pointer) {
+ s := (*[]unsafe.Pointer)(p.p)
+ *s = append(*s, q.p)
}
-// word64Slice is like word32Slice but for 64-bit values.
-type word64Slice []uint64
+// getInterfacePointer returns a pointer that points to the
+// interface data of the interface pointed by p.
+func (p pointer) getInterfacePointer() pointer {
+ // Super-tricky - read pointer out of data word of interface value.
+ return pointer{p: (*(*[2]unsafe.Pointer)(p.p))[1]}
+}
-func (v *word64Slice) Append(x uint64) { *v = append(*v, x) }
-func (v *word64Slice) Len() int { return len(*v) }
-func (v *word64Slice) Index(i int) uint64 { return (*v)[i] }
+// asPointerTo returns a reflect.Value that is a pointer to an
+// object of type t stored at p.
+func (p pointer) asPointerTo(t reflect.Type) reflect.Value {
+ return reflect.NewAt(t, p.p)
+}
-func structPointer_Word64Slice(p structPointer, f field) *word64Slice {
- return (*word64Slice)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+func atomicLoadUnmarshalInfo(p **unmarshalInfo) *unmarshalInfo {
+ return (*unmarshalInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p))))
+}
+func atomicStoreUnmarshalInfo(p **unmarshalInfo, v *unmarshalInfo) {
+ atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v))
+}
+func atomicLoadMarshalInfo(p **marshalInfo) *marshalInfo {
+ return (*marshalInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p))))
+}
+func atomicStoreMarshalInfo(p **marshalInfo, v *marshalInfo) {
+ atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v))
+}
+func atomicLoadMergeInfo(p **mergeInfo) *mergeInfo {
+ return (*mergeInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p))))
+}
+func atomicStoreMergeInfo(p **mergeInfo, v *mergeInfo) {
+ atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v))
+}
+func atomicLoadDiscardInfo(p **discardInfo) *discardInfo {
+ return (*discardInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p))))
+}
+func atomicStoreDiscardInfo(p **discardInfo, v *discardInfo) {
+ atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v))
}
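Illustrative note (not part of the vendored patch): pointer_unsafe.go keeps the fast path, where a field is just a byte offset and field access is pointer arithmetic; its atomic helpers use sync/atomic directly instead of the mutex used by the purego variant. A minimal, hypothetical sketch of the offset-based access pattern (the msg type is invented; the vendored code additionally covers slices, interfaces and extensions):

package main

import (
	"fmt"
	"unsafe"
)

type msg struct {
	Name  string
	Count int64
}

func main() {
	m := &msg{Name: "a"}
	// In this variant a field identifier is the byte offset of the field.
	off := unsafe.Offsetof(m.Count)
	// offset(): add the field offset to the struct's base address.
	p := (*int64)(unsafe.Pointer(uintptr(unsafe.Pointer(m)) + off))
	*p = 42
	fmt.Println(m.Count) // 42
}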
diff --git a/vendor/github.com/golang/protobuf/proto/properties.go b/vendor/github.com/golang/protobuf/proto/properties.go
index 69ddda8d..f710adab 100644
--- a/vendor/github.com/golang/protobuf/proto/properties.go
+++ b/vendor/github.com/golang/protobuf/proto/properties.go
@@ -58,42 +58,6 @@ const (
WireFixed32 = 5
)
-const startSize = 10 // initial slice/string sizes
-
-// Encoders are defined in encode.go
-// An encoder outputs the full representation of a field, including its
-// tag and encoder type.
-type encoder func(p *Buffer, prop *Properties, base structPointer) error
-
-// A valueEncoder encodes a single integer in a particular encoding.
-type valueEncoder func(o *Buffer, x uint64) error
-
-// Sizers are defined in encode.go
-// A sizer returns the encoded size of a field, including its tag and encoder
-// type.
-type sizer func(prop *Properties, base structPointer) int
-
-// A valueSizer returns the encoded size of a single integer in a particular
-// encoding.
-type valueSizer func(x uint64) int
-
-// Decoders are defined in decode.go
-// A decoder creates a value from its wire representation.
-// Unrecognized subelements are saved in unrec.
-type decoder func(p *Buffer, prop *Properties, base structPointer) error
-
-// A valueDecoder decodes a single integer in a particular encoding.
-type valueDecoder func(o *Buffer) (x uint64, err error)
-
-// A oneofMarshaler does the marshaling for all oneof fields in a message.
-type oneofMarshaler func(Message, *Buffer) error
-
-// A oneofUnmarshaler does the unmarshaling for a oneof field in a message.
-type oneofUnmarshaler func(Message, int, int, *Buffer) (bool, error)
-
-// A oneofSizer does the sizing for all oneof fields in a message.
-type oneofSizer func(Message) int
-
// tagMap is an optimization over map[int]int for typical protocol buffer
// use-cases. Encoded protocol buffers are often in tag order with small tag
// numbers.
@@ -140,13 +104,6 @@ type StructProperties struct {
decoderTags tagMap // map from proto tag to struct field number
decoderOrigNames map[string]int // map from original name to struct field number
order []int // list of struct field numbers in tag order
- unrecField field // field id of the XXX_unrecognized []byte field
- extendable bool // is this an extendable proto
-
- oneofMarshaler oneofMarshaler
- oneofUnmarshaler oneofUnmarshaler
- oneofSizer oneofSizer
- stype reflect.Type
// OneofTypes contains information about the oneof fields in this message.
// It is keyed by the original name of a field.
@@ -187,36 +144,19 @@ type Properties struct {
Default string // default value
HasDefault bool // whether an explicit default was provided
- def_uint64 uint64
-
- enc encoder
- valEnc valueEncoder // set for bool and numeric types only
- field field
- tagcode []byte // encoding of EncodeVarint((Tag<<3)|WireType)
- tagbuf [8]byte
- stype reflect.Type // set for struct types only
- sprop *StructProperties // set for struct types only
- isMarshaler bool
- isUnmarshaler bool
+
+ stype reflect.Type // set for struct types only
+ sprop *StructProperties // set for struct types only
mtype reflect.Type // set for map types only
mkeyprop *Properties // set for map types only
mvalprop *Properties // set for map types only
-
- size sizer
- valSize valueSizer // set for bool and numeric types only
-
- dec decoder
- valDec valueDecoder // set for bool and numeric types only
-
- // If this is a packable field, this will be the decoder for the packed version of the field.
- packedDec decoder
}
// String formats the properties in the protobuf struct field tag style.
func (p *Properties) String() string {
s := p.Wire
- s = ","
+ s += ","
s += strconv.Itoa(p.Tag)
if p.Required {
s += ",req"
@@ -262,29 +202,14 @@ func (p *Properties) Parse(s string) {
switch p.Wire {
case "varint":
p.WireType = WireVarint
- p.valEnc = (*Buffer).EncodeVarint
- p.valDec = (*Buffer).DecodeVarint
- p.valSize = sizeVarint
case "fixed32":
p.WireType = WireFixed32
- p.valEnc = (*Buffer).EncodeFixed32
- p.valDec = (*Buffer).DecodeFixed32
- p.valSize = sizeFixed32
case "fixed64":
p.WireType = WireFixed64
- p.valEnc = (*Buffer).EncodeFixed64
- p.valDec = (*Buffer).DecodeFixed64
- p.valSize = sizeFixed64
case "zigzag32":
p.WireType = WireVarint
- p.valEnc = (*Buffer).EncodeZigzag32
- p.valDec = (*Buffer).DecodeZigzag32
- p.valSize = sizeZigzag32
case "zigzag64":
p.WireType = WireVarint
- p.valEnc = (*Buffer).EncodeZigzag64
- p.valDec = (*Buffer).DecodeZigzag64
- p.valSize = sizeZigzag64
case "bytes", "group":
p.WireType = WireBytes
// no numeric converter for non-numeric types
@@ -299,6 +224,7 @@ func (p *Properties) Parse(s string) {
return
}
+outer:
for i := 2; i < len(fields); i++ {
f := fields[i]
switch {
@@ -326,229 +252,28 @@ func (p *Properties) Parse(s string) {
if i+1 < len(fields) {
// Commas aren't escaped, and def is always last.
p.Default += "," + strings.Join(fields[i+1:], ",")
- break
+ break outer
}
}
}
}
-func logNoSliceEnc(t1, t2 reflect.Type) {
- fmt.Fprintf(os.Stderr, "proto: no slice oenc for %T = []%T\n", t1, t2)
-}
-
var protoMessageType = reflect.TypeOf((*Message)(nil)).Elem()
-// Initialize the fields for encoding and decoding.
-func (p *Properties) setEncAndDec(typ reflect.Type, f *reflect.StructField, lockGetProp bool) {
- p.enc = nil
- p.dec = nil
- p.size = nil
-
+// setFieldProps initializes the field properties for submessages and maps.
+func (p *Properties) setFieldProps(typ reflect.Type, f *reflect.StructField, lockGetProp bool) {
switch t1 := typ; t1.Kind() {
- default:
- fmt.Fprintf(os.Stderr, "proto: no coders for %v\n", t1)
-
- // proto3 scalar types
-
- case reflect.Bool:
- p.enc = (*Buffer).enc_proto3_bool
- p.dec = (*Buffer).dec_proto3_bool
- p.size = size_proto3_bool
- case reflect.Int32:
- p.enc = (*Buffer).enc_proto3_int32
- p.dec = (*Buffer).dec_proto3_int32
- p.size = size_proto3_int32
- case reflect.Uint32:
- p.enc = (*Buffer).enc_proto3_uint32
- p.dec = (*Buffer).dec_proto3_int32 // can reuse
- p.size = size_proto3_uint32
- case reflect.Int64, reflect.Uint64:
- p.enc = (*Buffer).enc_proto3_int64
- p.dec = (*Buffer).dec_proto3_int64
- p.size = size_proto3_int64
- case reflect.Float32:
- p.enc = (*Buffer).enc_proto3_uint32 // can just treat them as bits
- p.dec = (*Buffer).dec_proto3_int32
- p.size = size_proto3_uint32
- case reflect.Float64:
- p.enc = (*Buffer).enc_proto3_int64 // can just treat them as bits
- p.dec = (*Buffer).dec_proto3_int64
- p.size = size_proto3_int64
- case reflect.String:
- p.enc = (*Buffer).enc_proto3_string
- p.dec = (*Buffer).dec_proto3_string
- p.size = size_proto3_string
-
case reflect.Ptr:
- switch t2 := t1.Elem(); t2.Kind() {
- default:
- fmt.Fprintf(os.Stderr, "proto: no encoder function for %v -> %v\n", t1, t2)
- break
- case reflect.Bool:
- p.enc = (*Buffer).enc_bool
- p.dec = (*Buffer).dec_bool
- p.size = size_bool
- case reflect.Int32:
- p.enc = (*Buffer).enc_int32
- p.dec = (*Buffer).dec_int32
- p.size = size_int32
- case reflect.Uint32:
- p.enc = (*Buffer).enc_uint32
- p.dec = (*Buffer).dec_int32 // can reuse
- p.size = size_uint32
- case reflect.Int64, reflect.Uint64:
- p.enc = (*Buffer).enc_int64
- p.dec = (*Buffer).dec_int64
- p.size = size_int64
- case reflect.Float32:
- p.enc = (*Buffer).enc_uint32 // can just treat them as bits
- p.dec = (*Buffer).dec_int32
- p.size = size_uint32
- case reflect.Float64:
- p.enc = (*Buffer).enc_int64 // can just treat them as bits
- p.dec = (*Buffer).dec_int64
- p.size = size_int64
- case reflect.String:
- p.enc = (*Buffer).enc_string
- p.dec = (*Buffer).dec_string
- p.size = size_string
- case reflect.Struct:
+ if t1.Elem().Kind() == reflect.Struct {
p.stype = t1.Elem()
- p.isMarshaler = isMarshaler(t1)
- p.isUnmarshaler = isUnmarshaler(t1)
- if p.Wire == "bytes" {
- p.enc = (*Buffer).enc_struct_message
- p.dec = (*Buffer).dec_struct_message
- p.size = size_struct_message
- } else {
- p.enc = (*Buffer).enc_struct_group
- p.dec = (*Buffer).dec_struct_group
- p.size = size_struct_group
- }
}
case reflect.Slice:
- switch t2 := t1.Elem(); t2.Kind() {
- default:
- logNoSliceEnc(t1, t2)
- break
- case reflect.Bool:
- if p.Packed {
- p.enc = (*Buffer).enc_slice_packed_bool
- p.size = size_slice_packed_bool
- } else {
- p.enc = (*Buffer).enc_slice_bool
- p.size = size_slice_bool
- }
- p.dec = (*Buffer).dec_slice_bool
- p.packedDec = (*Buffer).dec_slice_packed_bool
- case reflect.Int32:
- if p.Packed {
- p.enc = (*Buffer).enc_slice_packed_int32
- p.size = size_slice_packed_int32
- } else {
- p.enc = (*Buffer).enc_slice_int32
- p.size = size_slice_int32
- }
- p.dec = (*Buffer).dec_slice_int32
- p.packedDec = (*Buffer).dec_slice_packed_int32
- case reflect.Uint32:
- if p.Packed {
- p.enc = (*Buffer).enc_slice_packed_uint32
- p.size = size_slice_packed_uint32
- } else {
- p.enc = (*Buffer).enc_slice_uint32
- p.size = size_slice_uint32
- }
- p.dec = (*Buffer).dec_slice_int32
- p.packedDec = (*Buffer).dec_slice_packed_int32
- case reflect.Int64, reflect.Uint64:
- if p.Packed {
- p.enc = (*Buffer).enc_slice_packed_int64
- p.size = size_slice_packed_int64
- } else {
- p.enc = (*Buffer).enc_slice_int64
- p.size = size_slice_int64
- }
- p.dec = (*Buffer).dec_slice_int64
- p.packedDec = (*Buffer).dec_slice_packed_int64
- case reflect.Uint8:
- p.dec = (*Buffer).dec_slice_byte
- if p.proto3 {
- p.enc = (*Buffer).enc_proto3_slice_byte
- p.size = size_proto3_slice_byte
- } else {
- p.enc = (*Buffer).enc_slice_byte
- p.size = size_slice_byte
- }
- case reflect.Float32, reflect.Float64:
- switch t2.Bits() {
- case 32:
- // can just treat them as bits
- if p.Packed {
- p.enc = (*Buffer).enc_slice_packed_uint32
- p.size = size_slice_packed_uint32
- } else {
- p.enc = (*Buffer).enc_slice_uint32
- p.size = size_slice_uint32
- }
- p.dec = (*Buffer).dec_slice_int32
- p.packedDec = (*Buffer).dec_slice_packed_int32
- case 64:
- // can just treat them as bits
- if p.Packed {
- p.enc = (*Buffer).enc_slice_packed_int64
- p.size = size_slice_packed_int64
- } else {
- p.enc = (*Buffer).enc_slice_int64
- p.size = size_slice_int64
- }
- p.dec = (*Buffer).dec_slice_int64
- p.packedDec = (*Buffer).dec_slice_packed_int64
- default:
- logNoSliceEnc(t1, t2)
- break
- }
- case reflect.String:
- p.enc = (*Buffer).enc_slice_string
- p.dec = (*Buffer).dec_slice_string
- p.size = size_slice_string
- case reflect.Ptr:
- switch t3 := t2.Elem(); t3.Kind() {
- default:
- fmt.Fprintf(os.Stderr, "proto: no ptr oenc for %T -> %T -> %T\n", t1, t2, t3)
- break
- case reflect.Struct:
- p.stype = t2.Elem()
- p.isMarshaler = isMarshaler(t2)
- p.isUnmarshaler = isUnmarshaler(t2)
- if p.Wire == "bytes" {
- p.enc = (*Buffer).enc_slice_struct_message
- p.dec = (*Buffer).dec_slice_struct_message
- p.size = size_slice_struct_message
- } else {
- p.enc = (*Buffer).enc_slice_struct_group
- p.dec = (*Buffer).dec_slice_struct_group
- p.size = size_slice_struct_group
- }
- }
- case reflect.Slice:
- switch t2.Elem().Kind() {
- default:
- fmt.Fprintf(os.Stderr, "proto: no slice elem oenc for %T -> %T -> %T\n", t1, t2, t2.Elem())
- break
- case reflect.Uint8:
- p.enc = (*Buffer).enc_slice_slice_byte
- p.dec = (*Buffer).dec_slice_slice_byte
- p.size = size_slice_slice_byte
- }
+ if t2 := t1.Elem(); t2.Kind() == reflect.Ptr && t2.Elem().Kind() == reflect.Struct {
+ p.stype = t2.Elem()
}
case reflect.Map:
- p.enc = (*Buffer).enc_new_map
- p.dec = (*Buffer).dec_new_map
- p.size = size_new_map
-
p.mtype = t1
p.mkeyprop = &Properties{}
p.mkeyprop.init(reflect.PtrTo(p.mtype.Key()), "Key", f.Tag.Get("protobuf_key"), nil, lockGetProp)
@@ -562,20 +287,6 @@ func (p *Properties) setEncAndDec(typ reflect.Type, f *reflect.StructField, lock
p.mvalprop.init(vtype, "Value", f.Tag.Get("protobuf_val"), nil, lockGetProp)
}
- // precalculate tag code
- wire := p.WireType
- if p.Packed {
- wire = WireBytes
- }
- x := uint32(p.Tag)<<3 | uint32(wire)
- i := 0
- for i = 0; x > 127; i++ {
- p.tagbuf[i] = 0x80 | uint8(x&0x7F)
- x >>= 7
- }
- p.tagbuf[i] = uint8(x)
- p.tagcode = p.tagbuf[0 : i+1]
-
if p.stype != nil {
if lockGetProp {
p.sprop = GetProperties(p.stype)
@@ -586,32 +297,9 @@ func (p *Properties) setEncAndDec(typ reflect.Type, f *reflect.StructField, lock
}
var (
- marshalerType = reflect.TypeOf((*Marshaler)(nil)).Elem()
- unmarshalerType = reflect.TypeOf((*Unmarshaler)(nil)).Elem()
+ marshalerType = reflect.TypeOf((*Marshaler)(nil)).Elem()
)
-// isMarshaler reports whether type t implements Marshaler.
-func isMarshaler(t reflect.Type) bool {
- // We're checking for (likely) pointer-receiver methods
- // so if t is not a pointer, something is very wrong.
- // The calls above only invoke isMarshaler on pointer types.
- if t.Kind() != reflect.Ptr {
- panic("proto: misuse of isMarshaler")
- }
- return t.Implements(marshalerType)
-}
-
-// isUnmarshaler reports whether type t implements Unmarshaler.
-func isUnmarshaler(t reflect.Type) bool {
- // We're checking for (likely) pointer-receiver methods
- // so if t is not a pointer, something is very wrong.
- // The calls above only invoke isUnmarshaler on pointer types.
- if t.Kind() != reflect.Ptr {
- panic("proto: misuse of isUnmarshaler")
- }
- return t.Implements(unmarshalerType)
-}
-
// Init populates the properties from a protocol buffer struct tag.
func (p *Properties) Init(typ reflect.Type, name, tag string, f *reflect.StructField) {
p.init(typ, name, tag, f, true)
@@ -621,14 +309,11 @@ func (p *Properties) init(typ reflect.Type, name, tag string, f *reflect.StructF
// "bytes,49,opt,def=hello!"
p.Name = name
p.OrigName = name
- if f != nil {
- p.field = toField(f)
- }
if tag == "" {
return
}
p.Parse(tag)
- p.setEncAndDec(typ, f, lockGetProp)
+ p.setFieldProps(typ, f, lockGetProp)
}
var (
@@ -678,9 +363,6 @@ func getPropertiesLocked(t reflect.Type) *StructProperties {
propertiesMap[t] = prop
// build properties
- prop.extendable = reflect.PtrTo(t).Implements(extendableProtoType) ||
- reflect.PtrTo(t).Implements(extendableProtoV1Type)
- prop.unrecField = invalidField
prop.Prop = make([]*Properties, t.NumField())
prop.order = make([]int, t.NumField())
@@ -690,17 +372,6 @@ func getPropertiesLocked(t reflect.Type) *StructProperties {
name := f.Name
p.init(f.Type, name, f.Tag.Get("protobuf"), &f, false)
- if f.Name == "XXX_InternalExtensions" { // special case
- p.enc = (*Buffer).enc_exts
- p.dec = nil // not needed
- p.size = size_exts
- } else if f.Name == "XXX_extensions" { // special case
- p.enc = (*Buffer).enc_map
- p.dec = nil // not needed
- p.size = size_map
- } else if f.Name == "XXX_unrecognized" { // special case
- prop.unrecField = toField(&f)
- }
oneof := f.Tag.Get("protobuf_oneof") // special case
if oneof != "" {
// Oneof fields don't use the traditional protobuf tag.
@@ -715,9 +386,6 @@ func getPropertiesLocked(t reflect.Type) *StructProperties {
}
print("\n")
}
- if p.enc == nil && !strings.HasPrefix(f.Name, "XXX_") && oneof == "" {
- fmt.Fprintln(os.Stderr, "proto: no encoder for", f.Name, f.Type.String(), "[GetProperties]")
- }
}
// Re-order prop.order.
@@ -728,8 +396,7 @@ func getPropertiesLocked(t reflect.Type) *StructProperties {
}
if om, ok := reflect.Zero(reflect.PtrTo(t)).Interface().(oneofMessage); ok {
var oots []interface{}
- prop.oneofMarshaler, prop.oneofUnmarshaler, prop.oneofSizer, oots = om.XXX_OneofFuncs()
- prop.stype = t
+ _, _, _, oots = om.XXX_OneofFuncs()
// Interpret oneof metadata.
prop.OneofTypes = make(map[string]*OneofProperties)
@@ -779,30 +446,6 @@ func getPropertiesLocked(t reflect.Type) *StructProperties {
return prop
}
-// Return the Properties object for the x[0]'th field of the structure.
-func propByIndex(t reflect.Type, x []int) *Properties {
- if len(x) != 1 {
- fmt.Fprintf(os.Stderr, "proto: field index dimension %d (not 1) for type %s\n", len(x), t)
- return nil
- }
- prop := GetProperties(t)
- return prop.Prop[x[0]]
-}
-
-// Get the address and type of a pointer to a struct from an interface.
-func getbase(pb Message) (t reflect.Type, b structPointer, err error) {
- if pb == nil {
- err = ErrNil
- return
- }
- // get the reflect type of the pointer to the struct.
- t = reflect.TypeOf(pb)
- // get the address of the struct.
- value := reflect.ValueOf(pb)
- b = toStructPointer(value)
- return
-}
-
// A global registry of enum types.
// The generated code will register the generated maps by calling RegisterEnum.
@@ -826,28 +469,65 @@ func EnumValueMap(enumType string) map[string]int32 {
// A registry of all linked message types.
// The string is a fully-qualified proto name ("pkg.Message").
var (
- protoTypes = make(map[string]reflect.Type)
- revProtoTypes = make(map[reflect.Type]string)
+ protoTypedNils = make(map[string]Message) // a map from proto names to typed nil pointers
+ protoMapTypes = make(map[string]reflect.Type) // a map from proto names to map types
+ revProtoTypes = make(map[reflect.Type]string)
)
// RegisterType is called from generated code and maps from the fully qualified
// proto name to the type (pointer to struct) of the protocol buffer.
func RegisterType(x Message, name string) {
- if _, ok := protoTypes[name]; ok {
+ if _, ok := protoTypedNils[name]; ok {
// TODO: Some day, make this a panic.
log.Printf("proto: duplicate proto type registered: %s", name)
return
}
t := reflect.TypeOf(x)
- protoTypes[name] = t
+ if v := reflect.ValueOf(x); v.Kind() == reflect.Ptr && v.Pointer() == 0 {
+ // Generated code always calls RegisterType with nil x.
+ // This check is just for extra safety.
+ protoTypedNils[name] = x
+ } else {
+ protoTypedNils[name] = reflect.Zero(t).Interface().(Message)
+ }
+ revProtoTypes[t] = name
+}
+
+// RegisterMapType is called from generated code and maps from the fully qualified
+// proto name to the native map type of the proto map definition.
+func RegisterMapType(x interface{}, name string) {
+ if reflect.TypeOf(x).Kind() != reflect.Map {
+ panic(fmt.Sprintf("RegisterMapType(%T, %q); want map", x, name))
+ }
+ if _, ok := protoMapTypes[name]; ok {
+ log.Printf("proto: duplicate proto type registered: %s", name)
+ return
+ }
+ t := reflect.TypeOf(x)
+ protoMapTypes[name] = t
revProtoTypes[t] = name
}
// MessageName returns the fully-qualified proto name for the given message type.
-func MessageName(x Message) string { return revProtoTypes[reflect.TypeOf(x)] }
+func MessageName(x Message) string {
+ type xname interface {
+ XXX_MessageName() string
+ }
+ if m, ok := x.(xname); ok {
+ return m.XXX_MessageName()
+ }
+ return revProtoTypes[reflect.TypeOf(x)]
+}
// MessageType returns the message type (pointer to struct) for a named message.
-func MessageType(name string) reflect.Type { return protoTypes[name] }
+// The type is not guaranteed to implement proto.Message if the name refers to a
+// map entry.
+func MessageType(name string) reflect.Type {
+ if t, ok := protoTypedNils[name]; ok {
+ return reflect.TypeOf(t)
+ }
+ return protoMapTypes[name]
+}
// A registry of all linked proto files.
var (
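Illustrative note (not part of the vendored patch): the registry in properties.go now stores typed nil pointers (plus, via RegisterMapType, map types), and MessageType resolves a name from either table. A hypothetical usage sketch with an invented message type and name, assuming the package is imported as github.com/golang/protobuf/proto:

package main

import (
	"fmt"
	"reflect"

	"github.com/golang/protobuf/proto"
)

// Example stands in for a generated message type; the methods below are the
// minimum needed to satisfy proto.Message.
type Example struct{}

func (*Example) Reset()         {}
func (*Example) String() string { return "example" }
func (*Example) ProtoMessage()  {}

func main() {
	// Generated code registers the type with a typed nil pointer.
	proto.RegisterType((*Example)(nil), "example.Example")

	// MessageType resolves the registered name back to the Go pointer type.
	t := proto.MessageType("example.Example")
	fmt.Println(t == reflect.TypeOf((*Example)(nil))) // true
}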
diff --git a/vendor/github.com/golang/protobuf/proto/table_marshal.go b/vendor/github.com/golang/protobuf/proto/table_marshal.go
new file mode 100644
index 00000000..0f212b30
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/table_marshal.go
@@ -0,0 +1,2681 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2016 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+import (
+ "errors"
+ "fmt"
+ "math"
+ "reflect"
+ "sort"
+ "strconv"
+ "strings"
+ "sync"
+ "sync/atomic"
+ "unicode/utf8"
+)
+
+// a sizer takes a pointer to a field and the size of its tag, and computes the size of
+// the encoded data.
+type sizer func(pointer, int) int
+
+// a marshaler takes a byte slice, a pointer to a field, and its tag (in wire format),
+// marshals the field to the end of the slice, and returns the slice and an error (if any).
+type marshaler func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error)
+
+// marshalInfo is the information used for marshaling a message.
+type marshalInfo struct {
+ typ reflect.Type
+ fields []*marshalFieldInfo
+ unrecognized field // offset of XXX_unrecognized
+ extensions field // offset of XXX_InternalExtensions
+ v1extensions field // offset of XXX_extensions
+ sizecache field // offset of XXX_sizecache
+ initialized int32 // 0 -- only typ is set, 1 -- fully initialized
+ messageset bool // uses message set wire format
+ hasmarshaler bool // has custom marshaler
+ sync.RWMutex // protect extElems map, also for initialization
+ extElems map[int32]*marshalElemInfo // info of extension elements
+}
+
+// marshalFieldInfo is the information used for marshaling a field of a message.
+type marshalFieldInfo struct {
+ field field
+ wiretag uint64 // tag in wire format
+ tagsize int // size of tag in wire format
+ sizer sizer
+ marshaler marshaler
+ isPointer bool
+ required bool // field is required
+ name string // name of the field, for error reporting
+ oneofElems map[reflect.Type]*marshalElemInfo // info of oneof elements
+}
+
+// marshalElemInfo is the information used for marshaling an extension or oneof element.
+type marshalElemInfo struct {
+ wiretag uint64 // tag in wire format
+ tagsize int // size of tag in wire format
+ sizer sizer
+ marshaler marshaler
+ isptr bool // elem is pointer typed, thus interface of this type is a direct interface (extension only)
+}
+
+var (
+ marshalInfoMap = map[reflect.Type]*marshalInfo{}
+ marshalInfoLock sync.Mutex
+)
+
+// getMarshalInfo returns the information to marshal a given type of message.
+// The info it returns may not necessarily be initialized.
+// t is the type of the message (NOT the pointer to it).
+func getMarshalInfo(t reflect.Type) *marshalInfo {
+ marshalInfoLock.Lock()
+ u, ok := marshalInfoMap[t]
+ if !ok {
+ u = &marshalInfo{typ: t}
+ marshalInfoMap[t] = u
+ }
+ marshalInfoLock.Unlock()
+ return u
+}
+
+// Size is the entry point from generated code,
+// and should ONLY be called by generated code.
+// It computes the size of encoded data of msg.
+// a is a pointer to a place to store cached marshal info.
+func (a *InternalMessageInfo) Size(msg Message) int {
+ u := getMessageMarshalInfo(msg, a)
+ ptr := toPointer(&msg)
+ if ptr.isNil() {
+ // We get here if msg is a typed nil ((*SomeMessage)(nil)),
+ // so it satisfies the interface, and msg == nil wouldn't
+ // catch it. We don't want to crash in this case.
+ return 0
+ }
+ return u.size(ptr)
+}
+
+// Marshal is the entry point from generated code,
+// and should ONLY be called by generated code.
+// It marshals msg to the end of b.
+// a is a pointer to a place to store cached marshal info.
+func (a *InternalMessageInfo) Marshal(b []byte, msg Message, deterministic bool) ([]byte, error) {
+ u := getMessageMarshalInfo(msg, a)
+ ptr := toPointer(&msg)
+ if ptr.isNil() {
+ // We get here if msg is a typed nil ((*SomeMessage)(nil)),
+ // so it satisfies the interface, and msg == nil wouldn't
+ // catch it. We don't want to crash in this case.
+ return b, ErrNil
+ }
+ return u.marshal(b, ptr, deterministic)
+}
+
+func getMessageMarshalInfo(msg interface{}, a *InternalMessageInfo) *marshalInfo {
+ // u := a.marshal, but atomically.
+ // We use an atomic here to ensure memory consistency.
+ u := atomicLoadMarshalInfo(&a.marshal)
+ if u == nil {
+ // Get marshal information from type of message.
+ t := reflect.ValueOf(msg).Type()
+ if t.Kind() != reflect.Ptr {
+ panic(fmt.Sprintf("cannot handle non-pointer message type %v", t))
+ }
+ u = getMarshalInfo(t.Elem())
+ // Store it in the cache for later users.
+ // a.marshal = u, but atomically.
+ atomicStoreMarshalInfo(&a.marshal, u)
+ }
+ return u
+}
+
+// size is the main function to compute the size of the encoded data of a message.
+// ptr is the pointer to the message.
+func (u *marshalInfo) size(ptr pointer) int {
+ if atomic.LoadInt32(&u.initialized) == 0 {
+ u.computeMarshalInfo()
+ }
+
+ // If the message can marshal itself, let it do it, for compatibility.
+ // NOTE: This is not efficient.
+ if u.hasmarshaler {
+ m := ptr.asPointerTo(u.typ).Interface().(Marshaler)
+ b, _ := m.Marshal()
+ return len(b)
+ }
+
+ n := 0
+ for _, f := range u.fields {
+ if f.isPointer && ptr.offset(f.field).getPointer().isNil() {
+ // nil pointer always marshals to nothing
+ continue
+ }
+ n += f.sizer(ptr.offset(f.field), f.tagsize)
+ }
+ if u.extensions.IsValid() {
+ e := ptr.offset(u.extensions).toExtensions()
+ if u.messageset {
+ n += u.sizeMessageSet(e)
+ } else {
+ n += u.sizeExtensions(e)
+ }
+ }
+ if u.v1extensions.IsValid() {
+ m := *ptr.offset(u.v1extensions).toOldExtensions()
+ n += u.sizeV1Extensions(m)
+ }
+ if u.unrecognized.IsValid() {
+ s := *ptr.offset(u.unrecognized).toBytes()
+ n += len(s)
+ }
+ // cache the result for use in marshal
+ if u.sizecache.IsValid() {
+ atomic.StoreInt32(ptr.offset(u.sizecache).toInt32(), int32(n))
+ }
+ return n
+}
+
+// cachedsize gets the size from the cache. If there is no cache (i.e. the message is not generated),
+// it falls back to computing the size.
+func (u *marshalInfo) cachedsize(ptr pointer) int {
+ if u.sizecache.IsValid() {
+ return int(atomic.LoadInt32(ptr.offset(u.sizecache).toInt32()))
+ }
+ return u.size(ptr)
+}
+
+// marshal is the main function to marshal a message. It takes a byte slice, appends
+// the encoded data to the end of the slice, and returns the slice and an error (if any).
+// ptr is the pointer to the message.
+// If deterministic is true, map is marshaled in deterministic order.
+func (u *marshalInfo) marshal(b []byte, ptr pointer, deterministic bool) ([]byte, error) {
+ if atomic.LoadInt32(&u.initialized) == 0 {
+ u.computeMarshalInfo()
+ }
+
+ // If the message can marshal itself, let it do it, for compatibility.
+ // NOTE: This is not efficient.
+ if u.hasmarshaler {
+ m := ptr.asPointerTo(u.typ).Interface().(Marshaler)
+ b1, err := m.Marshal()
+ b = append(b, b1...)
+ return b, err
+ }
+
+ var err, errreq error
+ // The old marshaler encodes extensions at the beginning.
+ if u.extensions.IsValid() {
+ e := ptr.offset(u.extensions).toExtensions()
+ if u.messageset {
+ b, err = u.appendMessageSet(b, e, deterministic)
+ } else {
+ b, err = u.appendExtensions(b, e, deterministic)
+ }
+ if err != nil {
+ return b, err
+ }
+ }
+ if u.v1extensions.IsValid() {
+ m := *ptr.offset(u.v1extensions).toOldExtensions()
+ b, err = u.appendV1Extensions(b, m, deterministic)
+ if err != nil {
+ return b, err
+ }
+ }
+ for _, f := range u.fields {
+ if f.required && errreq == nil {
+ if ptr.offset(f.field).getPointer().isNil() {
+ // Required field is not set.
+ // We record the error but keep going, to give a complete marshaling.
+ errreq = &RequiredNotSetError{f.name}
+ continue
+ }
+ }
+ if f.isPointer && ptr.offset(f.field).getPointer().isNil() {
+ // nil pointer always marshals to nothing
+ continue
+ }
+ b, err = f.marshaler(b, ptr.offset(f.field), f.wiretag, deterministic)
+ if err != nil {
+ if err1, ok := err.(*RequiredNotSetError); ok {
+ // Required field in submessage is not set.
+ // We record the error but keep going, to give a complete marshaling.
+ if errreq == nil {
+ errreq = &RequiredNotSetError{f.name + "." + err1.field}
+ }
+ continue
+ }
+ if err == errRepeatedHasNil {
+ err = errors.New("proto: repeated field " + f.name + " has nil element")
+ }
+ return b, err
+ }
+ }
+ if u.unrecognized.IsValid() {
+ s := *ptr.offset(u.unrecognized).toBytes()
+ b = append(b, s...)
+ }
+ return b, errreq
+}
+
+// computeMarshalInfo initializes the marshal info.
+func (u *marshalInfo) computeMarshalInfo() {
+ u.Lock()
+ defer u.Unlock()
+ if u.initialized != 0 { // non-atomic read is ok as it is protected by the lock
+ return
+ }
+
+ t := u.typ
+ u.unrecognized = invalidField
+ u.extensions = invalidField
+ u.v1extensions = invalidField
+ u.sizecache = invalidField
+
+ // If the message can marshal itself, let it do it, for compatibility.
+ // NOTE: This is not efficient.
+ if reflect.PtrTo(t).Implements(marshalerType) {
+ u.hasmarshaler = true
+ atomic.StoreInt32(&u.initialized, 1)
+ return
+ }
+
+ // get oneof implementers
+ var oneofImplementers []interface{}
+ if m, ok := reflect.Zero(reflect.PtrTo(t)).Interface().(oneofMessage); ok {
+ _, _, _, oneofImplementers = m.XXX_OneofFuncs()
+ }
+
+ n := t.NumField()
+
+ // deal with XXX fields first
+ for i := 0; i < t.NumField(); i++ {
+ f := t.Field(i)
+ if !strings.HasPrefix(f.Name, "XXX_") {
+ continue
+ }
+ switch f.Name {
+ case "XXX_sizecache":
+ u.sizecache = toField(&f)
+ case "XXX_unrecognized":
+ u.unrecognized = toField(&f)
+ case "XXX_InternalExtensions":
+ u.extensions = toField(&f)
+ u.messageset = f.Tag.Get("protobuf_messageset") == "1"
+ case "XXX_extensions":
+ u.v1extensions = toField(&f)
+ case "XXX_NoUnkeyedLiteral":
+ // nothing to do
+ default:
+ panic("unknown XXX field: " + f.Name)
+ }
+ n--
+ }
+
+ // normal fields
+ fields := make([]marshalFieldInfo, n) // batch allocation
+ u.fields = make([]*marshalFieldInfo, 0, n)
+ for i, j := 0, 0; i < t.NumField(); i++ {
+ f := t.Field(i)
+
+ if strings.HasPrefix(f.Name, "XXX_") {
+ continue
+ }
+ field := &fields[j]
+ j++
+ field.name = f.Name
+ u.fields = append(u.fields, field)
+ if f.Tag.Get("protobuf_oneof") != "" {
+ field.computeOneofFieldInfo(&f, oneofImplementers)
+ continue
+ }
+ if f.Tag.Get("protobuf") == "" {
+ // field has no tag (not in generated message), ignore it
+ u.fields = u.fields[:len(u.fields)-1]
+ j--
+ continue
+ }
+ field.computeMarshalFieldInfo(&f)
+ }
+
+ // fields are marshaled in tag order on the wire.
+ sort.Sort(byTag(u.fields))
+
+ atomic.StoreInt32(&u.initialized, 1)
+}
+
+// helper for sorting fields by tag
+type byTag []*marshalFieldInfo
+
+func (a byTag) Len() int { return len(a) }
+func (a byTag) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
+func (a byTag) Less(i, j int) bool { return a[i].wiretag < a[j].wiretag }
+
+// getExtElemInfo returns the information to marshal an extension element.
+// The info it returns is initialized.
+func (u *marshalInfo) getExtElemInfo(desc *ExtensionDesc) *marshalElemInfo {
+ // get from cache first
+ u.RLock()
+ e, ok := u.extElems[desc.Field]
+ u.RUnlock()
+ if ok {
+ return e
+ }
+
+ t := reflect.TypeOf(desc.ExtensionType) // pointer or slice to basic type or struct
+ tags := strings.Split(desc.Tag, ",")
+ tag, err := strconv.Atoi(tags[1])
+ if err != nil {
+ panic("tag is not an integer")
+ }
+ wt := wiretype(tags[0])
+ sizer, marshaler := typeMarshaler(t, tags, false, false)
+ e = &marshalElemInfo{
+ wiretag: uint64(tag)<<3 | wt,
+ tagsize: SizeVarint(uint64(tag) << 3),
+ sizer: sizer,
+ marshaler: marshaler,
+ isptr: t.Kind() == reflect.Ptr,
+ }
+
+ // update cache
+ u.Lock()
+ if u.extElems == nil {
+ u.extElems = make(map[int32]*marshalElemInfo)
+ }
+ u.extElems[desc.Field] = e
+ u.Unlock()
+ return e
+}
+
+// computeMarshalFieldInfo fills in the information needed to marshal a field.
+func (fi *marshalFieldInfo) computeMarshalFieldInfo(f *reflect.StructField) {
+ // Parse the protobuf tag of the field.
+ // The tag has the format "bytes,49,opt,name=foo,def=hello!".
+ tags := strings.Split(f.Tag.Get("protobuf"), ",")
+ if tags[0] == "" {
+ return
+ }
+ tag, err := strconv.Atoi(tags[1])
+ if err != nil {
+ panic("tag is not an integer")
+ }
+ wt := wiretype(tags[0])
+ if tags[2] == "req" {
+ fi.required = true
+ }
+ fi.setTag(f, tag, wt)
+ fi.setMarshaler(f, tags)
+}
+
+func (fi *marshalFieldInfo) computeOneofFieldInfo(f *reflect.StructField, oneofImplementers []interface{}) {
+ fi.field = toField(f)
+ fi.wiretag = 1<<31 - 1 // Use a large tag number so that oneofs sort to the end. This tag will not appear on the wire.
+ fi.isPointer = true
+ fi.sizer, fi.marshaler = makeOneOfMarshaler(fi, f)
+ fi.oneofElems = make(map[reflect.Type]*marshalElemInfo)
+
+ ityp := f.Type // interface type
+ for _, o := range oneofImplementers {
+ t := reflect.TypeOf(o)
+ if !t.Implements(ityp) {
+ continue
+ }
+ sf := t.Elem().Field(0) // oneof implementer is a struct with a single field
+ tags := strings.Split(sf.Tag.Get("protobuf"), ",")
+ tag, err := strconv.Atoi(tags[1])
+ if err != nil {
+ panic("tag is not an integer")
+ }
+ wt := wiretype(tags[0])
+ sizer, marshaler := typeMarshaler(sf.Type, tags, false, true) // oneof should not omit any zero value
+ fi.oneofElems[t.Elem()] = &marshalElemInfo{
+ wiretag: uint64(tag)<<3 | wt,
+ tagsize: SizeVarint(uint64(tag) << 3),
+ sizer: sizer,
+ marshaler: marshaler,
+ }
+ }
+}
+
+type oneofMessage interface {
+ XXX_OneofFuncs() (func(Message, *Buffer) error, func(Message, int, int, *Buffer) (bool, error), func(Message) int, []interface{})
+}
+
+// wiretype returns the wire encoding of the type.
+func wiretype(encoding string) uint64 {
+ switch encoding {
+ case "fixed32":
+ return WireFixed32
+ case "fixed64":
+ return WireFixed64
+ case "varint", "zigzag32", "zigzag64":
+ return WireVarint
+ case "bytes":
+ return WireBytes
+ case "group":
+ return WireStartGroup
+ }
+ panic("unknown wire type " + encoding)
+}
+
+// setTag fills in the tag (in wire format) and its size in the info of a field.
+func (fi *marshalFieldInfo) setTag(f *reflect.StructField, tag int, wt uint64) {
+ fi.field = toField(f)
+ fi.wiretag = uint64(tag)<<3 | wt
+ fi.tagsize = SizeVarint(uint64(tag) << 3)
+}
+
+// setMarshaler fills in the sizer and marshaler in the info of a field.
+func (fi *marshalFieldInfo) setMarshaler(f *reflect.StructField, tags []string) {
+ switch f.Type.Kind() {
+ case reflect.Map:
+ // map field
+ fi.isPointer = true
+ fi.sizer, fi.marshaler = makeMapMarshaler(f)
+ return
+ case reflect.Ptr, reflect.Slice:
+ fi.isPointer = true
+ }
+ fi.sizer, fi.marshaler = typeMarshaler(f.Type, tags, true, false)
+}
+
+// typeMarshaler returns the sizer and marshaler of a given field.
+// t is the type of the field.
+// tags is the generated "protobuf" tag of the field.
+// If nozero is true, zero value is not marshaled to the wire.
+// If oneof is true, it is a oneof field.
+func typeMarshaler(t reflect.Type, tags []string, nozero, oneof bool) (sizer, marshaler) {
+ encoding := tags[0]
+
+ pointer := false
+ slice := false
+ if t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 {
+ slice = true
+ t = t.Elem()
+ }
+ if t.Kind() == reflect.Ptr {
+ pointer = true
+ t = t.Elem()
+ }
+
+ packed := false
+ proto3 := false
+ for i := 2; i < len(tags); i++ {
+ if tags[i] == "packed" {
+ packed = true
+ }
+ if tags[i] == "proto3" {
+ proto3 = true
+ }
+ }
+
+ switch t.Kind() {
+ case reflect.Bool:
+ if pointer {
+ return sizeBoolPtr, appendBoolPtr
+ }
+ if slice {
+ if packed {
+ return sizeBoolPackedSlice, appendBoolPackedSlice
+ }
+ return sizeBoolSlice, appendBoolSlice
+ }
+ if nozero {
+ return sizeBoolValueNoZero, appendBoolValueNoZero
+ }
+ return sizeBoolValue, appendBoolValue
+ case reflect.Uint32:
+ switch encoding {
+ case "fixed32":
+ if pointer {
+ return sizeFixed32Ptr, appendFixed32Ptr
+ }
+ if slice {
+ if packed {
+ return sizeFixed32PackedSlice, appendFixed32PackedSlice
+ }
+ return sizeFixed32Slice, appendFixed32Slice
+ }
+ if nozero {
+ return sizeFixed32ValueNoZero, appendFixed32ValueNoZero
+ }
+ return sizeFixed32Value, appendFixed32Value
+ case "varint":
+ if pointer {
+ return sizeVarint32Ptr, appendVarint32Ptr
+ }
+ if slice {
+ if packed {
+ return sizeVarint32PackedSlice, appendVarint32PackedSlice
+ }
+ return sizeVarint32Slice, appendVarint32Slice
+ }
+ if nozero {
+ return sizeVarint32ValueNoZero, appendVarint32ValueNoZero
+ }
+ return sizeVarint32Value, appendVarint32Value
+ }
+ case reflect.Int32:
+ switch encoding {
+ case "fixed32":
+ if pointer {
+ return sizeFixedS32Ptr, appendFixedS32Ptr
+ }
+ if slice {
+ if packed {
+ return sizeFixedS32PackedSlice, appendFixedS32PackedSlice
+ }
+ return sizeFixedS32Slice, appendFixedS32Slice
+ }
+ if nozero {
+ return sizeFixedS32ValueNoZero, appendFixedS32ValueNoZero
+ }
+ return sizeFixedS32Value, appendFixedS32Value
+ case "varint":
+ if pointer {
+ return sizeVarintS32Ptr, appendVarintS32Ptr
+ }
+ if slice {
+ if packed {
+ return sizeVarintS32PackedSlice, appendVarintS32PackedSlice
+ }
+ return sizeVarintS32Slice, appendVarintS32Slice
+ }
+ if nozero {
+ return sizeVarintS32ValueNoZero, appendVarintS32ValueNoZero
+ }
+ return sizeVarintS32Value, appendVarintS32Value
+ case "zigzag32":
+ if pointer {
+ return sizeZigzag32Ptr, appendZigzag32Ptr
+ }
+ if slice {
+ if packed {
+ return sizeZigzag32PackedSlice, appendZigzag32PackedSlice
+ }
+ return sizeZigzag32Slice, appendZigzag32Slice
+ }
+ if nozero {
+ return sizeZigzag32ValueNoZero, appendZigzag32ValueNoZero
+ }
+ return sizeZigzag32Value, appendZigzag32Value
+ }
+ case reflect.Uint64:
+ switch encoding {
+ case "fixed64":
+ if pointer {
+ return sizeFixed64Ptr, appendFixed64Ptr
+ }
+ if slice {
+ if packed {
+ return sizeFixed64PackedSlice, appendFixed64PackedSlice
+ }
+ return sizeFixed64Slice, appendFixed64Slice
+ }
+ if nozero {
+ return sizeFixed64ValueNoZero, appendFixed64ValueNoZero
+ }
+ return sizeFixed64Value, appendFixed64Value
+ case "varint":
+ if pointer {
+ return sizeVarint64Ptr, appendVarint64Ptr
+ }
+ if slice {
+ if packed {
+ return sizeVarint64PackedSlice, appendVarint64PackedSlice
+ }
+ return sizeVarint64Slice, appendVarint64Slice
+ }
+ if nozero {
+ return sizeVarint64ValueNoZero, appendVarint64ValueNoZero
+ }
+ return sizeVarint64Value, appendVarint64Value
+ }
+ case reflect.Int64:
+ switch encoding {
+ case "fixed64":
+ if pointer {
+ return sizeFixedS64Ptr, appendFixedS64Ptr
+ }
+ if slice {
+ if packed {
+ return sizeFixedS64PackedSlice, appendFixedS64PackedSlice
+ }
+ return sizeFixedS64Slice, appendFixedS64Slice
+ }
+ if nozero {
+ return sizeFixedS64ValueNoZero, appendFixedS64ValueNoZero
+ }
+ return sizeFixedS64Value, appendFixedS64Value
+ case "varint":
+ if pointer {
+ return sizeVarintS64Ptr, appendVarintS64Ptr
+ }
+ if slice {
+ if packed {
+ return sizeVarintS64PackedSlice, appendVarintS64PackedSlice
+ }
+ return sizeVarintS64Slice, appendVarintS64Slice
+ }
+ if nozero {
+ return sizeVarintS64ValueNoZero, appendVarintS64ValueNoZero
+ }
+ return sizeVarintS64Value, appendVarintS64Value
+ case "zigzag64":
+ if pointer {
+ return sizeZigzag64Ptr, appendZigzag64Ptr
+ }
+ if slice {
+ if packed {
+ return sizeZigzag64PackedSlice, appendZigzag64PackedSlice
+ }
+ return sizeZigzag64Slice, appendZigzag64Slice
+ }
+ if nozero {
+ return sizeZigzag64ValueNoZero, appendZigzag64ValueNoZero
+ }
+ return sizeZigzag64Value, appendZigzag64Value
+ }
+ case reflect.Float32:
+ if pointer {
+ return sizeFloat32Ptr, appendFloat32Ptr
+ }
+ if slice {
+ if packed {
+ return sizeFloat32PackedSlice, appendFloat32PackedSlice
+ }
+ return sizeFloat32Slice, appendFloat32Slice
+ }
+ if nozero {
+ return sizeFloat32ValueNoZero, appendFloat32ValueNoZero
+ }
+ return sizeFloat32Value, appendFloat32Value
+ case reflect.Float64:
+ if pointer {
+ return sizeFloat64Ptr, appendFloat64Ptr
+ }
+ if slice {
+ if packed {
+ return sizeFloat64PackedSlice, appendFloat64PackedSlice
+ }
+ return sizeFloat64Slice, appendFloat64Slice
+ }
+ if nozero {
+ return sizeFloat64ValueNoZero, appendFloat64ValueNoZero
+ }
+ return sizeFloat64Value, appendFloat64Value
+ case reflect.String:
+ if pointer {
+ return sizeStringPtr, appendStringPtr
+ }
+ if slice {
+ return sizeStringSlice, appendStringSlice
+ }
+ if nozero {
+ return sizeStringValueNoZero, appendStringValueNoZero
+ }
+ return sizeStringValue, appendStringValue
+ case reflect.Slice:
+ if slice {
+ return sizeBytesSlice, appendBytesSlice
+ }
+ if oneof {
+ // A oneof bytes field may also have the "proto3" tag.
+ // We want to marshal it as a oneof field, so do this
+ // check before the proto3 check.
+ return sizeBytesOneof, appendBytesOneof
+ }
+ if proto3 {
+ return sizeBytes3, appendBytes3
+ }
+ return sizeBytes, appendBytes
+ case reflect.Struct:
+ switch encoding {
+ case "group":
+ if slice {
+ return makeGroupSliceMarshaler(getMarshalInfo(t))
+ }
+ return makeGroupMarshaler(getMarshalInfo(t))
+ case "bytes":
+ if slice {
+ return makeMessageSliceMarshaler(getMarshalInfo(t))
+ }
+ return makeMessageMarshaler(getMarshalInfo(t))
+ }
+ }
+ panic(fmt.Sprintf("unknown or mismatched type: type: %v, wire type: %v", t, encoding))
+}
+
+// Below are functions to size/marshal a specific type of a field.
+// They are stored in the field's info, and called by function pointers.
+// They have type sizer or marshaler.
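+//
+// Naming convention (mirroring the dispatch in typeMarshaler above): Value variants
+// always emit the field, ValueNoZero variants omit the zero value (proto3 scalars),
+// Ptr variants handle pointer (proto2 optional) fields, and Slice/PackedSlice variants
+// handle repeated fields in unpacked and packed form.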
+
+func sizeFixed32Value(_ pointer, tagsize int) int {
+ return 4 + tagsize
+}
+func sizeFixed32ValueNoZero(ptr pointer, tagsize int) int {
+ v := *ptr.toUint32()
+ if v == 0 {
+ return 0
+ }
+ return 4 + tagsize
+}
+func sizeFixed32Ptr(ptr pointer, tagsize int) int {
+ p := *ptr.toUint32Ptr()
+ if p == nil {
+ return 0
+ }
+ return 4 + tagsize
+}
+func sizeFixed32Slice(ptr pointer, tagsize int) int {
+ s := *ptr.toUint32Slice()
+ return (4 + tagsize) * len(s)
+}
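+// A packed repeated field is encoded as a single length-delimited record: one tag,
+// a varint byte count, then the concatenated element payload. That is why the
+// *PackedSlice sizers count tagsize only once and add SizeVarint of the payload length.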
+func sizeFixed32PackedSlice(ptr pointer, tagsize int) int {
+ s := *ptr.toUint32Slice()
+ if len(s) == 0 {
+ return 0
+ }
+ return 4*len(s) + SizeVarint(uint64(4*len(s))) + tagsize
+}
+func sizeFixedS32Value(_ pointer, tagsize int) int {
+ return 4 + tagsize
+}
+func sizeFixedS32ValueNoZero(ptr pointer, tagsize int) int {
+ v := *ptr.toInt32()
+ if v == 0 {
+ return 0
+ }
+ return 4 + tagsize
+}
+func sizeFixedS32Ptr(ptr pointer, tagsize int) int {
+ p := ptr.getInt32Ptr()
+ if p == nil {
+ return 0
+ }
+ return 4 + tagsize
+}
+func sizeFixedS32Slice(ptr pointer, tagsize int) int {
+ s := ptr.getInt32Slice()
+ return (4 + tagsize) * len(s)
+}
+func sizeFixedS32PackedSlice(ptr pointer, tagsize int) int {
+ s := ptr.getInt32Slice()
+ if len(s) == 0 {
+ return 0
+ }
+ return 4*len(s) + SizeVarint(uint64(4*len(s))) + tagsize
+}
+func sizeFloat32Value(_ pointer, tagsize int) int {
+ return 4 + tagsize
+}
+func sizeFloat32ValueNoZero(ptr pointer, tagsize int) int {
+ v := math.Float32bits(*ptr.toFloat32())
+ if v == 0 {
+ return 0
+ }
+ return 4 + tagsize
+}
+func sizeFloat32Ptr(ptr pointer, tagsize int) int {
+ p := *ptr.toFloat32Ptr()
+ if p == nil {
+ return 0
+ }
+ return 4 + tagsize
+}
+func sizeFloat32Slice(ptr pointer, tagsize int) int {
+ s := *ptr.toFloat32Slice()
+ return (4 + tagsize) * len(s)
+}
+func sizeFloat32PackedSlice(ptr pointer, tagsize int) int {
+ s := *ptr.toFloat32Slice()
+ if len(s) == 0 {
+ return 0
+ }
+ return 4*len(s) + SizeVarint(uint64(4*len(s))) + tagsize
+}
+func sizeFixed64Value(_ pointer, tagsize int) int {
+ return 8 + tagsize
+}
+func sizeFixed64ValueNoZero(ptr pointer, tagsize int) int {
+ v := *ptr.toUint64()
+ if v == 0 {
+ return 0
+ }
+ return 8 + tagsize
+}
+func sizeFixed64Ptr(ptr pointer, tagsize int) int {
+ p := *ptr.toUint64Ptr()
+ if p == nil {
+ return 0
+ }
+ return 8 + tagsize
+}
+func sizeFixed64Slice(ptr pointer, tagsize int) int {
+ s := *ptr.toUint64Slice()
+ return (8 + tagsize) * len(s)
+}
+func sizeFixed64PackedSlice(ptr pointer, tagsize int) int {
+ s := *ptr.toUint64Slice()
+ if len(s) == 0 {
+ return 0
+ }
+ return 8*len(s) + SizeVarint(uint64(8*len(s))) + tagsize
+}
+func sizeFixedS64Value(_ pointer, tagsize int) int {
+ return 8 + tagsize
+}
+func sizeFixedS64ValueNoZero(ptr pointer, tagsize int) int {
+ v := *ptr.toInt64()
+ if v == 0 {
+ return 0
+ }
+ return 8 + tagsize
+}
+func sizeFixedS64Ptr(ptr pointer, tagsize int) int {
+ p := *ptr.toInt64Ptr()
+ if p == nil {
+ return 0
+ }
+ return 8 + tagsize
+}
+func sizeFixedS64Slice(ptr pointer, tagsize int) int {
+ s := *ptr.toInt64Slice()
+ return (8 + tagsize) * len(s)
+}
+func sizeFixedS64PackedSlice(ptr pointer, tagsize int) int {
+ s := *ptr.toInt64Slice()
+ if len(s) == 0 {
+ return 0
+ }
+ return 8*len(s) + SizeVarint(uint64(8*len(s))) + tagsize
+}
+func sizeFloat64Value(_ pointer, tagsize int) int {
+ return 8 + tagsize
+}
+func sizeFloat64ValueNoZero(ptr pointer, tagsize int) int {
+ v := math.Float64bits(*ptr.toFloat64())
+ if v == 0 {
+ return 0
+ }
+ return 8 + tagsize
+}
+func sizeFloat64Ptr(ptr pointer, tagsize int) int {
+ p := *ptr.toFloat64Ptr()
+ if p == nil {
+ return 0
+ }
+ return 8 + tagsize
+}
+func sizeFloat64Slice(ptr pointer, tagsize int) int {
+ s := *ptr.toFloat64Slice()
+ return (8 + tagsize) * len(s)
+}
+func sizeFloat64PackedSlice(ptr pointer, tagsize int) int {
+ s := *ptr.toFloat64Slice()
+ if len(s) == 0 {
+ return 0
+ }
+ return 8*len(s) + SizeVarint(uint64(8*len(s))) + tagsize
+}
+func sizeVarint32Value(ptr pointer, tagsize int) int {
+ v := *ptr.toUint32()
+ return SizeVarint(uint64(v)) + tagsize
+}
+func sizeVarint32ValueNoZero(ptr pointer, tagsize int) int {
+ v := *ptr.toUint32()
+ if v == 0 {
+ return 0
+ }
+ return SizeVarint(uint64(v)) + tagsize
+}
+func sizeVarint32Ptr(ptr pointer, tagsize int) int {
+ p := *ptr.toUint32Ptr()
+ if p == nil {
+ return 0
+ }
+ return SizeVarint(uint64(*p)) + tagsize
+}
+func sizeVarint32Slice(ptr pointer, tagsize int) int {
+ s := *ptr.toUint32Slice()
+ n := 0
+ for _, v := range s {
+ n += SizeVarint(uint64(v)) + tagsize
+ }
+ return n
+}
+func sizeVarint32PackedSlice(ptr pointer, tagsize int) int {
+ s := *ptr.toUint32Slice()
+ if len(s) == 0 {
+ return 0
+ }
+ n := 0
+ for _, v := range s {
+ n += SizeVarint(uint64(v))
+ }
+ return n + SizeVarint(uint64(n)) + tagsize
+}
+func sizeVarintS32Value(ptr pointer, tagsize int) int {
+ v := *ptr.toInt32()
+ return SizeVarint(uint64(v)) + tagsize
+}
+func sizeVarintS32ValueNoZero(ptr pointer, tagsize int) int {
+ v := *ptr.toInt32()
+ if v == 0 {
+ return 0
+ }
+ return SizeVarint(uint64(v)) + tagsize
+}
+func sizeVarintS32Ptr(ptr pointer, tagsize int) int {
+ p := ptr.getInt32Ptr()
+ if p == nil {
+ return 0
+ }
+ return SizeVarint(uint64(*p)) + tagsize
+}
+func sizeVarintS32Slice(ptr pointer, tagsize int) int {
+ s := ptr.getInt32Slice()
+ n := 0
+ for _, v := range s {
+ n += SizeVarint(uint64(v)) + tagsize
+ }
+ return n
+}
+func sizeVarintS32PackedSlice(ptr pointer, tagsize int) int {
+ s := ptr.getInt32Slice()
+ if len(s) == 0 {
+ return 0
+ }
+ n := 0
+ for _, v := range s {
+ n += SizeVarint(uint64(v))
+ }
+ return n + SizeVarint(uint64(n)) + tagsize
+}
+func sizeVarint64Value(ptr pointer, tagsize int) int {
+ v := *ptr.toUint64()
+ return SizeVarint(v) + tagsize
+}
+func sizeVarint64ValueNoZero(ptr pointer, tagsize int) int {
+ v := *ptr.toUint64()
+ if v == 0 {
+ return 0
+ }
+ return SizeVarint(v) + tagsize
+}
+func sizeVarint64Ptr(ptr pointer, tagsize int) int {
+ p := *ptr.toUint64Ptr()
+ if p == nil {
+ return 0
+ }
+ return SizeVarint(*p) + tagsize
+}
+func sizeVarint64Slice(ptr pointer, tagsize int) int {
+ s := *ptr.toUint64Slice()
+ n := 0
+ for _, v := range s {
+ n += SizeVarint(v) + tagsize
+ }
+ return n
+}
+func sizeVarint64PackedSlice(ptr pointer, tagsize int) int {
+ s := *ptr.toUint64Slice()
+ if len(s) == 0 {
+ return 0
+ }
+ n := 0
+ for _, v := range s {
+ n += SizeVarint(v)
+ }
+ return n + SizeVarint(uint64(n)) + tagsize
+}
+func sizeVarintS64Value(ptr pointer, tagsize int) int {
+ v := *ptr.toInt64()
+ return SizeVarint(uint64(v)) + tagsize
+}
+func sizeVarintS64ValueNoZero(ptr pointer, tagsize int) int {
+ v := *ptr.toInt64()
+ if v == 0 {
+ return 0
+ }
+ return SizeVarint(uint64(v)) + tagsize
+}
+func sizeVarintS64Ptr(ptr pointer, tagsize int) int {
+ p := *ptr.toInt64Ptr()
+ if p == nil {
+ return 0
+ }
+ return SizeVarint(uint64(*p)) + tagsize
+}
+func sizeVarintS64Slice(ptr pointer, tagsize int) int {
+ s := *ptr.toInt64Slice()
+ n := 0
+ for _, v := range s {
+ n += SizeVarint(uint64(v)) + tagsize
+ }
+ return n
+}
+func sizeVarintS64PackedSlice(ptr pointer, tagsize int) int {
+ s := *ptr.toInt64Slice()
+ if len(s) == 0 {
+ return 0
+ }
+ n := 0
+ for _, v := range s {
+ n += SizeVarint(uint64(v))
+ }
+ return n + SizeVarint(uint64(n)) + tagsize
+}
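+// The zigzag sizers and appenders below use the standard protobuf zigzag mapping,
+// (n << 1) ^ (n >> 31) for 32-bit and (n << 1) ^ (n >> 63) for 64-bit values, so that
+// small negative numbers encode to small varints.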
+func sizeZigzag32Value(ptr pointer, tagsize int) int {
+ v := *ptr.toInt32()
+ return SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize
+}
+func sizeZigzag32ValueNoZero(ptr pointer, tagsize int) int {
+ v := *ptr.toInt32()
+ if v == 0 {
+ return 0
+ }
+ return SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize
+}
+func sizeZigzag32Ptr(ptr pointer, tagsize int) int {
+ p := ptr.getInt32Ptr()
+ if p == nil {
+ return 0
+ }
+ v := *p
+ return SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize
+}
+func sizeZigzag32Slice(ptr pointer, tagsize int) int {
+ s := ptr.getInt32Slice()
+ n := 0
+ for _, v := range s {
+ n += SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize
+ }
+ return n
+}
+func sizeZigzag32PackedSlice(ptr pointer, tagsize int) int {
+ s := ptr.getInt32Slice()
+ if len(s) == 0 {
+ return 0
+ }
+ n := 0
+ for _, v := range s {
+ n += SizeVarint(uint64((uint32(v) << 1) ^ uint32((int32(v) >> 31))))
+ }
+ return n + SizeVarint(uint64(n)) + tagsize
+}
+func sizeZigzag64Value(ptr pointer, tagsize int) int {
+ v := *ptr.toInt64()
+ return SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize
+}
+func sizeZigzag64ValueNoZero(ptr pointer, tagsize int) int {
+ v := *ptr.toInt64()
+ if v == 0 {
+ return 0
+ }
+ return SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize
+}
+func sizeZigzag64Ptr(ptr pointer, tagsize int) int {
+ p := *ptr.toInt64Ptr()
+ if p == nil {
+ return 0
+ }
+ v := *p
+ return SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize
+}
+func sizeZigzag64Slice(ptr pointer, tagsize int) int {
+ s := *ptr.toInt64Slice()
+ n := 0
+ for _, v := range s {
+ n += SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize
+ }
+ return n
+}
+func sizeZigzag64PackedSlice(ptr pointer, tagsize int) int {
+ s := *ptr.toInt64Slice()
+ if len(s) == 0 {
+ return 0
+ }
+ n := 0
+ for _, v := range s {
+ n += SizeVarint(uint64(v<<1) ^ uint64((int64(v) >> 63)))
+ }
+ return n + SizeVarint(uint64(n)) + tagsize
+}
+func sizeBoolValue(_ pointer, tagsize int) int {
+ return 1 + tagsize
+}
+func sizeBoolValueNoZero(ptr pointer, tagsize int) int {
+ v := *ptr.toBool()
+ if !v {
+ return 0
+ }
+ return 1 + tagsize
+}
+func sizeBoolPtr(ptr pointer, tagsize int) int {
+ p := *ptr.toBoolPtr()
+ if p == nil {
+ return 0
+ }
+ return 1 + tagsize
+}
+func sizeBoolSlice(ptr pointer, tagsize int) int {
+ s := *ptr.toBoolSlice()
+ return (1 + tagsize) * len(s)
+}
+func sizeBoolPackedSlice(ptr pointer, tagsize int) int {
+ s := *ptr.toBoolSlice()
+ if len(s) == 0 {
+ return 0
+ }
+ return len(s) + SizeVarint(uint64(len(s))) + tagsize
+}
+func sizeStringValue(ptr pointer, tagsize int) int {
+ v := *ptr.toString()
+ return len(v) + SizeVarint(uint64(len(v))) + tagsize
+}
+func sizeStringValueNoZero(ptr pointer, tagsize int) int {
+ v := *ptr.toString()
+ if v == "" {
+ return 0
+ }
+ return len(v) + SizeVarint(uint64(len(v))) + tagsize
+}
+func sizeStringPtr(ptr pointer, tagsize int) int {
+ p := *ptr.toStringPtr()
+ if p == nil {
+ return 0
+ }
+ v := *p
+ return len(v) + SizeVarint(uint64(len(v))) + tagsize
+}
+func sizeStringSlice(ptr pointer, tagsize int) int {
+ s := *ptr.toStringSlice()
+ n := 0
+ for _, v := range s {
+ n += len(v) + SizeVarint(uint64(len(v))) + tagsize
+ }
+ return n
+}
+func sizeBytes(ptr pointer, tagsize int) int {
+ v := *ptr.toBytes()
+ if v == nil {
+ return 0
+ }
+ return len(v) + SizeVarint(uint64(len(v))) + tagsize
+}
+func sizeBytes3(ptr pointer, tagsize int) int {
+ v := *ptr.toBytes()
+ if len(v) == 0 {
+ return 0
+ }
+ return len(v) + SizeVarint(uint64(len(v))) + tagsize
+}
+func sizeBytesOneof(ptr pointer, tagsize int) int {
+ v := *ptr.toBytes()
+ return len(v) + SizeVarint(uint64(len(v))) + tagsize
+}
+func sizeBytesSlice(ptr pointer, tagsize int) int {
+ s := *ptr.toBytesSlice()
+ n := 0
+ for _, v := range s {
+ n += len(v) + SizeVarint(uint64(len(v))) + tagsize
+ }
+ return n
+}
+
+// appendFixed32 appends an encoded fixed32 to b.
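+// Fixed-width protobuf values are written in little-endian byte order, here and in
+// appendFixed64 below.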
+func appendFixed32(b []byte, v uint32) []byte {
+ b = append(b,
+ byte(v),
+ byte(v>>8),
+ byte(v>>16),
+ byte(v>>24))
+ return b
+}
+
+// appendFixed64 appends an encoded fixed64 to b.
+func appendFixed64(b []byte, v uint64) []byte {
+ b = append(b,
+ byte(v),
+ byte(v>>8),
+ byte(v>>16),
+ byte(v>>24),
+ byte(v>>32),
+ byte(v>>40),
+ byte(v>>48),
+ byte(v>>56))
+ return b
+}
+
+// appendVarint appends an encoded varint to b.
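+// Varints are base-128: each byte carries 7 bits of the value, least-significant group
+// first, with the high bit set on every byte except the last. The switch below simply
+// unrolls the encoding by the magnitude of v.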
+func appendVarint(b []byte, v uint64) []byte {
+ // TODO: make 1-byte (maybe 2-byte) case inline-able, once we
+ // have a non-leaf inliner.
+ switch {
+ case v < 1<<7:
+ b = append(b, byte(v))
+ case v < 1<<14:
+ b = append(b,
+ byte(v&0x7f|0x80),
+ byte(v>>7))
+ case v < 1<<21:
+ b = append(b,
+ byte(v&0x7f|0x80),
+ byte((v>>7)&0x7f|0x80),
+ byte(v>>14))
+ case v < 1<<28:
+ b = append(b,
+ byte(v&0x7f|0x80),
+ byte((v>>7)&0x7f|0x80),
+ byte((v>>14)&0x7f|0x80),
+ byte(v>>21))
+ case v < 1<<35:
+ b = append(b,
+ byte(v&0x7f|0x80),
+ byte((v>>7)&0x7f|0x80),
+ byte((v>>14)&0x7f|0x80),
+ byte((v>>21)&0x7f|0x80),
+ byte(v>>28))
+ case v < 1<<42:
+ b = append(b,
+ byte(v&0x7f|0x80),
+ byte((v>>7)&0x7f|0x80),
+ byte((v>>14)&0x7f|0x80),
+ byte((v>>21)&0x7f|0x80),
+ byte((v>>28)&0x7f|0x80),
+ byte(v>>35))
+ case v < 1<<49:
+ b = append(b,
+ byte(v&0x7f|0x80),
+ byte((v>>7)&0x7f|0x80),
+ byte((v>>14)&0x7f|0x80),
+ byte((v>>21)&0x7f|0x80),
+ byte((v>>28)&0x7f|0x80),
+ byte((v>>35)&0x7f|0x80),
+ byte(v>>42))
+ case v < 1<<56:
+ b = append(b,
+ byte(v&0x7f|0x80),
+ byte((v>>7)&0x7f|0x80),
+ byte((v>>14)&0x7f|0x80),
+ byte((v>>21)&0x7f|0x80),
+ byte((v>>28)&0x7f|0x80),
+ byte((v>>35)&0x7f|0x80),
+ byte((v>>42)&0x7f|0x80),
+ byte(v>>49))
+ case v < 1<<63:
+ b = append(b,
+ byte(v&0x7f|0x80),
+ byte((v>>7)&0x7f|0x80),
+ byte((v>>14)&0x7f|0x80),
+ byte((v>>21)&0x7f|0x80),
+ byte((v>>28)&0x7f|0x80),
+ byte((v>>35)&0x7f|0x80),
+ byte((v>>42)&0x7f|0x80),
+ byte((v>>49)&0x7f|0x80),
+ byte(v>>56))
+ default:
+ b = append(b,
+ byte(v&0x7f|0x80),
+ byte((v>>7)&0x7f|0x80),
+ byte((v>>14)&0x7f|0x80),
+ byte((v>>21)&0x7f|0x80),
+ byte((v>>28)&0x7f|0x80),
+ byte((v>>35)&0x7f|0x80),
+ byte((v>>42)&0x7f|0x80),
+ byte((v>>49)&0x7f|0x80),
+ byte((v>>56)&0x7f|0x80),
+ 1)
+ }
+ return b
+}
+
+func appendFixed32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toUint32()
+ b = appendVarint(b, wiretag)
+ b = appendFixed32(b, v)
+ return b, nil
+}
+func appendFixed32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toUint32()
+ if v == 0 {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ b = appendFixed32(b, v)
+ return b, nil
+}
+func appendFixed32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ p := *ptr.toUint32Ptr()
+ if p == nil {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ b = appendFixed32(b, *p)
+ return b, nil
+}
+func appendFixed32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := *ptr.toUint32Slice()
+ for _, v := range s {
+ b = appendVarint(b, wiretag)
+ b = appendFixed32(b, v)
+ }
+ return b, nil
+}
+func appendFixed32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := *ptr.toUint32Slice()
+ if len(s) == 0 {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag&^7|WireBytes)
+ b = appendVarint(b, uint64(4*len(s)))
+ for _, v := range s {
+ b = appendFixed32(b, v)
+ }
+ return b, nil
+}
+func appendFixedS32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toInt32()
+ b = appendVarint(b, wiretag)
+ b = appendFixed32(b, uint32(v))
+ return b, nil
+}
+func appendFixedS32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toInt32()
+ if v == 0 {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ b = appendFixed32(b, uint32(v))
+ return b, nil
+}
+func appendFixedS32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ p := ptr.getInt32Ptr()
+ if p == nil {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ b = appendFixed32(b, uint32(*p))
+ return b, nil
+}
+func appendFixedS32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := ptr.getInt32Slice()
+ for _, v := range s {
+ b = appendVarint(b, wiretag)
+ b = appendFixed32(b, uint32(v))
+ }
+ return b, nil
+}
+func appendFixedS32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := ptr.getInt32Slice()
+ if len(s) == 0 {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag&^7|WireBytes)
+ b = appendVarint(b, uint64(4*len(s)))
+ for _, v := range s {
+ b = appendFixed32(b, uint32(v))
+ }
+ return b, nil
+}
+func appendFloat32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := math.Float32bits(*ptr.toFloat32())
+ b = appendVarint(b, wiretag)
+ b = appendFixed32(b, v)
+ return b, nil
+}
+func appendFloat32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := math.Float32bits(*ptr.toFloat32())
+ if v == 0 {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ b = appendFixed32(b, v)
+ return b, nil
+}
+func appendFloat32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ p := *ptr.toFloat32Ptr()
+ if p == nil {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ b = appendFixed32(b, math.Float32bits(*p))
+ return b, nil
+}
+func appendFloat32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := *ptr.toFloat32Slice()
+ for _, v := range s {
+ b = appendVarint(b, wiretag)
+ b = appendFixed32(b, math.Float32bits(v))
+ }
+ return b, nil
+}
+func appendFloat32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := *ptr.toFloat32Slice()
+ if len(s) == 0 {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag&^7|WireBytes)
+ b = appendVarint(b, uint64(4*len(s)))
+ for _, v := range s {
+ b = appendFixed32(b, math.Float32bits(v))
+ }
+ return b, nil
+}
+func appendFixed64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toUint64()
+ b = appendVarint(b, wiretag)
+ b = appendFixed64(b, v)
+ return b, nil
+}
+func appendFixed64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toUint64()
+ if v == 0 {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ b = appendFixed64(b, v)
+ return b, nil
+}
+func appendFixed64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ p := *ptr.toUint64Ptr()
+ if p == nil {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ b = appendFixed64(b, *p)
+ return b, nil
+}
+func appendFixed64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := *ptr.toUint64Slice()
+ for _, v := range s {
+ b = appendVarint(b, wiretag)
+ b = appendFixed64(b, v)
+ }
+ return b, nil
+}
+func appendFixed64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := *ptr.toUint64Slice()
+ if len(s) == 0 {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag&^7|WireBytes)
+ b = appendVarint(b, uint64(8*len(s)))
+ for _, v := range s {
+ b = appendFixed64(b, v)
+ }
+ return b, nil
+}
+func appendFixedS64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toInt64()
+ b = appendVarint(b, wiretag)
+ b = appendFixed64(b, uint64(v))
+ return b, nil
+}
+func appendFixedS64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toInt64()
+ if v == 0 {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ b = appendFixed64(b, uint64(v))
+ return b, nil
+}
+func appendFixedS64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ p := *ptr.toInt64Ptr()
+ if p == nil {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ b = appendFixed64(b, uint64(*p))
+ return b, nil
+}
+func appendFixedS64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := *ptr.toInt64Slice()
+ for _, v := range s {
+ b = appendVarint(b, wiretag)
+ b = appendFixed64(b, uint64(v))
+ }
+ return b, nil
+}
+func appendFixedS64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := *ptr.toInt64Slice()
+ if len(s) == 0 {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag&^7|WireBytes)
+ b = appendVarint(b, uint64(8*len(s)))
+ for _, v := range s {
+ b = appendFixed64(b, uint64(v))
+ }
+ return b, nil
+}
+func appendFloat64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := math.Float64bits(*ptr.toFloat64())
+ b = appendVarint(b, wiretag)
+ b = appendFixed64(b, v)
+ return b, nil
+}
+func appendFloat64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := math.Float64bits(*ptr.toFloat64())
+ if v == 0 {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ b = appendFixed64(b, v)
+ return b, nil
+}
+func appendFloat64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ p := *ptr.toFloat64Ptr()
+ if p == nil {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ b = appendFixed64(b, math.Float64bits(*p))
+ return b, nil
+}
+func appendFloat64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := *ptr.toFloat64Slice()
+ for _, v := range s {
+ b = appendVarint(b, wiretag)
+ b = appendFixed64(b, math.Float64bits(v))
+ }
+ return b, nil
+}
+func appendFloat64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := *ptr.toFloat64Slice()
+ if len(s) == 0 {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag&^7|WireBytes)
+ b = appendVarint(b, uint64(8*len(s)))
+ for _, v := range s {
+ b = appendFixed64(b, math.Float64bits(v))
+ }
+ return b, nil
+}
+func appendVarint32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toUint32()
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(v))
+ return b, nil
+}
+func appendVarint32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toUint32()
+ if v == 0 {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(v))
+ return b, nil
+}
+func appendVarint32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ p := *ptr.toUint32Ptr()
+ if p == nil {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(*p))
+ return b, nil
+}
+func appendVarint32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := *ptr.toUint32Slice()
+ for _, v := range s {
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(v))
+ }
+ return b, nil
+}
+func appendVarint32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := *ptr.toUint32Slice()
+ if len(s) == 0 {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag&^7|WireBytes)
+ // compute size
+ n := 0
+ for _, v := range s {
+ n += SizeVarint(uint64(v))
+ }
+ b = appendVarint(b, uint64(n))
+ for _, v := range s {
+ b = appendVarint(b, uint64(v))
+ }
+ return b, nil
+}
+func appendVarintS32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toInt32()
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(v))
+ return b, nil
+}
+func appendVarintS32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toInt32()
+ if v == 0 {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(v))
+ return b, nil
+}
+func appendVarintS32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ p := ptr.getInt32Ptr()
+ if p == nil {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(*p))
+ return b, nil
+}
+func appendVarintS32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := ptr.getInt32Slice()
+ for _, v := range s {
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(v))
+ }
+ return b, nil
+}
+func appendVarintS32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := ptr.getInt32Slice()
+ if len(s) == 0 {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag&^7|WireBytes)
+ // compute size
+ n := 0
+ for _, v := range s {
+ n += SizeVarint(uint64(v))
+ }
+ b = appendVarint(b, uint64(n))
+ for _, v := range s {
+ b = appendVarint(b, uint64(v))
+ }
+ return b, nil
+}
+func appendVarint64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toUint64()
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, v)
+ return b, nil
+}
+func appendVarint64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toUint64()
+ if v == 0 {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, v)
+ return b, nil
+}
+func appendVarint64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ p := *ptr.toUint64Ptr()
+ if p == nil {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, *p)
+ return b, nil
+}
+func appendVarint64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := *ptr.toUint64Slice()
+ for _, v := range s {
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, v)
+ }
+ return b, nil
+}
+func appendVarint64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := *ptr.toUint64Slice()
+ if len(s) == 0 {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag&^7|WireBytes)
+ // compute size
+ n := 0
+ for _, v := range s {
+ n += SizeVarint(v)
+ }
+ b = appendVarint(b, uint64(n))
+ for _, v := range s {
+ b = appendVarint(b, v)
+ }
+ return b, nil
+}
+func appendVarintS64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toInt64()
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(v))
+ return b, nil
+}
+func appendVarintS64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toInt64()
+ if v == 0 {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(v))
+ return b, nil
+}
+func appendVarintS64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ p := *ptr.toInt64Ptr()
+ if p == nil {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(*p))
+ return b, nil
+}
+func appendVarintS64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := *ptr.toInt64Slice()
+ for _, v := range s {
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(v))
+ }
+ return b, nil
+}
+func appendVarintS64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := *ptr.toInt64Slice()
+ if len(s) == 0 {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag&^7|WireBytes)
+ // compute size
+ n := 0
+ for _, v := range s {
+ n += SizeVarint(uint64(v))
+ }
+ b = appendVarint(b, uint64(n))
+ for _, v := range s {
+ b = appendVarint(b, uint64(v))
+ }
+ return b, nil
+}
+func appendZigzag32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toInt32()
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31))))
+ return b, nil
+}
+func appendZigzag32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toInt32()
+ if v == 0 {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31))))
+ return b, nil
+}
+func appendZigzag32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ p := ptr.getInt32Ptr()
+ if p == nil {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ v := *p
+ b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31))))
+ return b, nil
+}
+func appendZigzag32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := ptr.getInt32Slice()
+ for _, v := range s {
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31))))
+ }
+ return b, nil
+}
+func appendZigzag32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := ptr.getInt32Slice()
+ if len(s) == 0 {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag&^7|WireBytes)
+ // compute size
+ n := 0
+ for _, v := range s {
+ n += SizeVarint(uint64((uint32(v) << 1) ^ uint32((int32(v) >> 31))))
+ }
+ b = appendVarint(b, uint64(n))
+ for _, v := range s {
+ b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31))))
+ }
+ return b, nil
+}
+func appendZigzag64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toInt64()
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63)))
+ return b, nil
+}
+func appendZigzag64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toInt64()
+ if v == 0 {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63)))
+ return b, nil
+}
+func appendZigzag64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ p := *ptr.toInt64Ptr()
+ if p == nil {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ v := *p
+ b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63)))
+ return b, nil
+}
+func appendZigzag64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := *ptr.toInt64Slice()
+ for _, v := range s {
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63)))
+ }
+ return b, nil
+}
+func appendZigzag64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := *ptr.toInt64Slice()
+ if len(s) == 0 {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag&^7|WireBytes)
+ // compute size
+ n := 0
+ for _, v := range s {
+ n += SizeVarint(uint64(v<<1) ^ uint64((int64(v) >> 63)))
+ }
+ b = appendVarint(b, uint64(n))
+ for _, v := range s {
+ b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63)))
+ }
+ return b, nil
+}
+func appendBoolValue(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toBool()
+ b = appendVarint(b, wiretag)
+ if v {
+ b = append(b, 1)
+ } else {
+ b = append(b, 0)
+ }
+ return b, nil
+}
+func appendBoolValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toBool()
+ if !v {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ b = append(b, 1)
+ return b, nil
+}
+
+func appendBoolPtr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ p := *ptr.toBoolPtr()
+ if p == nil {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ if *p {
+ b = append(b, 1)
+ } else {
+ b = append(b, 0)
+ }
+ return b, nil
+}
+func appendBoolSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := *ptr.toBoolSlice()
+ for _, v := range s {
+ b = appendVarint(b, wiretag)
+ if v {
+ b = append(b, 1)
+ } else {
+ b = append(b, 0)
+ }
+ }
+ return b, nil
+}
+func appendBoolPackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := *ptr.toBoolSlice()
+ if len(s) == 0 {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag&^7|WireBytes)
+ b = appendVarint(b, uint64(len(s)))
+ for _, v := range s {
+ if v {
+ b = append(b, 1)
+ } else {
+ b = append(b, 0)
+ }
+ }
+ return b, nil
+}
+func appendStringValue(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toString()
+ if !utf8.ValidString(v) {
+ return nil, errInvalidUTF8
+ }
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(len(v)))
+ b = append(b, v...)
+ return b, nil
+}
+func appendStringValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toString()
+ if v == "" {
+ return b, nil
+ }
+ if !utf8.ValidString(v) {
+ return nil, errInvalidUTF8
+ }
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(len(v)))
+ b = append(b, v...)
+ return b, nil
+}
+func appendStringPtr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ p := *ptr.toStringPtr()
+ if p == nil {
+ return b, nil
+ }
+ v := *p
+ if !utf8.ValidString(v) {
+ return nil, errInvalidUTF8
+ }
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(len(v)))
+ b = append(b, v...)
+ return b, nil
+}
+func appendStringSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := *ptr.toStringSlice()
+ for _, v := range s {
+ if !utf8.ValidString(v) {
+ return nil, errInvalidUTF8
+ }
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(len(v)))
+ b = append(b, v...)
+ }
+ return b, nil
+}
+func appendBytes(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toBytes()
+ if v == nil {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(len(v)))
+ b = append(b, v...)
+ return b, nil
+}
+func appendBytes3(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toBytes()
+ if len(v) == 0 {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(len(v)))
+ b = append(b, v...)
+ return b, nil
+}
+func appendBytesOneof(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ v := *ptr.toBytes()
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(len(v)))
+ b = append(b, v...)
+ return b, nil
+}
+func appendBytesSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+ s := *ptr.toBytesSlice()
+ for _, v := range s {
+ b = appendVarint(b, wiretag)
+ b = appendVarint(b, uint64(len(v)))
+ b = append(b, v...)
+ }
+ return b, nil
+}
+
+// makeGroupMarshaler returns the sizer and marshaler for a group.
+// u is the marshal info of the underlying message.
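+// Groups use the legacy wire encoding: the body is bracketed by start-group and
+// end-group tags instead of a length prefix, which is why the sizer adds 2*tagsize.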
+func makeGroupMarshaler(u *marshalInfo) (sizer, marshaler) {
+ return func(ptr pointer, tagsize int) int {
+ p := ptr.getPointer()
+ if p.isNil() {
+ return 0
+ }
+ return u.size(p) + 2*tagsize
+ },
+ func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
+ p := ptr.getPointer()
+ if p.isNil() {
+ return b, nil
+ }
+ var err error
+ b = appendVarint(b, wiretag) // start group
+ b, err = u.marshal(b, p, deterministic)
+ b = appendVarint(b, wiretag+(WireEndGroup-WireStartGroup)) // end group
+ return b, err
+ }
+}
+
+// makeGroupSliceMarshaler returns the sizer and marshaler for a group slice.
+// u is the marshal info of the underlying message.
+func makeGroupSliceMarshaler(u *marshalInfo) (sizer, marshaler) {
+ return func(ptr pointer, tagsize int) int {
+ s := ptr.getPointerSlice()
+ n := 0
+ for _, v := range s {
+ if v.isNil() {
+ continue
+ }
+ n += u.size(v) + 2*tagsize
+ }
+ return n
+ },
+ func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
+ s := ptr.getPointerSlice()
+ var err, errreq error
+ for _, v := range s {
+ if v.isNil() {
+ return b, errRepeatedHasNil
+ }
+ b = appendVarint(b, wiretag) // start group
+ b, err = u.marshal(b, v, deterministic)
+ b = appendVarint(b, wiretag+(WireEndGroup-WireStartGroup)) // end group
+ if err != nil {
+ if _, ok := err.(*RequiredNotSetError); ok {
+ // Required field in submessage is not set.
+ // We record the error but keep going, to give a complete marshaling.
+ if errreq == nil {
+ errreq = err
+ }
+ continue
+ }
+ if err == ErrNil {
+ err = errRepeatedHasNil
+ }
+ return b, err
+ }
+ }
+ return b, errreq
+ }
+}
+
+// makeMessageMarshaler returns the sizer and marshaler for a message field.
+// u is the marshal info of the message.
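+// An embedded message is length-delimited on the wire: tag, varint length, then the
+// message body. The marshaler takes the length from the size cache that size filled in
+// (or recomputes it when there is no cache).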
+func makeMessageMarshaler(u *marshalInfo) (sizer, marshaler) {
+ return func(ptr pointer, tagsize int) int {
+ p := ptr.getPointer()
+ if p.isNil() {
+ return 0
+ }
+ siz := u.size(p)
+ return siz + SizeVarint(uint64(siz)) + tagsize
+ },
+ func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
+ p := ptr.getPointer()
+ if p.isNil() {
+ return b, nil
+ }
+ b = appendVarint(b, wiretag)
+ siz := u.cachedsize(p)
+ b = appendVarint(b, uint64(siz))
+ return u.marshal(b, p, deterministic)
+ }
+}
+
+// makeMessageSliceMarshaler returns the sizer and marshaler for a message slice.
+// u is the marshal info of the message.
+func makeMessageSliceMarshaler(u *marshalInfo) (sizer, marshaler) {
+ return func(ptr pointer, tagsize int) int {
+ s := ptr.getPointerSlice()
+ n := 0
+ for _, v := range s {
+ if v.isNil() {
+ continue
+ }
+ siz := u.size(v)
+ n += siz + SizeVarint(uint64(siz)) + tagsize
+ }
+ return n
+ },
+ func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
+ s := ptr.getPointerSlice()
+ var err, errreq error
+ for _, v := range s {
+ if v.isNil() {
+ return b, errRepeatedHasNil
+ }
+ b = appendVarint(b, wiretag)
+ siz := u.cachedsize(v)
+ b = appendVarint(b, uint64(siz))
+ b, err = u.marshal(b, v, deterministic)
+
+ if err != nil {
+ if _, ok := err.(*RequiredNotSetError); ok {
+ // Required field in submessage is not set.
+ // We record the error but keep going, to give a complete marshaling.
+ if errreq == nil {
+ errreq = err
+ }
+ continue
+ }
+ if err == ErrNil {
+ err = errRepeatedHasNil
+ }
+ return b, err
+ }
+ }
+ return b, errreq
+ }
+}
+
+// makeMapMarshaler returns the sizer and marshaler for a map field.
+// f is the pointer to the reflect data structure of the field.
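+// Each map entry is encoded as an implicit message with the key as field 1 and the
+// value as field 2, which is where the key/value wire tags (1<<3 and 2<<3) below come from.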
+func makeMapMarshaler(f *reflect.StructField) (sizer, marshaler) {
+ // figure out key and value type
+ t := f.Type
+ keyType := t.Key()
+ valType := t.Elem()
+ keyTags := strings.Split(f.Tag.Get("protobuf_key"), ",")
+ valTags := strings.Split(f.Tag.Get("protobuf_val"), ",")
+ keySizer, keyMarshaler := typeMarshaler(keyType, keyTags, false, false) // don't omit zero value in map
+ valSizer, valMarshaler := typeMarshaler(valType, valTags, false, false) // don't omit zero value in map
+ keyWireTag := 1<<3 | wiretype(keyTags[0])
+ valWireTag := 2<<3 | wiretype(valTags[0])
+
+ // We create an interface to get the addresses of the map key and value.
+ // If the value is pointer-typed, the interface is a direct interface and the
+ // idata itself is the value. Otherwise, the idata is the pointer to the
+ // value.
+ // The key cannot be pointer-typed.
+ valIsPtr := valType.Kind() == reflect.Ptr
+ return func(ptr pointer, tagsize int) int {
+ m := ptr.asPointerTo(t).Elem() // the map
+ n := 0
+ for _, k := range m.MapKeys() {
+ ki := k.Interface()
+ vi := m.MapIndex(k).Interface()
+ kaddr := toAddrPointer(&ki, false) // pointer to key
+ vaddr := toAddrPointer(&vi, valIsPtr) // pointer to value
+ siz := keySizer(kaddr, 1) + valSizer(vaddr, 1) // tag of key = 1 (size=1), tag of val = 2 (size=1)
+ n += siz + SizeVarint(uint64(siz)) + tagsize
+ }
+ return n
+ },
+ func(b []byte, ptr pointer, tag uint64, deterministic bool) ([]byte, error) {
+ m := ptr.asPointerTo(t).Elem() // the map
+ var err error
+ keys := m.MapKeys()
+ if len(keys) > 1 && deterministic {
+ sort.Sort(mapKeys(keys))
+ }
+ for _, k := range keys {
+ ki := k.Interface()
+ vi := m.MapIndex(k).Interface()
+ kaddr := toAddrPointer(&ki, false) // pointer to key
+ vaddr := toAddrPointer(&vi, valIsPtr) // pointer to value
+ b = appendVarint(b, tag)
+ siz := keySizer(kaddr, 1) + valSizer(vaddr, 1) // tag of key = 1 (size=1), tag of val = 2 (size=1)
+ b = appendVarint(b, uint64(siz))
+ b, err = keyMarshaler(b, kaddr, keyWireTag, deterministic)
+ if err != nil {
+ return b, err
+ }
+ b, err = valMarshaler(b, vaddr, valWireTag, deterministic)
+ if err != nil && err != ErrNil { // allow nil value in map
+ return b, err
+ }
+ }
+ return b, nil
+ }
+}
+
+// makeOneOfMarshaler returns the sizer and marshaler for a oneof field.
+// fi is the marshal info of the field.
+// f is the pointer to the reflect data structure of the field.
+func makeOneOfMarshaler(fi *marshalFieldInfo, f *reflect.StructField) (sizer, marshaler) {
+ // Oneof field is an interface. We need to get the actual data type on the fly.
+ t := f.Type
+ return func(ptr pointer, _ int) int {
+ p := ptr.getInterfacePointer()
+ if p.isNil() {
+ return 0
+ }
+ v := ptr.asPointerTo(t).Elem().Elem().Elem() // *interface -> interface -> *struct -> struct
+ telem := v.Type()
+ e := fi.oneofElems[telem]
+ return e.sizer(p, e.tagsize)
+ },
+ func(b []byte, ptr pointer, _ uint64, deterministic bool) ([]byte, error) {
+ p := ptr.getInterfacePointer()
+ if p.isNil() {
+ return b, nil
+ }
+ v := ptr.asPointerTo(t).Elem().Elem().Elem() // *interface -> interface -> *struct -> struct
+ telem := v.Type()
+ if telem.Field(0).Type.Kind() == reflect.Ptr && p.getPointer().isNil() {
+ return b, errOneofHasNil
+ }
+ e := fi.oneofElems[telem]
+ return e.marshaler(b, p, e.wiretag, deterministic)
+ }
+}
+
+// sizeExtensions computes the size of encoded data for a XXX_InternalExtensions field.
+func (u *marshalInfo) sizeExtensions(ext *XXX_InternalExtensions) int {
+ m, mu := ext.extensionsRead()
+ if m == nil {
+ return 0
+ }
+ mu.Lock()
+
+ n := 0
+ for _, e := range m {
+ if e.value == nil || e.desc == nil {
+ // Extension is only in its encoded form.
+ n += len(e.enc)
+ continue
+ }
+
+ // We don't skip extensions that have an encoded form set,
+ // because the extension value may have been mutated after
+ // the last time this function was called.
+ ei := u.getExtElemInfo(e.desc)
+ v := e.value
+ p := toAddrPointer(&v, ei.isptr)
+ n += ei.sizer(p, ei.tagsize)
+ }
+ mu.Unlock()
+ return n
+}
+
+// appendExtensions marshals a XXX_InternalExtensions field to the end of byte slice b.
+func (u *marshalInfo) appendExtensions(b []byte, ext *XXX_InternalExtensions, deterministic bool) ([]byte, error) {
+ m, mu := ext.extensionsRead()
+ if m == nil {
+ return b, nil
+ }
+ mu.Lock()
+ defer mu.Unlock()
+
+ var err error
+
+ // Fast-path for common cases: zero or one extensions.
+ // Don't bother sorting the keys.
+ if len(m) <= 1 {
+ for _, e := range m {
+ if e.value == nil || e.desc == nil {
+ // Extension is only in its encoded form.
+ b = append(b, e.enc...)
+ continue
+ }
+
+ // We don't skip extensions that have an encoded form set,
+ // because the extension value may have been mutated after
+ // the last time this function was called.
+
+ ei := u.getExtElemInfo(e.desc)
+ v := e.value
+ p := toAddrPointer(&v, ei.isptr)
+ b, err = ei.marshaler(b, p, ei.wiretag, deterministic)
+ if err != nil {
+ return b, err
+ }
+ }
+ return b, nil
+ }
+
+ // Sort the keys to provide a deterministic encoding.
+ // Not sure this is required, but the old code does it.
+ keys := make([]int, 0, len(m))
+ for k := range m {
+ keys = append(keys, int(k))
+ }
+ sort.Ints(keys)
+
+ for _, k := range keys {
+ e := m[int32(k)]
+ if e.value == nil || e.desc == nil {
+ // Extension is only in its encoded form.
+ b = append(b, e.enc...)
+ continue
+ }
+
+ // We don't skip extensions that have an encoded form set,
+ // because the extension value may have been mutated after
+ // the last time this function was called.
+
+ ei := u.getExtElemInfo(e.desc)
+ v := e.value
+ p := toAddrPointer(&v, ei.isptr)
+ b, err = ei.marshaler(b, p, ei.wiretag, deterministic)
+ if err != nil {
+ return b, err
+ }
+ }
+ return b, nil
+}
+
+// message set format is:
+// message MessageSet {
+// repeated group Item = 1 {
+// required int32 type_id = 2;
+// required string message = 3;
+// };
+// }
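+//
+// For example (illustrative values), an item with type_id 100 carrying a
+// 5-byte encoded message is laid out as:
+//	0x0b            start group, field 1
+//	0x10 0x64       type_id = 100 (field 2, varint)
+//	0x1a 0x05 ...   message bytes (field 3, length-delimited)
+//	0x0c            end group, field 1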
+
+// sizeMessageSet computes the size of encoded data for a XXX_InternalExtensions field
+// in message set format (above).
+func (u *marshalInfo) sizeMessageSet(ext *XXX_InternalExtensions) int {
+ m, mu := ext.extensionsRead()
+ if m == nil {
+ return 0
+ }
+ mu.Lock()
+
+ n := 0
+ for id, e := range m {
+ n += 2 // start group, end group. tag = 1 (size=1)
+ n += SizeVarint(uint64(id)) + 1 // type_id, tag = 2 (size=1)
+
+ if e.value == nil || e.desc == nil {
+ // Extension is only in its encoded form.
+ msgWithLen := skipVarint(e.enc) // skip old tag, but leave the length varint
+ siz := len(msgWithLen)
+ n += siz + 1 // message, tag = 3 (size=1)
+ continue
+ }
+
+ // We don't skip extensions that have an encoded form set,
+ // because the extension value may have been mutated after
+ // the last time this function was called.
+
+ ei := u.getExtElemInfo(e.desc)
+ v := e.value
+ p := toAddrPointer(&v, ei.isptr)
+ n += ei.sizer(p, 1) // message, tag = 3 (size=1)
+ }
+ mu.Unlock()
+ return n
+}
+
+// appendMessageSet marshals a XXX_InternalExtensions field in message set format (above)
+// to the end of byte slice b.
+func (u *marshalInfo) appendMessageSet(b []byte, ext *XXX_InternalExtensions, deterministic bool) ([]byte, error) {
+ m, mu := ext.extensionsRead()
+ if m == nil {
+ return b, nil
+ }
+ mu.Lock()
+ defer mu.Unlock()
+
+ var err error
+
+ // Fast-path for common cases: zero or one extensions.
+ // Don't bother sorting the keys.
+ if len(m) <= 1 {
+ for id, e := range m {
+ b = append(b, 1<<3|WireStartGroup)
+ b = append(b, 2<<3|WireVarint)
+ b = appendVarint(b, uint64(id))
+
+ if e.value == nil || e.desc == nil {
+ // Extension is only in its encoded form.
+ msgWithLen := skipVarint(e.enc) // skip old tag, but leave the length varint
+ b = append(b, 3<<3|WireBytes)
+ b = append(b, msgWithLen...)
+ b = append(b, 1<<3|WireEndGroup)
+ continue
+ }
+
+ // We don't skip extensions that have an encoded form set,
+ // because the extension value may have been mutated after
+ // the last time this function was called.
+
+ ei := u.getExtElemInfo(e.desc)
+ v := e.value
+ p := toAddrPointer(&v, ei.isptr)
+ b, err = ei.marshaler(b, p, 3<<3|WireBytes, deterministic)
+ if err != nil {
+ return b, err
+ }
+ b = append(b, 1<<3|WireEndGroup)
+ }
+ return b, nil
+ }
+
+ // Sort the keys to provide a deterministic encoding.
+ keys := make([]int, 0, len(m))
+ for k := range m {
+ keys = append(keys, int(k))
+ }
+ sort.Ints(keys)
+
+ for _, id := range keys {
+ e := m[int32(id)]
+ b = append(b, 1<<3|WireStartGroup)
+ b = append(b, 2<<3|WireVarint)
+ b = appendVarint(b, uint64(id))
+
+ if e.value == nil || e.desc == nil {
+ // Extension is only in its encoded form.
+ msgWithLen := skipVarint(e.enc) // skip old tag, but leave the length varint
+ b = append(b, 3<<3|WireBytes)
+ b = append(b, msgWithLen...)
+ b = append(b, 1<<3|WireEndGroup)
+ continue
+ }
+
+ // We don't skip extensions that have an encoded form set,
+ // because the extension value may have been mutated after
+ // the last time this function was called.
+
+ ei := u.getExtElemInfo(e.desc)
+ v := e.value
+ p := toAddrPointer(&v, ei.isptr)
+ b, err = ei.marshaler(b, p, 3<<3|WireBytes, deterministic)
+ b = append(b, 1<<3|WireEndGroup)
+ if err != nil {
+ return b, err
+ }
+ }
+ return b, nil
+}
+
+// sizeV1Extensions computes the size of encoded data for a V1-API extension field.
+func (u *marshalInfo) sizeV1Extensions(m map[int32]Extension) int {
+ if m == nil {
+ return 0
+ }
+
+ n := 0
+ for _, e := range m {
+ if e.value == nil || e.desc == nil {
+ // Extension is only in its encoded form.
+ n += len(e.enc)
+ continue
+ }
+
+ // We don't skip extensions that have an encoded form set,
+ // because the extension value may have been mutated after
+ // the last time this function was called.
+
+ ei := u.getExtElemInfo(e.desc)
+ v := e.value
+ p := toAddrPointer(&v, ei.isptr)
+ n += ei.sizer(p, ei.tagsize)
+ }
+ return n
+}
+
+// appendV1Extensions marshals a V1-API extension field to the end of byte slice b.
+func (u *marshalInfo) appendV1Extensions(b []byte, m map[int32]Extension, deterministic bool) ([]byte, error) {
+ if m == nil {
+ return b, nil
+ }
+
+ // Sort the keys to provide a deterministic encoding.
+ keys := make([]int, 0, len(m))
+ for k := range m {
+ keys = append(keys, int(k))
+ }
+ sort.Ints(keys)
+
+ var err error
+ for _, k := range keys {
+ e := m[int32(k)]
+ if e.value == nil || e.desc == nil {
+ // Extension is only in its encoded form.
+ b = append(b, e.enc...)
+ continue
+ }
+
+ // We don't skip extensions that have an encoded form set,
+ // because the extension value may have been mutated after
+ // the last time this function was called.
+
+ ei := u.getExtElemInfo(e.desc)
+ v := e.value
+ p := toAddrPointer(&v, ei.isptr)
+ b, err = ei.marshaler(b, p, ei.wiretag, deterministic)
+ if err != nil {
+ return b, err
+ }
+ }
+ return b, nil
+}
+
+// newMarshaler is the interface representing objects that can marshal themselves.
+//
+// This exists to support protoc-gen-go generated messages.
+// The proto package will stop type-asserting to this interface in the future.
+//
+// DO NOT DEPEND ON THIS.
+type newMarshaler interface {
+ XXX_Size() int
+ XXX_Marshal(b []byte, deterministic bool) ([]byte, error)
+}
+
+// Size returns the encoded size of a protocol buffer message.
+// This is the main entry point.
+func Size(pb Message) int {
+ if m, ok := pb.(newMarshaler); ok {
+ return m.XXX_Size()
+ }
+ if m, ok := pb.(Marshaler); ok {
+ // If the message can marshal itself, let it do it, for compatibility.
+ // NOTE: This is not efficient.
+ b, _ := m.Marshal()
+ return len(b)
+ }
+ // in case somehow we didn't generate the wrapper
+ if pb == nil {
+ return 0
+ }
+ var info InternalMessageInfo
+ return info.Size(pb)
+}
+
+// Marshal takes a protocol buffer message
+// and encodes it into the wire format, returning the data.
+// This is the main entry point.
+func Marshal(pb Message) ([]byte, error) {
+ if m, ok := pb.(newMarshaler); ok {
+ siz := m.XXX_Size()
+ b := make([]byte, 0, siz)
+ return m.XXX_Marshal(b, false)
+ }
+ if m, ok := pb.(Marshaler); ok {
+ // If the message can marshal itself, let it do it, for compatibility.
+ // NOTE: This is not efficient.
+ return m.Marshal()
+ }
+ // in case somehow we didn't generate the wrapper
+ if pb == nil {
+ return nil, ErrNil
+ }
+ var info InternalMessageInfo
+ siz := info.Size(pb)
+ b := make([]byte, 0, siz)
+ return info.Marshal(b, pb, false)
+}
+
+// Marshal takes a protocol buffer message
+// and encodes it into the wire format, writing the result to the
+// Buffer.
+// This is an alternative entry point. It is not necessary to use
+// a Buffer for most applications.
+func (p *Buffer) Marshal(pb Message) error {
+ var err error
+ if m, ok := pb.(newMarshaler); ok {
+ siz := m.XXX_Size()
+ p.grow(siz) // make sure buf has enough capacity
+ p.buf, err = m.XXX_Marshal(p.buf, p.deterministic)
+ return err
+ }
+ if m, ok := pb.(Marshaler); ok {
+ // If the message can marshal itself, let it do it, for compatibility.
+ // NOTE: This is not efficient.
+ b, err := m.Marshal()
+ p.buf = append(p.buf, b...)
+ return err
+ }
+ // in case somehow we didn't generate the wrapper
+ if pb == nil {
+ return ErrNil
+ }
+ var info InternalMessageInfo
+ siz := info.Size(pb)
+ p.grow(siz) // make sure buf has enough capacity
+ p.buf, err = info.Marshal(p.buf, pb, p.deterministic)
+ return err
+}
+
+// grow grows the buffer's capacity, if necessary, to guarantee space for
+// another n bytes. After grow(n), at least n bytes can be written to the
+// buffer without another allocation.
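+// When the buffer must grow, the new capacity is at least double the current
+// length, so repeated appends amortize their allocations.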
+func (p *Buffer) grow(n int) {
+ need := len(p.buf) + n
+ if need <= cap(p.buf) {
+ return
+ }
+ newCap := len(p.buf) * 2
+ if newCap < need {
+ newCap = need
+ }
+ p.buf = append(make([]byte, 0, newCap), p.buf...)
+}
diff --git a/vendor/github.com/golang/protobuf/proto/table_merge.go b/vendor/github.com/golang/protobuf/proto/table_merge.go
new file mode 100644
index 00000000..5525def6
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/table_merge.go
@@ -0,0 +1,654 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2016 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+import (
+ "fmt"
+ "reflect"
+ "strings"
+ "sync"
+ "sync/atomic"
+)
+
+// Merge merges the src message into dst.
+// This assumes that dst and src are of the same type and are non-nil.
+func (a *InternalMessageInfo) Merge(dst, src Message) {
+ mi := atomicLoadMergeInfo(&a.merge)
+ if mi == nil {
+ mi = getMergeInfo(reflect.TypeOf(dst).Elem())
+ atomicStoreMergeInfo(&a.merge, mi)
+ }
+ mi.merge(toPointer(&dst), toPointer(&src))
+}
+
+type mergeInfo struct {
+ typ reflect.Type
+
+ initialized int32 // 0: only typ is valid, 1: everything is valid
+ lock sync.Mutex
+
+ fields []mergeFieldInfo
+ unrecognized field // Offset of XXX_unrecognized
+}
+
+type mergeFieldInfo struct {
+ field field // Offset of field, guaranteed to be valid
+
+ // isPointer reports whether the value in the field is a pointer.
+ // This is true for the following situations:
+ // * Pointer to struct
+ // * Pointer to basic type (proto2 only)
+ // * Slice (first value in slice header is a pointer)
+ // * String (first value in string header is a pointer)
+ isPointer bool
+
+ // basicWidth reports the width of the field assuming that it is directly
+ // embedded in the struct (as is the case for basic types in proto3).
+ // The possible values are:
+ // 0: invalid
+ // 1: bool
+ // 4: int32, uint32, float32
+ // 8: int64, uint64, float64
+ basicWidth int
+
+ // Where dst and src are pointers to the types being merged.
+ merge func(dst, src pointer)
+}
+
+var (
+ mergeInfoMap = map[reflect.Type]*mergeInfo{}
+ mergeInfoLock sync.Mutex
+)
+
+func getMergeInfo(t reflect.Type) *mergeInfo {
+ mergeInfoLock.Lock()
+ defer mergeInfoLock.Unlock()
+ mi := mergeInfoMap[t]
+ if mi == nil {
+ mi = &mergeInfo{typ: t}
+ mergeInfoMap[t] = mi
+ }
+ return mi
+}
+
+// merge merges src into dst assuming they are both of type *mi.typ.
+func (mi *mergeInfo) merge(dst, src pointer) {
+ if dst.isNil() {
+ panic("proto: nil destination")
+ }
+ if src.isNil() {
+ return // Nothing to do.
+ }
+
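+	// Lazily compute the merge information on first use. computeMergeInfo
+	// acquires mi.lock and re-checks initialized, so concurrent first calls are safe.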
+ if atomic.LoadInt32(&mi.initialized) == 0 {
+ mi.computeMergeInfo()
+ }
+
+ for _, fi := range mi.fields {
+ sfp := src.offset(fi.field)
+
+ // As an optimization, we can avoid the merge function call cost
+ // if we know for sure that the source will have no effect
+ // by checking if it is the zero value.
+ if unsafeAllowed {
+ if fi.isPointer && sfp.getPointer().isNil() { // Could be slice or string
+ continue
+ }
+ if fi.basicWidth > 0 {
+ switch {
+ case fi.basicWidth == 1 && !*sfp.toBool():
+ continue
+ case fi.basicWidth == 4 && *sfp.toUint32() == 0:
+ continue
+ case fi.basicWidth == 8 && *sfp.toUint64() == 0:
+ continue
+ }
+ }
+ }
+
+ dfp := dst.offset(fi.field)
+ fi.merge(dfp, sfp)
+ }
+
+ // TODO: Make this faster?
+ out := dst.asPointerTo(mi.typ).Elem()
+ in := src.asPointerTo(mi.typ).Elem()
+ if emIn, err := extendable(in.Addr().Interface()); err == nil {
+ emOut, _ := extendable(out.Addr().Interface())
+ mIn, muIn := emIn.extensionsRead()
+ if mIn != nil {
+ mOut := emOut.extensionsWrite()
+ muIn.Lock()
+ mergeExtension(mOut, mIn)
+ muIn.Unlock()
+ }
+ }
+
+ if mi.unrecognized.IsValid() {
+ if b := *src.offset(mi.unrecognized).toBytes(); len(b) > 0 {
+ *dst.offset(mi.unrecognized).toBytes() = append([]byte(nil), b...)
+ }
+ }
+}
+
+func (mi *mergeInfo) computeMergeInfo() {
+ mi.lock.Lock()
+ defer mi.lock.Unlock()
+ if mi.initialized != 0 {
+ return
+ }
+ t := mi.typ
+ n := t.NumField()
+
+ props := GetProperties(t)
+ for i := 0; i < n; i++ {
+ f := t.Field(i)
+ if strings.HasPrefix(f.Name, "XXX_") {
+ continue
+ }
+
+ mfi := mergeFieldInfo{field: toField(&f)}
+ tf := f.Type
+
+ // As an optimization, we can avoid the merge function call cost
+ // if we know for sure that the source will have no effect
+ // by checking if it is the zero value.
+ if unsafeAllowed {
+ switch tf.Kind() {
+ case reflect.Ptr, reflect.Slice, reflect.String:
+ // As a special case, we assume slices and strings are pointers
+				// since we know that the first field in the SliceHeader or
+ // StringHeader is a data pointer.
+ mfi.isPointer = true
+ case reflect.Bool:
+ mfi.basicWidth = 1
+ case reflect.Int32, reflect.Uint32, reflect.Float32:
+ mfi.basicWidth = 4
+ case reflect.Int64, reflect.Uint64, reflect.Float64:
+ mfi.basicWidth = 8
+ }
+ }
+
+ // Unwrap tf to get at its most basic type.
+ var isPointer, isSlice bool
+ if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 {
+ isSlice = true
+ tf = tf.Elem()
+ }
+ if tf.Kind() == reflect.Ptr {
+ isPointer = true
+ tf = tf.Elem()
+ }
+ if isPointer && isSlice && tf.Kind() != reflect.Struct {
+ panic("both pointer and slice for basic type in " + tf.Name())
+ }
+
+ switch tf.Kind() {
+ case reflect.Int32:
+ switch {
+ case isSlice: // E.g., []int32
+ mfi.merge = func(dst, src pointer) {
+ // NOTE: toInt32Slice is not defined (see pointer_reflect.go).
+ /*
+ sfsp := src.toInt32Slice()
+ if *sfsp != nil {
+ dfsp := dst.toInt32Slice()
+ *dfsp = append(*dfsp, *sfsp...)
+ if *dfsp == nil {
+								*dfsp = []int32{}
+ }
+ }
+ */
+ sfs := src.getInt32Slice()
+ if sfs != nil {
+ dfs := dst.getInt32Slice()
+ dfs = append(dfs, sfs...)
+ if dfs == nil {
+ dfs = []int32{}
+ }
+ dst.setInt32Slice(dfs)
+ }
+ }
+ case isPointer: // E.g., *int32
+ mfi.merge = func(dst, src pointer) {
+ // NOTE: toInt32Ptr is not defined (see pointer_reflect.go).
+ /*
+ sfpp := src.toInt32Ptr()
+ if *sfpp != nil {
+ dfpp := dst.toInt32Ptr()
+ if *dfpp == nil {
+ *dfpp = Int32(**sfpp)
+ } else {
+ **dfpp = **sfpp
+ }
+ }
+ */
+ sfp := src.getInt32Ptr()
+ if sfp != nil {
+ dfp := dst.getInt32Ptr()
+ if dfp == nil {
+ dst.setInt32Ptr(*sfp)
+ } else {
+ *dfp = *sfp
+ }
+ }
+ }
+ default: // E.g., int32
+ mfi.merge = func(dst, src pointer) {
+ if v := *src.toInt32(); v != 0 {
+ *dst.toInt32() = v
+ }
+ }
+ }
+ case reflect.Int64:
+ switch {
+ case isSlice: // E.g., []int64
+ mfi.merge = func(dst, src pointer) {
+ sfsp := src.toInt64Slice()
+ if *sfsp != nil {
+ dfsp := dst.toInt64Slice()
+ *dfsp = append(*dfsp, *sfsp...)
+ if *dfsp == nil {
+ *dfsp = []int64{}
+ }
+ }
+ }
+ case isPointer: // E.g., *int64
+ mfi.merge = func(dst, src pointer) {
+ sfpp := src.toInt64Ptr()
+ if *sfpp != nil {
+ dfpp := dst.toInt64Ptr()
+ if *dfpp == nil {
+ *dfpp = Int64(**sfpp)
+ } else {
+ **dfpp = **sfpp
+ }
+ }
+ }
+ default: // E.g., int64
+ mfi.merge = func(dst, src pointer) {
+ if v := *src.toInt64(); v != 0 {
+ *dst.toInt64() = v
+ }
+ }
+ }
+ case reflect.Uint32:
+ switch {
+ case isSlice: // E.g., []uint32
+ mfi.merge = func(dst, src pointer) {
+ sfsp := src.toUint32Slice()
+ if *sfsp != nil {
+ dfsp := dst.toUint32Slice()
+ *dfsp = append(*dfsp, *sfsp...)
+ if *dfsp == nil {
+ *dfsp = []uint32{}
+ }
+ }
+ }
+ case isPointer: // E.g., *uint32
+ mfi.merge = func(dst, src pointer) {
+ sfpp := src.toUint32Ptr()
+ if *sfpp != nil {
+ dfpp := dst.toUint32Ptr()
+ if *dfpp == nil {
+ *dfpp = Uint32(**sfpp)
+ } else {
+ **dfpp = **sfpp
+ }
+ }
+ }
+ default: // E.g., uint32
+ mfi.merge = func(dst, src pointer) {
+ if v := *src.toUint32(); v != 0 {
+ *dst.toUint32() = v
+ }
+ }
+ }
+ case reflect.Uint64:
+ switch {
+ case isSlice: // E.g., []uint64
+ mfi.merge = func(dst, src pointer) {
+ sfsp := src.toUint64Slice()
+ if *sfsp != nil {
+ dfsp := dst.toUint64Slice()
+ *dfsp = append(*dfsp, *sfsp...)
+ if *dfsp == nil {
+ *dfsp = []uint64{}
+ }
+ }
+ }
+ case isPointer: // E.g., *uint64
+ mfi.merge = func(dst, src pointer) {
+ sfpp := src.toUint64Ptr()
+ if *sfpp != nil {
+ dfpp := dst.toUint64Ptr()
+ if *dfpp == nil {
+ *dfpp = Uint64(**sfpp)
+ } else {
+ **dfpp = **sfpp
+ }
+ }
+ }
+ default: // E.g., uint64
+ mfi.merge = func(dst, src pointer) {
+ if v := *src.toUint64(); v != 0 {
+ *dst.toUint64() = v
+ }
+ }
+ }
+ case reflect.Float32:
+ switch {
+ case isSlice: // E.g., []float32
+ mfi.merge = func(dst, src pointer) {
+ sfsp := src.toFloat32Slice()
+ if *sfsp != nil {
+ dfsp := dst.toFloat32Slice()
+ *dfsp = append(*dfsp, *sfsp...)
+ if *dfsp == nil {
+ *dfsp = []float32{}
+ }
+ }
+ }
+ case isPointer: // E.g., *float32
+ mfi.merge = func(dst, src pointer) {
+ sfpp := src.toFloat32Ptr()
+ if *sfpp != nil {
+ dfpp := dst.toFloat32Ptr()
+ if *dfpp == nil {
+ *dfpp = Float32(**sfpp)
+ } else {
+ **dfpp = **sfpp
+ }
+ }
+ }
+ default: // E.g., float32
+ mfi.merge = func(dst, src pointer) {
+ if v := *src.toFloat32(); v != 0 {
+ *dst.toFloat32() = v
+ }
+ }
+ }
+ case reflect.Float64:
+ switch {
+ case isSlice: // E.g., []float64
+ mfi.merge = func(dst, src pointer) {
+ sfsp := src.toFloat64Slice()
+ if *sfsp != nil {
+ dfsp := dst.toFloat64Slice()
+ *dfsp = append(*dfsp, *sfsp...)
+ if *dfsp == nil {
+ *dfsp = []float64{}
+ }
+ }
+ }
+ case isPointer: // E.g., *float64
+ mfi.merge = func(dst, src pointer) {
+ sfpp := src.toFloat64Ptr()
+ if *sfpp != nil {
+ dfpp := dst.toFloat64Ptr()
+ if *dfpp == nil {
+ *dfpp = Float64(**sfpp)
+ } else {
+ **dfpp = **sfpp
+ }
+ }
+ }
+ default: // E.g., float64
+ mfi.merge = func(dst, src pointer) {
+ if v := *src.toFloat64(); v != 0 {
+ *dst.toFloat64() = v
+ }
+ }
+ }
+ case reflect.Bool:
+ switch {
+ case isSlice: // E.g., []bool
+ mfi.merge = func(dst, src pointer) {
+ sfsp := src.toBoolSlice()
+ if *sfsp != nil {
+ dfsp := dst.toBoolSlice()
+ *dfsp = append(*dfsp, *sfsp...)
+ if *dfsp == nil {
+ *dfsp = []bool{}
+ }
+ }
+ }
+ case isPointer: // E.g., *bool
+ mfi.merge = func(dst, src pointer) {
+ sfpp := src.toBoolPtr()
+ if *sfpp != nil {
+ dfpp := dst.toBoolPtr()
+ if *dfpp == nil {
+ *dfpp = Bool(**sfpp)
+ } else {
+ **dfpp = **sfpp
+ }
+ }
+ }
+ default: // E.g., bool
+ mfi.merge = func(dst, src pointer) {
+ if v := *src.toBool(); v {
+ *dst.toBool() = v
+ }
+ }
+ }
+ case reflect.String:
+ switch {
+ case isSlice: // E.g., []string
+ mfi.merge = func(dst, src pointer) {
+ sfsp := src.toStringSlice()
+ if *sfsp != nil {
+ dfsp := dst.toStringSlice()
+ *dfsp = append(*dfsp, *sfsp...)
+ if *dfsp == nil {
+ *dfsp = []string{}
+ }
+ }
+ }
+ case isPointer: // E.g., *string
+ mfi.merge = func(dst, src pointer) {
+ sfpp := src.toStringPtr()
+ if *sfpp != nil {
+ dfpp := dst.toStringPtr()
+ if *dfpp == nil {
+ *dfpp = String(**sfpp)
+ } else {
+ **dfpp = **sfpp
+ }
+ }
+ }
+ default: // E.g., string
+ mfi.merge = func(dst, src pointer) {
+ if v := *src.toString(); v != "" {
+ *dst.toString() = v
+ }
+ }
+ }
+ case reflect.Slice:
+ isProto3 := props.Prop[i].proto3
+ switch {
+ case isPointer:
+ panic("bad pointer in byte slice case in " + tf.Name())
+ case tf.Elem().Kind() != reflect.Uint8:
+ panic("bad element kind in byte slice case in " + tf.Name())
+ case isSlice: // E.g., [][]byte
+ mfi.merge = func(dst, src pointer) {
+ sbsp := src.toBytesSlice()
+ if *sbsp != nil {
+ dbsp := dst.toBytesSlice()
+ for _, sb := range *sbsp {
+ if sb == nil {
+ *dbsp = append(*dbsp, nil)
+ } else {
+ *dbsp = append(*dbsp, append([]byte{}, sb...))
+ }
+ }
+ if *dbsp == nil {
+ *dbsp = [][]byte{}
+ }
+ }
+ }
+ default: // E.g., []byte
+ mfi.merge = func(dst, src pointer) {
+ sbp := src.toBytes()
+ if *sbp != nil {
+ dbp := dst.toBytes()
+ if !isProto3 || len(*sbp) > 0 {
+ *dbp = append([]byte{}, *sbp...)
+ }
+ }
+ }
+ }
+ case reflect.Struct:
+ switch {
+ case !isPointer:
+ panic(fmt.Sprintf("message field %s without pointer", tf))
+ case isSlice: // E.g., []*pb.T
+ mi := getMergeInfo(tf)
+ mfi.merge = func(dst, src pointer) {
+ sps := src.getPointerSlice()
+ if sps != nil {
+ dps := dst.getPointerSlice()
+ for _, sp := range sps {
+ var dp pointer
+ if !sp.isNil() {
+ dp = valToPointer(reflect.New(tf))
+ mi.merge(dp, sp)
+ }
+ dps = append(dps, dp)
+ }
+ if dps == nil {
+ dps = []pointer{}
+ }
+ dst.setPointerSlice(dps)
+ }
+ }
+ default: // E.g., *pb.T
+ mi := getMergeInfo(tf)
+ mfi.merge = func(dst, src pointer) {
+ sp := src.getPointer()
+ if !sp.isNil() {
+ dp := dst.getPointer()
+ if dp.isNil() {
+ dp = valToPointer(reflect.New(tf))
+ dst.setPointer(dp)
+ }
+ mi.merge(dp, sp)
+ }
+ }
+ }
+ case reflect.Map:
+ switch {
+ case isPointer || isSlice:
+ panic("bad pointer or slice in map case in " + tf.Name())
+ default: // E.g., map[K]V
+ mfi.merge = func(dst, src pointer) {
+ sm := src.asPointerTo(tf).Elem()
+ if sm.Len() == 0 {
+ return
+ }
+ dm := dst.asPointerTo(tf).Elem()
+ if dm.IsNil() {
+ dm.Set(reflect.MakeMap(tf))
+ }
+
+ switch tf.Elem().Kind() {
+ case reflect.Ptr: // Proto struct (e.g., *T)
+ for _, key := range sm.MapKeys() {
+ val := sm.MapIndex(key)
+ val = reflect.ValueOf(Clone(val.Interface().(Message)))
+ dm.SetMapIndex(key, val)
+ }
+ case reflect.Slice: // E.g. Bytes type (e.g., []byte)
+ for _, key := range sm.MapKeys() {
+ val := sm.MapIndex(key)
+ val = reflect.ValueOf(append([]byte{}, val.Bytes()...))
+ dm.SetMapIndex(key, val)
+ }
+ default: // Basic type (e.g., string)
+ for _, key := range sm.MapKeys() {
+ val := sm.MapIndex(key)
+ dm.SetMapIndex(key, val)
+ }
+ }
+ }
+ }
+ case reflect.Interface:
+ // Must be oneof field.
+ switch {
+ case isPointer || isSlice:
+ panic("bad pointer or slice in interface case in " + tf.Name())
+ default: // E.g., interface{}
+ // TODO: Make this faster?
+ mfi.merge = func(dst, src pointer) {
+ su := src.asPointerTo(tf).Elem()
+ if !su.IsNil() {
+ du := dst.asPointerTo(tf).Elem()
+ typ := su.Elem().Type()
+ if du.IsNil() || du.Elem().Type() != typ {
+ du.Set(reflect.New(typ.Elem())) // Initialize interface if empty
+ }
+ sv := su.Elem().Elem().Field(0)
+ if sv.Kind() == reflect.Ptr && sv.IsNil() {
+ return
+ }
+ dv := du.Elem().Elem().Field(0)
+ if dv.Kind() == reflect.Ptr && dv.IsNil() {
+ dv.Set(reflect.New(sv.Type().Elem())) // Initialize proto message if empty
+ }
+ switch sv.Type().Kind() {
+ case reflect.Ptr: // Proto struct (e.g., *T)
+ Merge(dv.Interface().(Message), sv.Interface().(Message))
+ case reflect.Slice: // E.g. Bytes type (e.g., []byte)
+ dv.Set(reflect.ValueOf(append([]byte{}, sv.Bytes()...)))
+ default: // Basic type (e.g., string)
+ dv.Set(sv)
+ }
+ }
+ }
+ }
+ default:
+ panic(fmt.Sprintf("merger not found for type:%s", tf))
+ }
+ mi.fields = append(mi.fields, mfi)
+ }
+
+ mi.unrecognized = invalidField
+ if f, ok := t.FieldByName("XXX_unrecognized"); ok {
+ if f.Type != reflect.TypeOf([]byte{}) {
+ panic("expected XXX_unrecognized to be of type []byte")
+ }
+ mi.unrecognized = toField(&f)
+ }
+
+ atomic.StoreInt32(&mi.initialized, 1)
+}
diff --git a/vendor/github.com/golang/protobuf/proto/table_unmarshal.go b/vendor/github.com/golang/protobuf/proto/table_unmarshal.go
new file mode 100644
index 00000000..55f0340a
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/table_unmarshal.go
@@ -0,0 +1,1967 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2016 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "math"
+ "reflect"
+ "strconv"
+ "strings"
+ "sync"
+ "sync/atomic"
+ "unicode/utf8"
+)
+
+// Unmarshal is the entry point from the generated .pb.go files.
+// This function is not intended to be used by non-generated code.
+// This function is not subject to any compatibility guarantee.
+// msg contains a pointer to a protocol buffer struct.
+// b is the data to be unmarshaled into the protocol buffer.
+// a is a pointer to a place to store cached unmarshal information.
+func (a *InternalMessageInfo) Unmarshal(msg Message, b []byte) error {
+ // Load the unmarshal information for this message type.
+ // The atomic load ensures memory consistency.
+ u := atomicLoadUnmarshalInfo(&a.unmarshal)
+ if u == nil {
+ // Slow path: find unmarshal info for msg, update a with it.
+ u = getUnmarshalInfo(reflect.TypeOf(msg).Elem())
+ atomicStoreUnmarshalInfo(&a.unmarshal, u)
+ }
+ // Then do the unmarshaling.
+ err := u.unmarshal(toPointer(&msg), b)
+ return err
+}
+
+type unmarshalInfo struct {
+ typ reflect.Type // type of the protobuf struct
+
+ // 0 = only typ field is initialized
+ // 1 = completely initialized
+ initialized int32
+ lock sync.Mutex // prevents double initialization
+ dense []unmarshalFieldInfo // fields indexed by tag #
+ sparse map[uint64]unmarshalFieldInfo // fields indexed by tag #
+ reqFields []string // names of required fields
+ reqMask uint64 // 1<<len(reqFields)-1
+ unrecognized field // offset of []byte to put unrecognized data (or invalidField if we should throw it away)
+ extensions field // offset of extensions field (of type proto.XXX_InternalExtensions), or invalidField if it does not exist
+ oldExtensions field // offset of old-form extensions field (of type map[int]Extension)
+ extensionRanges []ExtensionRange // if non-nil, implies extensions field is valid
+ isMessageSet bool // if true, implies extensions field is valid
+}
+
+// An unmarshaler takes a stream of bytes and a pointer to a field of a message.
+// It decodes the field, stores it at f, and returns the unused bytes.
+// w is the wire encoding.
+// b is the data after the tag and wire encoding have been read.
+type unmarshaler func(b []byte, f pointer, w int) ([]byte, error)
+
+type unmarshalFieldInfo struct {
+ // location of the field in the proto message structure.
+ field field
+
+ // function to unmarshal the data for the field.
+ unmarshal unmarshaler
+
+ // if a required field, contains a single set bit at this field's index in the required field list.
+ reqMask uint64
+}
+
+var (
+ unmarshalInfoMap = map[reflect.Type]*unmarshalInfo{}
+ unmarshalInfoLock sync.Mutex
+)
+
+// getUnmarshalInfo returns the data structure which can be
+// subsequently used to unmarshal a message of the given type.
+// t is the type of the message (note: not pointer to message).
+func getUnmarshalInfo(t reflect.Type) *unmarshalInfo {
+ // It would be correct to return a new unmarshalInfo
+ // unconditionally. We would end up allocating one
+ // per occurrence of that type as a message or submessage.
+ // We use a cache here just to reduce memory usage.
+ unmarshalInfoLock.Lock()
+ defer unmarshalInfoLock.Unlock()
+ u := unmarshalInfoMap[t]
+ if u == nil {
+ u = &unmarshalInfo{typ: t}
+ // Note: we just set the type here. The rest of the fields
+ // will be initialized on first use.
+ unmarshalInfoMap[t] = u
+ }
+ return u
+}
+
+// unmarshal does the main work of unmarshaling a message.
+// u provides type information used to unmarshal the message.
+// m is a pointer to a protocol buffer message.
+// b is a byte stream to unmarshal into m.
+// This is the top routine used when recursively unmarshaling submessages.
+func (u *unmarshalInfo) unmarshal(m pointer, b []byte) error {
+ if atomic.LoadInt32(&u.initialized) == 0 {
+ u.computeUnmarshalInfo()
+ }
+ if u.isMessageSet {
+ return UnmarshalMessageSet(b, m.offset(u.extensions).toExtensions())
+ }
+ var reqMask uint64 // bitmask of required fields we've seen.
+ var rnse *RequiredNotSetError // an instance of a RequiredNotSetError returned by a submessage.
+ for len(b) > 0 {
+ // Read tag and wire type.
+ // Special case 1 and 2 byte varints.
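+		// For example, the two bytes 0x96 0x01 decode to 150: each byte
+		// contributes its low 7 bits, least-significant group first.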
+ var x uint64
+ if b[0] < 128 {
+ x = uint64(b[0])
+ b = b[1:]
+ } else if len(b) >= 2 && b[1] < 128 {
+ x = uint64(b[0]&0x7f) + uint64(b[1])<<7
+ b = b[2:]
+ } else {
+ var n int
+ x, n = decodeVarint(b)
+ if n == 0 {
+ return io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ }
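+		// The decoded key packs the field number in the upper bits and the
+		// wire type in the low 3 bits.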
+ tag := x >> 3
+ wire := int(x) & 7
+
+ // Dispatch on the tag to one of the unmarshal* functions below.
+ var f unmarshalFieldInfo
+ if tag < uint64(len(u.dense)) {
+ f = u.dense[tag]
+ } else {
+ f = u.sparse[tag]
+ }
+ if fn := f.unmarshal; fn != nil {
+ var err error
+ b, err = fn(b, m.offset(f.field), wire)
+ if err == nil {
+ reqMask |= f.reqMask
+ continue
+ }
+ if r, ok := err.(*RequiredNotSetError); ok {
+ // Remember this error, but keep parsing. We need to produce
+ // a full parse even if a required field is missing.
+ rnse = r
+ reqMask |= f.reqMask
+ continue
+ }
+ if err != errInternalBadWireType {
+ return err
+ }
+ // Fragments with bad wire type are treated as unknown fields.
+ }
+
+ // Unknown tag.
+ if !u.unrecognized.IsValid() {
+ // Don't keep unrecognized data; just skip it.
+ var err error
+ b, err = skipField(b, wire)
+ if err != nil {
+ return err
+ }
+ continue
+ }
+ // Keep unrecognized data around.
+ // maybe in extensions, maybe in the unrecognized field.
+ z := m.offset(u.unrecognized).toBytes()
+ var emap map[int32]Extension
+ var e Extension
+ for _, r := range u.extensionRanges {
+ if uint64(r.Start) <= tag && tag <= uint64(r.End) {
+ if u.extensions.IsValid() {
+ mp := m.offset(u.extensions).toExtensions()
+ emap = mp.extensionsWrite()
+ e = emap[int32(tag)]
+ z = &e.enc
+ break
+ }
+ if u.oldExtensions.IsValid() {
+ p := m.offset(u.oldExtensions).toOldExtensions()
+ emap = *p
+ if emap == nil {
+ emap = map[int32]Extension{}
+ *p = emap
+ }
+ e = emap[int32(tag)]
+ z = &e.enc
+ break
+ }
+ panic("no extensions field available")
+ }
+ }
+
+ // Use wire type to skip data.
+ var err error
+ b0 := b
+ b, err = skipField(b, wire)
+ if err != nil {
+ return err
+ }
+ *z = encodeVarint(*z, tag<<3|uint64(wire))
+ *z = append(*z, b0[:len(b0)-len(b)]...)
+
+ if emap != nil {
+ emap[int32(tag)] = e
+ }
+ }
+ if rnse != nil {
+ // A required field of a submessage/group is missing. Return that error.
+ return rnse
+ }
+ if reqMask != u.reqMask {
+ // A required field of this message is missing.
+ for _, n := range u.reqFields {
+ if reqMask&1 == 0 {
+ return &RequiredNotSetError{n}
+ }
+ reqMask >>= 1
+ }
+ }
+ return nil
+}
+
+// computeUnmarshalInfo fills in u with information for use
+// in unmarshaling protocol buffers of type u.typ.
+func (u *unmarshalInfo) computeUnmarshalInfo() {
+ u.lock.Lock()
+ defer u.lock.Unlock()
+ if u.initialized != 0 {
+ return
+ }
+ t := u.typ
+ n := t.NumField()
+
+ // Set up the "not found" value for the unrecognized byte buffer.
+ // This is the default for proto3.
+ u.unrecognized = invalidField
+ u.extensions = invalidField
+ u.oldExtensions = invalidField
+
+ // List of the generated type and offset for each oneof field.
+ type oneofField struct {
+ ityp reflect.Type // interface type of oneof field
+ field field // offset in containing message
+ }
+ var oneofFields []oneofField
+
+ for i := 0; i < n; i++ {
+ f := t.Field(i)
+ if f.Name == "XXX_unrecognized" {
+ // The byte slice used to hold unrecognized input is special.
+ if f.Type != reflect.TypeOf(([]byte)(nil)) {
+ panic("bad type for XXX_unrecognized field: " + f.Type.Name())
+ }
+ u.unrecognized = toField(&f)
+ continue
+ }
+ if f.Name == "XXX_InternalExtensions" {
+ // Ditto here.
+ if f.Type != reflect.TypeOf(XXX_InternalExtensions{}) {
+ panic("bad type for XXX_InternalExtensions field: " + f.Type.Name())
+ }
+ u.extensions = toField(&f)
+ if f.Tag.Get("protobuf_messageset") == "1" {
+ u.isMessageSet = true
+ }
+ continue
+ }
+ if f.Name == "XXX_extensions" {
+ // An older form of the extensions field.
+ if f.Type != reflect.TypeOf((map[int32]Extension)(nil)) {
+ panic("bad type for XXX_extensions field: " + f.Type.Name())
+ }
+ u.oldExtensions = toField(&f)
+ continue
+ }
+ if f.Name == "XXX_NoUnkeyedLiteral" || f.Name == "XXX_sizecache" {
+ continue
+ }
+
+ oneof := f.Tag.Get("protobuf_oneof")
+ if oneof != "" {
+ oneofFields = append(oneofFields, oneofField{f.Type, toField(&f)})
+ // The rest of oneof processing happens below.
+ continue
+ }
+
+ tags := f.Tag.Get("protobuf")
+ tagArray := strings.Split(tags, ",")
+ if len(tagArray) < 2 {
+ panic("protobuf tag not enough fields in " + t.Name() + "." + f.Name + ": " + tags)
+ }
+ tag, err := strconv.Atoi(tagArray[1])
+ if err != nil {
+ panic("protobuf tag field not an integer: " + tagArray[1])
+ }
+
+ name := ""
+ for _, tag := range tagArray[3:] {
+ if strings.HasPrefix(tag, "name=") {
+ name = tag[5:]
+ }
+ }
+
+ // Extract unmarshaling function from the field (its type and tags).
+ unmarshal := fieldUnmarshaler(&f)
+
+ // Required field?
+ var reqMask uint64
+ if tagArray[2] == "req" {
+ bit := len(u.reqFields)
+ u.reqFields = append(u.reqFields, name)
+ reqMask = uint64(1) << uint(bit)
+ // TODO: if we have more than 64 required fields, we end up
+ // not verifying that all required fields are present.
+ // Fix this, perhaps using a count of required fields?
+ }
+
+ // Store the info in the correct slot in the message.
+ u.setTag(tag, toField(&f), unmarshal, reqMask)
+ }
+
+ // Find any types associated with oneof fields.
+ // TODO: XXX_OneofFuncs returns more info than we need. Get rid of some of it?
+ fn := reflect.Zero(reflect.PtrTo(t)).MethodByName("XXX_OneofFuncs")
+ if fn.IsValid() {
+ res := fn.Call(nil)[3] // last return value from XXX_OneofFuncs: []interface{}
+ for i := res.Len() - 1; i >= 0; i-- {
+ v := res.Index(i) // interface{}
+ tptr := reflect.ValueOf(v.Interface()).Type() // *Msg_X
+ typ := tptr.Elem() // Msg_X
+
+ f := typ.Field(0) // oneof implementers have one field
+ baseUnmarshal := fieldUnmarshaler(&f)
+ tagstr := strings.Split(f.Tag.Get("protobuf"), ",")[1]
+ tag, err := strconv.Atoi(tagstr)
+ if err != nil {
+ panic("protobuf tag field not an integer: " + tagstr)
+ }
+
+ // Find the oneof field that this struct implements.
+ // Might take O(n^2) to process all of the oneofs, but who cares.
+ for _, of := range oneofFields {
+ if tptr.Implements(of.ityp) {
+ // We have found the corresponding interface for this struct.
+ // That lets us know where this struct should be stored
+ // when we encounter it during unmarshaling.
+ unmarshal := makeUnmarshalOneof(typ, of.ityp, baseUnmarshal)
+ u.setTag(tag, of.field, unmarshal, 0)
+ }
+ }
+ }
+ }
+
+ // Get extension ranges, if any.
+ fn = reflect.Zero(reflect.PtrTo(t)).MethodByName("ExtensionRangeArray")
+ if fn.IsValid() {
+ if !u.extensions.IsValid() && !u.oldExtensions.IsValid() {
+ panic("a message with extensions, but no extensions field in " + t.Name())
+ }
+ u.extensionRanges = fn.Call(nil)[0].Interface().([]ExtensionRange)
+ }
+
+ // Explicitly disallow tag 0. This will ensure we flag an error
+ // when decoding a buffer of all zeros. Without this code, we
+ // would decode and skip an all-zero buffer of even length.
+ // [0 0] is [tag=0/wiretype=varint varint-encoded-0].
+ u.setTag(0, zeroField, func(b []byte, f pointer, w int) ([]byte, error) {
+ return nil, fmt.Errorf("proto: %s: illegal tag 0 (wire type %d)", t, w)
+ }, 0)
+
+ // Set mask for required field check.
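+	// For example, three required fields yield a reqMask of 0b111.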
+ u.reqMask = uint64(1)<<uint(len(u.reqFields)) - 1
+
+ atomic.StoreInt32(&u.initialized, 1)
+}
+
+// setTag stores the unmarshal information for the given tag.
+// tag = tag # for field
+// field/unmarshal = unmarshal info for that field.
+// reqMask = if required, bitmask for field position in required field list. 0 otherwise.
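+// Small tags are stored in the dense slice for constant-time lookup; larger
+// tags fall back to the sparse map.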
+func (u *unmarshalInfo) setTag(tag int, field field, unmarshal unmarshaler, reqMask uint64) {
+ i := unmarshalFieldInfo{field: field, unmarshal: unmarshal, reqMask: reqMask}
+ n := u.typ.NumField()
+ if tag >= 0 && (tag < 16 || tag < 2*n) { // TODO: what are the right numbers here?
+ for len(u.dense) <= tag {
+ u.dense = append(u.dense, unmarshalFieldInfo{})
+ }
+ u.dense[tag] = i
+ return
+ }
+ if u.sparse == nil {
+ u.sparse = map[uint64]unmarshalFieldInfo{}
+ }
+ u.sparse[uint64(tag)] = i
+}
+
+// fieldUnmarshaler returns an unmarshaler for the given field.
+func fieldUnmarshaler(f *reflect.StructField) unmarshaler {
+ if f.Type.Kind() == reflect.Map {
+ return makeUnmarshalMap(f)
+ }
+ return typeUnmarshaler(f.Type, f.Tag.Get("protobuf"))
+}
+
+// typeUnmarshaler returns an unmarshaler for the given field type / field tag pair.
+func typeUnmarshaler(t reflect.Type, tags string) unmarshaler {
+ tagArray := strings.Split(tags, ",")
+ encoding := tagArray[0]
+ name := "unknown"
+ for _, tag := range tagArray[3:] {
+ if strings.HasPrefix(tag, "name=") {
+ name = tag[5:]
+ }
+ }
+
+ // Figure out packaging (pointer, slice, or both)
+ slice := false
+ pointer := false
+ if t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 {
+ slice = true
+ t = t.Elem()
+ }
+ if t.Kind() == reflect.Ptr {
+ pointer = true
+ t = t.Elem()
+ }
+
+ // We'll never have both pointer and slice for basic types.
+ if pointer && slice && t.Kind() != reflect.Struct {
+ panic("both pointer and slice for basic type in " + t.Name())
+ }
+
+ switch t.Kind() {
+ case reflect.Bool:
+ if pointer {
+ return unmarshalBoolPtr
+ }
+ if slice {
+ return unmarshalBoolSlice
+ }
+ return unmarshalBoolValue
+ case reflect.Int32:
+ switch encoding {
+ case "fixed32":
+ if pointer {
+ return unmarshalFixedS32Ptr
+ }
+ if slice {
+ return unmarshalFixedS32Slice
+ }
+ return unmarshalFixedS32Value
+ case "varint":
+ // this could be int32 or enum
+ if pointer {
+ return unmarshalInt32Ptr
+ }
+ if slice {
+ return unmarshalInt32Slice
+ }
+ return unmarshalInt32Value
+ case "zigzag32":
+ if pointer {
+ return unmarshalSint32Ptr
+ }
+ if slice {
+ return unmarshalSint32Slice
+ }
+ return unmarshalSint32Value
+ }
+ case reflect.Int64:
+ switch encoding {
+ case "fixed64":
+ if pointer {
+ return unmarshalFixedS64Ptr
+ }
+ if slice {
+ return unmarshalFixedS64Slice
+ }
+ return unmarshalFixedS64Value
+ case "varint":
+ if pointer {
+ return unmarshalInt64Ptr
+ }
+ if slice {
+ return unmarshalInt64Slice
+ }
+ return unmarshalInt64Value
+ case "zigzag64":
+ if pointer {
+ return unmarshalSint64Ptr
+ }
+ if slice {
+ return unmarshalSint64Slice
+ }
+ return unmarshalSint64Value
+ }
+ case reflect.Uint32:
+ switch encoding {
+ case "fixed32":
+ if pointer {
+ return unmarshalFixed32Ptr
+ }
+ if slice {
+ return unmarshalFixed32Slice
+ }
+ return unmarshalFixed32Value
+ case "varint":
+ if pointer {
+ return unmarshalUint32Ptr
+ }
+ if slice {
+ return unmarshalUint32Slice
+ }
+ return unmarshalUint32Value
+ }
+ case reflect.Uint64:
+ switch encoding {
+ case "fixed64":
+ if pointer {
+ return unmarshalFixed64Ptr
+ }
+ if slice {
+ return unmarshalFixed64Slice
+ }
+ return unmarshalFixed64Value
+ case "varint":
+ if pointer {
+ return unmarshalUint64Ptr
+ }
+ if slice {
+ return unmarshalUint64Slice
+ }
+ return unmarshalUint64Value
+ }
+ case reflect.Float32:
+ if pointer {
+ return unmarshalFloat32Ptr
+ }
+ if slice {
+ return unmarshalFloat32Slice
+ }
+ return unmarshalFloat32Value
+ case reflect.Float64:
+ if pointer {
+ return unmarshalFloat64Ptr
+ }
+ if slice {
+ return unmarshalFloat64Slice
+ }
+ return unmarshalFloat64Value
+ case reflect.Map:
+ panic("map type in typeUnmarshaler in " + t.Name())
+ case reflect.Slice:
+ if pointer {
+ panic("bad pointer in slice case in " + t.Name())
+ }
+ if slice {
+ return unmarshalBytesSlice
+ }
+ return unmarshalBytesValue
+ case reflect.String:
+ if pointer {
+ return unmarshalStringPtr
+ }
+ if slice {
+ return unmarshalStringSlice
+ }
+ return unmarshalStringValue
+ case reflect.Struct:
+ // message or group field
+ if !pointer {
+ panic(fmt.Sprintf("message/group field %s:%s without pointer", t, encoding))
+ }
+ switch encoding {
+ case "bytes":
+ if slice {
+ return makeUnmarshalMessageSlicePtr(getUnmarshalInfo(t), name)
+ }
+ return makeUnmarshalMessagePtr(getUnmarshalInfo(t), name)
+ case "group":
+ if slice {
+ return makeUnmarshalGroupSlicePtr(getUnmarshalInfo(t), name)
+ }
+ return makeUnmarshalGroupPtr(getUnmarshalInfo(t), name)
+ }
+ }
+ panic(fmt.Sprintf("unmarshaler not found type:%s encoding:%s", t, encoding))
+}
+
+// Below are all the unmarshalers for individual fields of various types.
+
+func unmarshalInt64Value(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireVarint {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ v := int64(x)
+ *f.toInt64() = v
+ return b, nil
+}
+
+func unmarshalInt64Ptr(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireVarint {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ v := int64(x)
+ *f.toInt64Ptr() = &v
+ return b, nil
+}
+
+func unmarshalInt64Slice(b []byte, f pointer, w int) ([]byte, error) {
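+	// A packed repeated field arrives as a single length-delimited payload of
+	// concatenated varints: read the length, then decode values until the
+	// payload is exhausted.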
+ if w == WireBytes { // packed
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ res := b[x:]
+ b = b[:x]
+ for len(b) > 0 {
+ x, n = decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ v := int64(x)
+ s := f.toInt64Slice()
+ *s = append(*s, v)
+ }
+ return res, nil
+ }
+ if w != WireVarint {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ v := int64(x)
+ s := f.toInt64Slice()
+ *s = append(*s, v)
+ return b, nil
+}
+
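+// The sint unmarshalers below undo zigzag encoding, in which 0, 1, 2, 3, 4, ...
+// on the wire represent 0, -1, 1, -2, 2, ... The expression
+// int64(x>>1) ^ int64(x)<<63>>63 XORs the shifted value with all ones exactly
+// when the low bit of x is set, recovering the signed value.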
+func unmarshalSint64Value(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireVarint {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ v := int64(x>>1) ^ int64(x)<<63>>63
+ *f.toInt64() = v
+ return b, nil
+}
+
+func unmarshalSint64Ptr(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireVarint {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ v := int64(x>>1) ^ int64(x)<<63>>63
+ *f.toInt64Ptr() = &v
+ return b, nil
+}
+
+func unmarshalSint64Slice(b []byte, f pointer, w int) ([]byte, error) {
+ if w == WireBytes { // packed
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ res := b[x:]
+ b = b[:x]
+ for len(b) > 0 {
+ x, n = decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ v := int64(x>>1) ^ int64(x)<<63>>63
+ s := f.toInt64Slice()
+ *s = append(*s, v)
+ }
+ return res, nil
+ }
+ if w != WireVarint {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ v := int64(x>>1) ^ int64(x)<<63>>63
+ s := f.toInt64Slice()
+ *s = append(*s, v)
+ return b, nil
+}
+
+func unmarshalUint64Value(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireVarint {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ v := uint64(x)
+ *f.toUint64() = v
+ return b, nil
+}
+
+func unmarshalUint64Ptr(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireVarint {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ v := uint64(x)
+ *f.toUint64Ptr() = &v
+ return b, nil
+}
+
+func unmarshalUint64Slice(b []byte, f pointer, w int) ([]byte, error) {
+ if w == WireBytes { // packed
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ res := b[x:]
+ b = b[:x]
+ for len(b) > 0 {
+ x, n = decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ v := uint64(x)
+ s := f.toUint64Slice()
+ *s = append(*s, v)
+ }
+ return res, nil
+ }
+ if w != WireVarint {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ v := uint64(x)
+ s := f.toUint64Slice()
+ *s = append(*s, v)
+ return b, nil
+}
+
+func unmarshalInt32Value(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireVarint {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ v := int32(x)
+ *f.toInt32() = v
+ return b, nil
+}
+
+func unmarshalInt32Ptr(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireVarint {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ v := int32(x)
+ f.setInt32Ptr(v)
+ return b, nil
+}
+
+func unmarshalInt32Slice(b []byte, f pointer, w int) ([]byte, error) {
+ if w == WireBytes { // packed
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ res := b[x:]
+ b = b[:x]
+ for len(b) > 0 {
+ x, n = decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ v := int32(x)
+ f.appendInt32Slice(v)
+ }
+ return res, nil
+ }
+ if w != WireVarint {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ v := int32(x)
+ f.appendInt32Slice(v)
+ return b, nil
+}
+
+func unmarshalSint32Value(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireVarint {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ v := int32(x>>1) ^ int32(x)<<31>>31
+ *f.toInt32() = v
+ return b, nil
+}
+
+func unmarshalSint32Ptr(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireVarint {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ v := int32(x>>1) ^ int32(x)<<31>>31
+ f.setInt32Ptr(v)
+ return b, nil
+}
+
+func unmarshalSint32Slice(b []byte, f pointer, w int) ([]byte, error) {
+ if w == WireBytes { // packed
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ res := b[x:]
+ b = b[:x]
+ for len(b) > 0 {
+ x, n = decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ v := int32(x>>1) ^ int32(x)<<31>>31
+ f.appendInt32Slice(v)
+ }
+ return res, nil
+ }
+ if w != WireVarint {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ v := int32(x>>1) ^ int32(x)<<31>>31
+ f.appendInt32Slice(v)
+ return b, nil
+}
+
+func unmarshalUint32Value(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireVarint {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ v := uint32(x)
+ *f.toUint32() = v
+ return b, nil
+}
+
+func unmarshalUint32Ptr(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireVarint {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ v := uint32(x)
+ *f.toUint32Ptr() = &v
+ return b, nil
+}
+
+func unmarshalUint32Slice(b []byte, f pointer, w int) ([]byte, error) {
+ if w == WireBytes { // packed
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ res := b[x:]
+ b = b[:x]
+ for len(b) > 0 {
+ x, n = decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ v := uint32(x)
+ s := f.toUint32Slice()
+ *s = append(*s, v)
+ }
+ return res, nil
+ }
+ if w != WireVarint {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ v := uint32(x)
+ s := f.toUint32Slice()
+ *s = append(*s, v)
+ return b, nil
+}
+
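+// Fixed-width fields are little-endian on the wire; the unmarshalers below
+// assemble each value from its bytes, least-significant byte first.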
+func unmarshalFixed64Value(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireFixed64 {
+ return b, errInternalBadWireType
+ }
+ if len(b) < 8 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56
+ *f.toUint64() = v
+ return b[8:], nil
+}
+
+func unmarshalFixed64Ptr(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireFixed64 {
+ return b, errInternalBadWireType
+ }
+ if len(b) < 8 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56
+ *f.toUint64Ptr() = &v
+ return b[8:], nil
+}
+
+func unmarshalFixed64Slice(b []byte, f pointer, w int) ([]byte, error) {
+ if w == WireBytes { // packed
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ res := b[x:]
+ b = b[:x]
+ for len(b) > 0 {
+ if len(b) < 8 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56
+ s := f.toUint64Slice()
+ *s = append(*s, v)
+ b = b[8:]
+ }
+ return res, nil
+ }
+ if w != WireFixed64 {
+ return b, errInternalBadWireType
+ }
+ if len(b) < 8 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56
+ s := f.toUint64Slice()
+ *s = append(*s, v)
+ return b[8:], nil
+}
+
+func unmarshalFixedS64Value(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireFixed64 {
+ return b, errInternalBadWireType
+ }
+ if len(b) < 8 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56
+ *f.toInt64() = v
+ return b[8:], nil
+}
+
+func unmarshalFixedS64Ptr(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireFixed64 {
+ return b, errInternalBadWireType
+ }
+ if len(b) < 8 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56
+ *f.toInt64Ptr() = &v
+ return b[8:], nil
+}
+
+func unmarshalFixedS64Slice(b []byte, f pointer, w int) ([]byte, error) {
+ if w == WireBytes { // packed
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ res := b[x:]
+ b = b[:x]
+ for len(b) > 0 {
+ if len(b) < 8 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56
+ s := f.toInt64Slice()
+ *s = append(*s, v)
+ b = b[8:]
+ }
+ return res, nil
+ }
+ if w != WireFixed64 {
+ return b, errInternalBadWireType
+ }
+ if len(b) < 8 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56
+ s := f.toInt64Slice()
+ *s = append(*s, v)
+ return b[8:], nil
+}
+
+func unmarshalFixed32Value(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireFixed32 {
+ return b, errInternalBadWireType
+ }
+ if len(b) < 4 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
+ *f.toUint32() = v
+ return b[4:], nil
+}
+
+func unmarshalFixed32Ptr(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireFixed32 {
+ return b, errInternalBadWireType
+ }
+ if len(b) < 4 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
+ *f.toUint32Ptr() = &v
+ return b[4:], nil
+}
+
+func unmarshalFixed32Slice(b []byte, f pointer, w int) ([]byte, error) {
+ if w == WireBytes { // packed
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ res := b[x:]
+ b = b[:x]
+ for len(b) > 0 {
+ if len(b) < 4 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
+ s := f.toUint32Slice()
+ *s = append(*s, v)
+ b = b[4:]
+ }
+ return res, nil
+ }
+ if w != WireFixed32 {
+ return b, errInternalBadWireType
+ }
+ if len(b) < 4 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
+ s := f.toUint32Slice()
+ *s = append(*s, v)
+ return b[4:], nil
+}
+
+func unmarshalFixedS32Value(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireFixed32 {
+ return b, errInternalBadWireType
+ }
+ if len(b) < 4 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24
+ *f.toInt32() = v
+ return b[4:], nil
+}
+
+func unmarshalFixedS32Ptr(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireFixed32 {
+ return b, errInternalBadWireType
+ }
+ if len(b) < 4 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24
+ f.setInt32Ptr(v)
+ return b[4:], nil
+}
+
+func unmarshalFixedS32Slice(b []byte, f pointer, w int) ([]byte, error) {
+ if w == WireBytes { // packed
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ res := b[x:]
+ b = b[:x]
+ for len(b) > 0 {
+ if len(b) < 4 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24
+ f.appendInt32Slice(v)
+ b = b[4:]
+ }
+ return res, nil
+ }
+ if w != WireFixed32 {
+ return b, errInternalBadWireType
+ }
+ if len(b) < 4 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24
+ f.appendInt32Slice(v)
+ return b[4:], nil
+}
+
+func unmarshalBoolValue(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireVarint {
+ return b, errInternalBadWireType
+ }
+ // Note: any length varint is allowed, even though any sane
+ // encoder will use one byte.
+ // See https://github.com/golang/protobuf/issues/76
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ // TODO: check if x>1? Tests seem to indicate no.
+ v := x != 0
+ *f.toBool() = v
+ return b[n:], nil
+}
+
+func unmarshalBoolPtr(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireVarint {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := x != 0
+ *f.toBoolPtr() = &v
+ return b[n:], nil
+}
+
+func unmarshalBoolSlice(b []byte, f pointer, w int) ([]byte, error) {
+ if w == WireBytes { // packed
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ res := b[x:]
+ b = b[:x]
+ for len(b) > 0 {
+ x, n = decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := x != 0
+ s := f.toBoolSlice()
+ *s = append(*s, v)
+ b = b[n:]
+ }
+ return res, nil
+ }
+ if w != WireVarint {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := x != 0
+ s := f.toBoolSlice()
+ *s = append(*s, v)
+ return b[n:], nil
+}
+
+func unmarshalFloat64Value(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireFixed64 {
+ return b, errInternalBadWireType
+ }
+ if len(b) < 8 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56)
+ *f.toFloat64() = v
+ return b[8:], nil
+}
+
+func unmarshalFloat64Ptr(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireFixed64 {
+ return b, errInternalBadWireType
+ }
+ if len(b) < 8 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56)
+ *f.toFloat64Ptr() = &v
+ return b[8:], nil
+}
+
+func unmarshalFloat64Slice(b []byte, f pointer, w int) ([]byte, error) {
+ if w == WireBytes { // packed
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ res := b[x:]
+ b = b[:x]
+ for len(b) > 0 {
+ if len(b) < 8 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56)
+ s := f.toFloat64Slice()
+ *s = append(*s, v)
+ b = b[8:]
+ }
+ return res, nil
+ }
+ if w != WireFixed64 {
+ return b, errInternalBadWireType
+ }
+ if len(b) < 8 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56)
+ s := f.toFloat64Slice()
+ *s = append(*s, v)
+ return b[8:], nil
+}
+
+func unmarshalFloat32Value(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireFixed32 {
+ return b, errInternalBadWireType
+ }
+ if len(b) < 4 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24)
+ *f.toFloat32() = v
+ return b[4:], nil
+}
+
+func unmarshalFloat32Ptr(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireFixed32 {
+ return b, errInternalBadWireType
+ }
+ if len(b) < 4 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24)
+ *f.toFloat32Ptr() = &v
+ return b[4:], nil
+}
+
+func unmarshalFloat32Slice(b []byte, f pointer, w int) ([]byte, error) {
+ if w == WireBytes { // packed
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ res := b[x:]
+ b = b[:x]
+ for len(b) > 0 {
+ if len(b) < 4 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24)
+ s := f.toFloat32Slice()
+ *s = append(*s, v)
+ b = b[4:]
+ }
+ return res, nil
+ }
+ if w != WireFixed32 {
+ return b, errInternalBadWireType
+ }
+ if len(b) < 4 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24)
+ s := f.toFloat32Slice()
+ *s = append(*s, v)
+ return b[4:], nil
+}
+
+func unmarshalStringValue(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireBytes {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := string(b[:x])
+ if !utf8.ValidString(v) {
+ return nil, errInvalidUTF8
+ }
+ *f.toString() = v
+ return b[x:], nil
+}
+
+func unmarshalStringPtr(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireBytes {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := string(b[:x])
+ if !utf8.ValidString(v) {
+ return nil, errInvalidUTF8
+ }
+ *f.toStringPtr() = &v
+ return b[x:], nil
+}
+
+func unmarshalStringSlice(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireBytes {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := string(b[:x])
+ if !utf8.ValidString(v) {
+ return nil, errInvalidUTF8
+ }
+ s := f.toStringSlice()
+ *s = append(*s, v)
+ return b[x:], nil
+}
+
+var emptyBuf [0]byte
+
+func unmarshalBytesValue(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireBytes {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ // The use of append here is a trick which avoids the zeroing
+ // that would be required if we used a make/copy pair.
+ // We append to emptyBuf instead of nil because we want
+ // a non-nil result even when the length is 0.
+ v := append(emptyBuf[:], b[:x]...)
+ *f.toBytes() = v
+ return b[x:], nil
+}
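
A minimal sketch (not part of the vendored change) of the append-to-empty-array trick the comment above describes: appending to emptyBuf[:] copies the bytes without the explicit zeroing a make/copy pair would require, and the result is non-nil even for zero-length input, unlike appending to a nil slice.

package main

import "fmt"

var emptyBuf [0]byte

// copyBytes mirrors the trick: append to a non-nil, zero-length slice.
func copyBytes(src []byte) []byte {
	return append(emptyBuf[:], src...)
}

func main() {
	fmt.Println(copyBytes([]byte("abc")))                // [97 98 99]
	fmt.Println(copyBytes(nil) != nil)                   // true: result is non-nil
	fmt.Println(append([]byte(nil), []byte{}...) != nil) // false: appending nothing to nil stays nil
}
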
+
+func unmarshalBytesSlice(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireBytes {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := append(emptyBuf[:], b[:x]...)
+ s := f.toBytesSlice()
+ *s = append(*s, v)
+ return b[x:], nil
+}
+
+func makeUnmarshalMessagePtr(sub *unmarshalInfo, name string) unmarshaler {
+ return func(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireBytes {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ // First read the message field to see if something is there.
+ // The semantics of multiple submessages are weird. Instead of
+ // the last one winning (as it is for all other fields), multiple
+ // submessages are merged.
+ v := f.getPointer()
+ if v.isNil() {
+ v = valToPointer(reflect.New(sub.typ))
+ f.setPointer(v)
+ }
+ err := sub.unmarshal(v, b[:x])
+ if err != nil {
+ if r, ok := err.(*RequiredNotSetError); ok {
+ r.field = name + "." + r.field
+ } else {
+ return nil, err
+ }
+ }
+ return b[x:], err
+ }
+}
+
+func makeUnmarshalMessageSlicePtr(sub *unmarshalInfo, name string) unmarshaler {
+ return func(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireBytes {
+ return b, errInternalBadWireType
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := valToPointer(reflect.New(sub.typ))
+ err := sub.unmarshal(v, b[:x])
+ if err != nil {
+ if r, ok := err.(*RequiredNotSetError); ok {
+ r.field = name + "." + r.field
+ } else {
+ return nil, err
+ }
+ }
+ f.appendPointer(v)
+ return b[x:], err
+ }
+}
+
+func makeUnmarshalGroupPtr(sub *unmarshalInfo, name string) unmarshaler {
+ return func(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireStartGroup {
+ return b, errInternalBadWireType
+ }
+ x, y := findEndGroup(b)
+ if x < 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := f.getPointer()
+ if v.isNil() {
+ v = valToPointer(reflect.New(sub.typ))
+ f.setPointer(v)
+ }
+ err := sub.unmarshal(v, b[:x])
+ if err != nil {
+ if r, ok := err.(*RequiredNotSetError); ok {
+ r.field = name + "." + r.field
+ } else {
+ return nil, err
+ }
+ }
+ return b[y:], err
+ }
+}
+
+func makeUnmarshalGroupSlicePtr(sub *unmarshalInfo, name string) unmarshaler {
+ return func(b []byte, f pointer, w int) ([]byte, error) {
+ if w != WireStartGroup {
+ return b, errInternalBadWireType
+ }
+ x, y := findEndGroup(b)
+ if x < 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ v := valToPointer(reflect.New(sub.typ))
+ err := sub.unmarshal(v, b[:x])
+ if err != nil {
+ if r, ok := err.(*RequiredNotSetError); ok {
+ r.field = name + "." + r.field
+ } else {
+ return nil, err
+ }
+ }
+ f.appendPointer(v)
+ return b[y:], err
+ }
+}
+
+func makeUnmarshalMap(f *reflect.StructField) unmarshaler {
+ t := f.Type
+ kt := t.Key()
+ vt := t.Elem()
+ unmarshalKey := typeUnmarshaler(kt, f.Tag.Get("protobuf_key"))
+ unmarshalVal := typeUnmarshaler(vt, f.Tag.Get("protobuf_val"))
+ return func(b []byte, f pointer, w int) ([]byte, error) {
+ // The map entry is a submessage. Figure out how big it is.
+ if w != WireBytes {
+ return nil, fmt.Errorf("proto: bad wiretype for map field: got %d want %d", w, WireBytes)
+ }
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ b = b[n:]
+ if x > uint64(len(b)) {
+ return nil, io.ErrUnexpectedEOF
+ }
+ r := b[x:] // unused data to return
+ b = b[:x] // data for map entry
+
+ // Note: we could use #keys * #values ~= 200 functions
+ // to do map decoding without reflection. Probably not worth it.
+ // Maps will be somewhat slow. Oh well.
+
+ // Read key and value from data.
+ k := reflect.New(kt)
+ v := reflect.New(vt)
+ for len(b) > 0 {
+ x, n := decodeVarint(b)
+ if n == 0 {
+ return nil, io.ErrUnexpectedEOF
+ }
+ wire := int(x) & 7
+ b = b[n:]
+
+ var err error
+ switch x >> 3 {
+ case 1:
+ b, err = unmarshalKey(b, valToPointer(k), wire)
+ case 2:
+ b, err = unmarshalVal(b, valToPointer(v), wire)
+ default:
+ err = errInternalBadWireType // skip unknown tag
+ }
+
+ if err == nil {
+ continue
+ }
+ if err != errInternalBadWireType {
+ return nil, err
+ }
+
+ // Skip past unknown fields.
+ b, err = skipField(b, wire)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ // Get map, allocate if needed.
+ m := f.asPointerTo(t).Elem() // an addressable map[K]T
+ if m.IsNil() {
+ m.Set(reflect.MakeMap(t))
+ }
+
+ // Insert into map.
+ m.SetMapIndex(k.Elem(), v.Elem())
+
+ return r, nil
+ }
+}
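
The map-entry loop above treats each entry as a tiny submessage and splits every tag varint into a field number (x >> 3) and a wire type (x & 7); field 1 carries the key and field 2 the value. A standalone sketch of that split (not part of the diff):

package main

import "fmt"

func main() {
	// 0x08 = field 1, varint (a typical map key tag);
	// 0x12 = field 2, length-delimited (a typical map value tag).
	for _, tag := range []uint64{0x08, 0x12} {
		fmt.Printf("tag %#02x -> field %d, wire type %d\n", tag, tag>>3, tag&7)
	}
}
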
+
+// makeUnmarshalOneof makes an unmarshaler for oneof fields.
+// for:
+// message Msg {
+// oneof F {
+// int64 X = 1;
+// float64 Y = 2;
+// }
+// }
+// typ is the type of the concrete entry for a oneof case (e.g. Msg_X).
+// ityp is the interface type of the oneof field (e.g. isMsg_F).
+// unmarshal is the unmarshaler for the base type of the oneof case (e.g. int64).
+// Note that this function will be called once for each case in the oneof.
+func makeUnmarshalOneof(typ, ityp reflect.Type, unmarshal unmarshaler) unmarshaler {
+ sf := typ.Field(0)
+ field0 := toField(&sf)
+ return func(b []byte, f pointer, w int) ([]byte, error) {
+ // Allocate holder for value.
+ v := reflect.New(typ)
+
+ // Unmarshal data into holder.
+ // We unmarshal into the first field of the holder object.
+ var err error
+ b, err = unmarshal(b, valToPointer(v).offset(field0), w)
+ if err != nil {
+ return nil, err
+ }
+
+ // Write pointer to holder into target field.
+ f.asPointerTo(ityp).Elem().Set(v)
+
+ return b, nil
+ }
+}
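
For reference, the Msg/oneof example in the comment above roughly corresponds to generated Go of this shape (a simplified, hand-written approximation; struct tags and other generated helpers are omitted). typ is a wrapper type such as Msg_X, ityp the interface type such as isMsg_F:

package main

import "fmt"

type Msg struct {
	F isMsg_F // the oneof field holds a pointer to exactly one wrapper type
}

type isMsg_F interface{ isMsg_F() }

type Msg_X struct{ X int64 }
type Msg_Y struct{ Y float64 }

func (*Msg_X) isMsg_F() {}
func (*Msg_Y) isMsg_F() {}

func main() {
	m := &Msg{F: &Msg_X{X: 7}}
	switch v := m.F.(type) {
	case *Msg_X:
		fmt.Println("X =", v.X)
	case *Msg_Y:
		fmt.Println("Y =", v.Y)
	}
}
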
+
+// Error used by decode internally.
+var errInternalBadWireType = errors.New("proto: internal error: bad wiretype")
+
+// skipField skips past a field of type wire and returns the remaining bytes.
+func skipField(b []byte, wire int) ([]byte, error) {
+ switch wire {
+ case WireVarint:
+ _, k := decodeVarint(b)
+ if k == 0 {
+ return b, io.ErrUnexpectedEOF
+ }
+ b = b[k:]
+ case WireFixed32:
+ if len(b) < 4 {
+ return b, io.ErrUnexpectedEOF
+ }
+ b = b[4:]
+ case WireFixed64:
+ if len(b) < 8 {
+ return b, io.ErrUnexpectedEOF
+ }
+ b = b[8:]
+ case WireBytes:
+ m, k := decodeVarint(b)
+ if k == 0 || uint64(len(b)-k) < m {
+ return b, io.ErrUnexpectedEOF
+ }
+ b = b[uint64(k)+m:]
+ case WireStartGroup:
+ _, i := findEndGroup(b)
+ if i == -1 {
+ return b, io.ErrUnexpectedEOF
+ }
+ b = b[i:]
+ default:
+ return b, fmt.Errorf("proto: can't skip unknown wire type %d", wire)
+ }
+ return b, nil
+}
+
+// findEndGroup finds the index of the next EndGroup tag.
+// Groups may be nested, so the "next" EndGroup tag is the first
+// unpaired EndGroup.
+// findEndGroup returns the indexes of the start and end of the EndGroup tag.
+// Returns (-1,-1) if it can't find one.
+func findEndGroup(b []byte) (int, int) {
+ depth := 1
+ i := 0
+ for {
+ x, n := decodeVarint(b[i:])
+ if n == 0 {
+ return -1, -1
+ }
+ j := i
+ i += n
+ switch x & 7 {
+ case WireVarint:
+ _, k := decodeVarint(b[i:])
+ if k == 0 {
+ return -1, -1
+ }
+ i += k
+ case WireFixed32:
+ if len(b)-4 < i {
+ return -1, -1
+ }
+ i += 4
+ case WireFixed64:
+ if len(b)-8 < i {
+ return -1, -1
+ }
+ i += 8
+ case WireBytes:
+ m, k := decodeVarint(b[i:])
+ if k == 0 {
+ return -1, -1
+ }
+ i += k
+ if uint64(len(b)-i) < m {
+ return -1, -1
+ }
+ i += int(m)
+ case WireStartGroup:
+ depth++
+ case WireEndGroup:
+ depth--
+ if depth == 0 {
+ return j, i
+ }
+ default:
+ return -1, -1
+ }
+ }
+}
+
+// encodeVarint appends a varint-encoded integer to b and returns the result.
+func encodeVarint(b []byte, x uint64) []byte {
+ for x >= 1<<7 {
+ b = append(b, byte(x&0x7f|0x80))
+ x >>= 7
+ }
+ return append(b, byte(x))
+}
+
+// decodeVarint reads a varint-encoded integer from b.
+// Returns the decoded integer and the number of bytes read.
+// If there is an error, it returns 0,0.
+func decodeVarint(b []byte) (uint64, int) {
+ var x, y uint64
+ if len(b) <= 0 {
+ goto bad
+ }
+ x = uint64(b[0])
+ if x < 0x80 {
+ return x, 1
+ }
+ x -= 0x80
+
+ if len(b) <= 1 {
+ goto bad
+ }
+ y = uint64(b[1])
+ x += y << 7
+ if y < 0x80 {
+ return x, 2
+ }
+ x -= 0x80 << 7
+
+ if len(b) <= 2 {
+ goto bad
+ }
+ y = uint64(b[2])
+ x += y << 14
+ if y < 0x80 {
+ return x, 3
+ }
+ x -= 0x80 << 14
+
+ if len(b) <= 3 {
+ goto bad
+ }
+ y = uint64(b[3])
+ x += y << 21
+ if y < 0x80 {
+ return x, 4
+ }
+ x -= 0x80 << 21
+
+ if len(b) <= 4 {
+ goto bad
+ }
+ y = uint64(b[4])
+ x += y << 28
+ if y < 0x80 {
+ return x, 5
+ }
+ x -= 0x80 << 28
+
+ if len(b) <= 5 {
+ goto bad
+ }
+ y = uint64(b[5])
+ x += y << 35
+ if y < 0x80 {
+ return x, 6
+ }
+ x -= 0x80 << 35
+
+ if len(b) <= 6 {
+ goto bad
+ }
+ y = uint64(b[6])
+ x += y << 42
+ if y < 0x80 {
+ return x, 7
+ }
+ x -= 0x80 << 42
+
+ if len(b) <= 7 {
+ goto bad
+ }
+ y = uint64(b[7])
+ x += y << 49
+ if y < 0x80 {
+ return x, 8
+ }
+ x -= 0x80 << 49
+
+ if len(b) <= 8 {
+ goto bad
+ }
+ y = uint64(b[8])
+ x += y << 56
+ if y < 0x80 {
+ return x, 9
+ }
+ x -= 0x80 << 56
+
+ if len(b) <= 9 {
+ goto bad
+ }
+ y = uint64(b[9])
+ x += y << 63
+ if y < 2 {
+ return x, 10
+ }
+
+bad:
+ return 0, 0
+}
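
A self-contained round trip (not part of the diff) of the base-128 varint scheme implemented by encodeVarint and the unrolled decodeVarint above: seven value bits per byte, with the high bit set on every byte except the last. The decoder here is a compact loop equivalent rather than the unrolled version:

package main

import "fmt"

func encodeVarint(b []byte, x uint64) []byte {
	for x >= 1<<7 {
		b = append(b, byte(x&0x7f|0x80))
		x >>= 7
	}
	return append(b, byte(x))
}

func decodeVarint(b []byte) (uint64, int) {
	var x uint64
	for i, c := range b {
		x |= uint64(c&0x7f) << (7 * uint(i))
		if c < 0x80 {
			return x, i + 1
		}
	}
	return 0, 0 // truncated input
}

func main() {
	b := encodeVarint(nil, 300)
	fmt.Printf("% x\n", b) // ac 02
	v, n := decodeVarint(b)
	fmt.Println(v, n) // 300 2
}
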
diff --git a/vendor/github.com/golang/protobuf/proto/text.go b/vendor/github.com/golang/protobuf/proto/text.go
index 965876bf..2205fdaa 100644
--- a/vendor/github.com/golang/protobuf/proto/text.go
+++ b/vendor/github.com/golang/protobuf/proto/text.go
@@ -50,7 +50,6 @@ import (
var (
newline = []byte("\n")
spaces = []byte(" ")
- gtNewline = []byte(">\n")
endBraceNewline = []byte("}\n")
backslashN = []byte{'\\', 'n'}
backslashR = []byte{'\\', 'r'}
@@ -170,11 +169,6 @@ func writeName(w *textWriter, props *Properties) error {
return nil
}
-// raw is the interface satisfied by RawMessage.
-type raw interface {
- Bytes() []byte
-}
-
func requiresQuotes(u string) bool {
// When type URL contains any characters except [0-9A-Za-z./\-]*, it must be quoted.
for _, ch := range u {
@@ -269,6 +263,10 @@ func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error {
props := sprops.Prop[i]
name := st.Field(i).Name
+ if name == "XXX_NoUnkeyedLiteral" {
+ continue
+ }
+
if strings.HasPrefix(name, "XXX_") {
// There are two XXX_ fields:
// XXX_unrecognized []byte
@@ -436,12 +434,6 @@ func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error {
return err
}
}
- if b, ok := fv.Interface().(raw); ok {
- if err := writeRaw(w, b.Bytes()); err != nil {
- return err
- }
- continue
- }
// Enums have a String method, so writeAny will work fine.
if err := tm.writeAny(w, fv, props); err != nil {
@@ -455,7 +447,7 @@ func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error {
// Extensions (the XXX_extensions field).
pv := sv.Addr()
- if _, ok := extendable(pv.Interface()); ok {
+ if _, err := extendable(pv.Interface()); err == nil {
if err := tm.writeExtensions(w, pv); err != nil {
return err
}
@@ -464,27 +456,6 @@ func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error {
return nil
}
-// writeRaw writes an uninterpreted raw message.
-func writeRaw(w *textWriter, b []byte) error {
- if err := w.WriteByte('<'); err != nil {
- return err
- }
- if !w.compact {
- if err := w.WriteByte('\n'); err != nil {
- return err
- }
- }
- w.indent()
- if err := writeUnknownStruct(w, b); err != nil {
- return err
- }
- w.unindent()
- if err := w.WriteByte('>'); err != nil {
- return err
- }
- return nil
-}
-
// writeAny writes an arbitrary field.
func (tm *TextMarshaler) writeAny(w *textWriter, v reflect.Value, props *Properties) error {
v = reflect.Indirect(v)
@@ -535,6 +506,19 @@ func (tm *TextMarshaler) writeAny(w *textWriter, v reflect.Value, props *Propert
}
}
w.indent()
+ if v.CanAddr() {
+ // Calling v.Interface on a struct causes the reflect package to
+ // copy the entire struct. This is racy with the new Marshaler
+ // since we atomically update the XXX_sizecache.
+ //
+ // Thus, we retrieve a pointer to the struct if possible to avoid
+ // a race since v.Interface on the pointer doesn't copy the struct.
+ //
+ // If v is not addressable, then we are not worried about a race
+ // since it implies that the binary Marshaler cannot possibly be
+ // mutating this value.
+ v = v.Addr()
+ }
if etm, ok := v.Interface().(encoding.TextMarshaler); ok {
text, err := etm.MarshalText()
if err != nil {
@@ -543,8 +527,13 @@ func (tm *TextMarshaler) writeAny(w *textWriter, v reflect.Value, props *Propert
if _, err = w.Write(text); err != nil {
return err
}
- } else if err := tm.writeStruct(w, v); err != nil {
- return err
+ } else {
+ if v.Kind() == reflect.Ptr {
+ v = v.Elem()
+ }
+ if err := tm.writeStruct(w, v); err != nil {
+ return err
+ }
}
w.unindent()
if err := w.WriteByte(ket); err != nil {
diff --git a/vendor/github.com/golang/protobuf/proto/text_parser.go b/vendor/github.com/golang/protobuf/proto/text_parser.go
index 7e6f145a..0685bae3 100644
--- a/vendor/github.com/golang/protobuf/proto/text_parser.go
+++ b/vendor/github.com/golang/protobuf/proto/text_parser.go
@@ -206,7 +206,6 @@ func (p *textParser) advance() {
var (
errBadUTF8 = errors.New("proto: bad UTF-8")
- errBadHex = errors.New("proto: bad hexadecimal")
)
func unquoteC(s string, quote rune) (string, error) {
@@ -277,60 +276,47 @@ func unescape(s string) (ch string, tail string, err error) {
return "?", s, nil // trigraph workaround
case '\'', '"', '\\':
return string(r), s, nil
- case '0', '1', '2', '3', '4', '5', '6', '7', 'x', 'X':
+ case '0', '1', '2', '3', '4', '5', '6', '7':
if len(s) < 2 {
return "", "", fmt.Errorf(`\%c requires 2 following digits`, r)
}
- base := 8
- ss := s[:2]
+ ss := string(r) + s[:2]
s = s[2:]
- if r == 'x' || r == 'X' {
- base = 16
- } else {
- ss = string(r) + ss
- }
- i, err := strconv.ParseUint(ss, base, 8)
+ i, err := strconv.ParseUint(ss, 8, 8)
if err != nil {
- return "", "", err
+ return "", "", fmt.Errorf(`\%s contains non-octal digits`, ss)
}
return string([]byte{byte(i)}), s, nil
- case 'u', 'U':
- n := 4
- if r == 'U' {
+ case 'x', 'X', 'u', 'U':
+ var n int
+ switch r {
+ case 'x', 'X':
+ n = 2
+ case 'u':
+ n = 4
+ case 'U':
n = 8
}
if len(s) < n {
- return "", "", fmt.Errorf(`\%c requires %d digits`, r, n)
- }
-
- bs := make([]byte, n/2)
- for i := 0; i < n; i += 2 {
- a, ok1 := unhex(s[i])
- b, ok2 := unhex(s[i+1])
- if !ok1 || !ok2 {
- return "", "", errBadHex
- }
- bs[i/2] = a<<4 | b
+ return "", "", fmt.Errorf(`\%c requires %d following digits`, r, n)
}
+ ss := s[:n]
s = s[n:]
- return string(bs), s, nil
+ i, err := strconv.ParseUint(ss, 16, 64)
+ if err != nil {
+ return "", "", fmt.Errorf(`\%c%s contains non-hexadecimal digits`, r, ss)
+ }
+ if r == 'x' || r == 'X' {
+ return string([]byte{byte(i)}), s, nil
+ }
+ if i > utf8.MaxRune {
+ return "", "", fmt.Errorf(`\%c%s is not a valid Unicode code point`, r, ss)
+ }
+ return string(i), s, nil
}
return "", "", fmt.Errorf(`unknown escape \%c`, r)
}
-// Adapted from src/pkg/strconv/quote.go.
-func unhex(b byte) (v byte, ok bool) {
- switch {
- case '0' <= b && b <= '9':
- return b - '0', true
- case 'a' <= b && b <= 'f':
- return b - 'a' + 10, true
- case 'A' <= b && b <= 'F':
- return b - 'A' + 10, true
- }
- return 0, false
-}
-
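
The rewrite of unescape above drops the hand-rolled unhex helper and parses \xNN, \uNNNN and \UNNNNNNNN escapes with strconv.ParseUint instead. A small sketch of that approach (not part of the diff; the input digits are made up):

package main

import (
	"fmt"
	"strconv"
)

func main() {
	// The four hex digits that would follow a \u escape.
	i, err := strconv.ParseUint("00e9", 16, 64)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(rune(i))) // é
}
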
// Back off the parser by one token. Can only be done between calls to next().
// It makes the next advance() a no-op.
func (p *textParser) back() { p.backed = true }
@@ -592,7 +578,11 @@ func (p *textParser) readStruct(sv reflect.Value, terminator string) error {
props = oop.Prop
nv := reflect.New(oop.Type.Elem())
dst = nv.Elem().Field(0)
- sv.Field(oop.Field).Set(nv)
+ field := sv.Field(oop.Field)
+ if !field.IsNil() {
+ return p.errorf("field '%s' would overwrite already parsed oneof '%s'", name, sv.Type().Field(oop.Field).Name)
+ }
+ field.Set(nv)
}
if !dst.IsValid() {
return p.errorf("unknown field name %q in %v", name, st)
@@ -724,6 +714,9 @@ func (p *textParser) consumeExtName() (string, error) {
if tok.err != nil {
return "", p.errorf("unrecognized type_url or extension name: %s", tok.err)
}
+ if p.done && tok.value != "]" {
+ return "", p.errorf("unclosed type_url or extension name")
+ }
}
return strings.Join(parts, ""), nil
}
@@ -792,12 +785,12 @@ func (p *textParser) readAny(v reflect.Value, props *Properties) error {
fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem()))
return p.readAny(fv.Index(fv.Len()-1), props)
case reflect.Bool:
- // Either "true", "false", 1 or 0.
+ // true/1/t/True or false/f/0/False.
switch tok.value {
- case "true", "1":
+ case "true", "1", "t", "True":
fv.SetBool(true)
return nil
- case "false", "0":
+ case "false", "0", "f", "False":
fv.SetBool(false)
return nil
}
@@ -879,13 +872,9 @@ func (p *textParser) readAny(v reflect.Value, props *Properties) error {
// UnmarshalText returns *RequiredNotSetError.
func UnmarshalText(s string, pb Message) error {
if um, ok := pb.(encoding.TextUnmarshaler); ok {
- err := um.UnmarshalText([]byte(s))
- return err
+ return um.UnmarshalText([]byte(s))
}
pb.Reset()
v := reflect.ValueOf(pb)
- if pe := newTextParser(s).readStruct(v.Elem(), ""); pe != nil {
- return pe
- }
- return nil
+ return newTextParser(s).readStruct(v.Elem(), "")
}
diff --git a/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go b/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go
index 493674a3..f67edc7d 100644
--- a/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go
+++ b/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go
@@ -1,16 +1,7 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: google/protobuf/any.proto
-/*
-Package any is a generated protocol buffer package.
-
-It is generated from these files:
- google/protobuf/any.proto
-
-It has these top-level messages:
- Any
-*/
-package any
+package any // import "github.com/golang/protobuf/ptypes/any"
import proto "github.com/golang/protobuf/proto"
import fmt "fmt"
@@ -52,7 +43,7 @@ const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
// foo = any.unpack(Foo.class);
// }
//
-// Example 3: Pack and unpack a message in Python.
+// Example 3: Pack and unpack a message in Python.
//
// foo = Foo(...)
// any = Any()
@@ -62,7 +53,7 @@ const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
// any.Unpack(foo)
// ...
//
-// Example 4: Pack and unpack a message in Go
+// Example 4: Pack and unpack a message in Go
//
// foo := &pb.Foo{...}
// any, err := ptypes.MarshalAny(foo)
@@ -80,7 +71,7 @@ const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
//
//
// JSON
-//
+// ====
// The JSON representation of an `Any` value uses the regular
// representation of the deserialized, embedded message, with an
// additional field `@type` which contains the type URL. Example:
@@ -132,14 +123,36 @@ type Any struct {
//
TypeUrl string `protobuf:"bytes,1,opt,name=type_url,json=typeUrl" json:"type_url,omitempty"`
// Must be a valid serialized protocol buffer of the above specified type.
- Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
+ Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *Any) Reset() { *m = Any{} }
+func (m *Any) String() string { return proto.CompactTextString(m) }
+func (*Any) ProtoMessage() {}
+func (*Any) Descriptor() ([]byte, []int) {
+ return fileDescriptor_any_744b9ca530f228db, []int{0}
+}
+func (*Any) XXX_WellKnownType() string { return "Any" }
+func (m *Any) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_Any.Unmarshal(m, b)
+}
+func (m *Any) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_Any.Marshal(b, m, deterministic)
+}
+func (dst *Any) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Any.Merge(dst, src)
+}
+func (m *Any) XXX_Size() int {
+ return xxx_messageInfo_Any.Size(m)
+}
+func (m *Any) XXX_DiscardUnknown() {
+ xxx_messageInfo_Any.DiscardUnknown(m)
}
-func (m *Any) Reset() { *m = Any{} }
-func (m *Any) String() string { return proto.CompactTextString(m) }
-func (*Any) ProtoMessage() {}
-func (*Any) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
-func (*Any) XXX_WellKnownType() string { return "Any" }
+var xxx_messageInfo_Any proto.InternalMessageInfo
func (m *Any) GetTypeUrl() string {
if m != nil {
@@ -159,9 +172,9 @@ func init() {
proto.RegisterType((*Any)(nil), "google.protobuf.Any")
}
-func init() { proto.RegisterFile("google/protobuf/any.proto", fileDescriptor0) }
+func init() { proto.RegisterFile("google/protobuf/any.proto", fileDescriptor_any_744b9ca530f228db) }
-var fileDescriptor0 = []byte{
+var fileDescriptor_any_744b9ca530f228db = []byte{
// 185 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4c, 0xcf, 0xcf, 0x4f,
0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x4f, 0xcc, 0xab, 0xd4,
diff --git a/vendor/github.com/golang/protobuf/ptypes/any/any.proto b/vendor/github.com/golang/protobuf/ptypes/any/any.proto
index 9d805e1e..c7486676 100644
--- a/vendor/github.com/golang/protobuf/ptypes/any/any.proto
+++ b/vendor/github.com/golang/protobuf/ptypes/any/any.proto
@@ -64,7 +64,7 @@ option objc_class_prefix = "GPB";
// foo = any.unpack(Foo.class);
// }
//
-// Example 3: Pack and unpack a message in Python.
+// Example 3: Pack and unpack a message in Python.
//
// foo = Foo(...)
// any = Any()
@@ -74,7 +74,7 @@ option objc_class_prefix = "GPB";
// any.Unpack(foo)
// ...
//
-// Example 4: Pack and unpack a message in Go
+// Example 4: Pack and unpack a message in Go
//
// foo := &pb.Foo{...}
// any, err := ptypes.MarshalAny(foo)
@@ -92,7 +92,7 @@ option objc_class_prefix = "GPB";
//
//
// JSON
-//
+// ====
// The JSON representation of an `Any` value uses the regular
// representation of the deserialized, embedded message, with an
// additional field `@type` which contains the type URL. Example:
diff --git a/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go b/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go
index b2410a09..4d75473b 100644
--- a/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go
+++ b/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go
@@ -1,16 +1,7 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: google/protobuf/duration.proto
-/*
-Package duration is a generated protocol buffer package.
-
-It is generated from these files:
- google/protobuf/duration.proto
-
-It has these top-level messages:
- Duration
-*/
-package duration
+package duration // import "github.com/golang/protobuf/ptypes/duration"
import proto "github.com/golang/protobuf/proto"
import fmt "fmt"
@@ -98,14 +89,36 @@ type Duration struct {
// of one second or more, a non-zero value for the `nanos` field must be
// of the same sign as the `seconds` field. Must be from -999,999,999
// to +999,999,999 inclusive.
- Nanos int32 `protobuf:"varint,2,opt,name=nanos" json:"nanos,omitempty"`
+ Nanos int32 `protobuf:"varint,2,opt,name=nanos" json:"nanos,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
}
-func (m *Duration) Reset() { *m = Duration{} }
-func (m *Duration) String() string { return proto.CompactTextString(m) }
-func (*Duration) ProtoMessage() {}
-func (*Duration) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
-func (*Duration) XXX_WellKnownType() string { return "Duration" }
+func (m *Duration) Reset() { *m = Duration{} }
+func (m *Duration) String() string { return proto.CompactTextString(m) }
+func (*Duration) ProtoMessage() {}
+func (*Duration) Descriptor() ([]byte, []int) {
+ return fileDescriptor_duration_e7d612259e3f0613, []int{0}
+}
+func (*Duration) XXX_WellKnownType() string { return "Duration" }
+func (m *Duration) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_Duration.Unmarshal(m, b)
+}
+func (m *Duration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_Duration.Marshal(b, m, deterministic)
+}
+func (dst *Duration) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Duration.Merge(dst, src)
+}
+func (m *Duration) XXX_Size() int {
+ return xxx_messageInfo_Duration.Size(m)
+}
+func (m *Duration) XXX_DiscardUnknown() {
+ xxx_messageInfo_Duration.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Duration proto.InternalMessageInfo
func (m *Duration) GetSeconds() int64 {
if m != nil {
@@ -125,9 +138,11 @@ func init() {
proto.RegisterType((*Duration)(nil), "google.protobuf.Duration")
}
-func init() { proto.RegisterFile("google/protobuf/duration.proto", fileDescriptor0) }
+func init() {
+ proto.RegisterFile("google/protobuf/duration.proto", fileDescriptor_duration_e7d612259e3f0613)
+}
-var fileDescriptor0 = []byte{
+var fileDescriptor_duration_e7d612259e3f0613 = []byte{
// 190 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4b, 0xcf, 0xcf, 0x4f,
0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x4f, 0x29, 0x2d, 0x4a,
diff --git a/vendor/github.com/golang/protobuf/ptypes/empty/empty.pb.go b/vendor/github.com/golang/protobuf/ptypes/empty/empty.pb.go
index e877b72c..a69b403c 100644
--- a/vendor/github.com/golang/protobuf/ptypes/empty/empty.pb.go
+++ b/vendor/github.com/golang/protobuf/ptypes/empty/empty.pb.go
@@ -1,16 +1,7 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: google/protobuf/empty.proto
-/*
-Package empty is a generated protocol buffer package.
-
-It is generated from these files:
- google/protobuf/empty.proto
-
-It has these top-level messages:
- Empty
-*/
-package empty
+package empty // import "github.com/golang/protobuf/ptypes/empty"
import proto "github.com/golang/protobuf/proto"
import fmt "fmt"
@@ -37,21 +28,43 @@ const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
//
// The JSON representation for `Empty` is empty JSON object `{}`.
type Empty struct {
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *Empty) Reset() { *m = Empty{} }
+func (m *Empty) String() string { return proto.CompactTextString(m) }
+func (*Empty) ProtoMessage() {}
+func (*Empty) Descriptor() ([]byte, []int) {
+ return fileDescriptor_empty_39e6d6db0632e5b2, []int{0}
+}
+func (*Empty) XXX_WellKnownType() string { return "Empty" }
+func (m *Empty) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_Empty.Unmarshal(m, b)
+}
+func (m *Empty) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_Empty.Marshal(b, m, deterministic)
+}
+func (dst *Empty) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Empty.Merge(dst, src)
+}
+func (m *Empty) XXX_Size() int {
+ return xxx_messageInfo_Empty.Size(m)
+}
+func (m *Empty) XXX_DiscardUnknown() {
+ xxx_messageInfo_Empty.DiscardUnknown(m)
}
-func (m *Empty) Reset() { *m = Empty{} }
-func (m *Empty) String() string { return proto.CompactTextString(m) }
-func (*Empty) ProtoMessage() {}
-func (*Empty) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
-func (*Empty) XXX_WellKnownType() string { return "Empty" }
+var xxx_messageInfo_Empty proto.InternalMessageInfo
func init() {
proto.RegisterType((*Empty)(nil), "google.protobuf.Empty")
}
-func init() { proto.RegisterFile("google/protobuf/empty.proto", fileDescriptor0) }
+func init() { proto.RegisterFile("google/protobuf/empty.proto", fileDescriptor_empty_39e6d6db0632e5b2) }
-var fileDescriptor0 = []byte{
+var fileDescriptor_empty_39e6d6db0632e5b2 = []byte{
// 148 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4e, 0xcf, 0xcf, 0x4f,
0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x4f, 0xcd, 0x2d, 0x28,
diff --git a/vendor/github.com/golang/protobuf/ptypes/regen.sh b/vendor/github.com/golang/protobuf/ptypes/regen.sh
deleted file mode 100755
index b50a9414..00000000
--- a/vendor/github.com/golang/protobuf/ptypes/regen.sh
+++ /dev/null
@@ -1,43 +0,0 @@
-#!/bin/bash -e
-#
-# This script fetches and rebuilds the "well-known types" protocol buffers.
-# To run this you will need protoc and goprotobuf installed;
-# see https://github.com/golang/protobuf for instructions.
-# You also need Go and Git installed.
-
-PKG=github.com/golang/protobuf/ptypes
-UPSTREAM=https://github.com/google/protobuf
-UPSTREAM_SUBDIR=src/google/protobuf
-PROTO_FILES=(any duration empty struct timestamp wrappers)
-
-function die() {
- echo 1>&2 $*
- exit 1
-}
-
-# Sanity check that the right tools are accessible.
-for tool in go git protoc protoc-gen-go; do
- q=$(which $tool) || die "didn't find $tool"
- echo 1>&2 "$tool: $q"
-done
-
-tmpdir=$(mktemp -d -t regen-wkt.XXXXXX)
-trap 'rm -rf $tmpdir' EXIT
-
-echo -n 1>&2 "finding package dir... "
-pkgdir=$(go list -f '{{.Dir}}' $PKG)
-echo 1>&2 $pkgdir
-base=$(echo $pkgdir | sed "s,/$PKG\$,,")
-echo 1>&2 "base: $base"
-cd "$base"
-
-echo 1>&2 "fetching latest protos... "
-git clone -q $UPSTREAM $tmpdir
-
-for file in ${PROTO_FILES[@]}; do
- echo 1>&2 "* $file"
- protoc --go_out=. -I$tmpdir/src $tmpdir/src/google/protobuf/$file.proto || die
- cp $tmpdir/src/google/protobuf/$file.proto $PKG/$file
-done
-
-echo 1>&2 "All OK"
diff --git a/vendor/github.com/golang/protobuf/ptypes/struct/struct.pb.go b/vendor/github.com/golang/protobuf/ptypes/struct/struct.pb.go
index 4cfe6081..442c0e09 100644
--- a/vendor/github.com/golang/protobuf/ptypes/struct/struct.pb.go
+++ b/vendor/github.com/golang/protobuf/ptypes/struct/struct.pb.go
@@ -1,18 +1,7 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: google/protobuf/struct.proto
-/*
-Package structpb is a generated protocol buffer package.
-
-It is generated from these files:
- google/protobuf/struct.proto
-
-It has these top-level messages:
- Struct
- Value
- ListValue
-*/
-package structpb
+package structpb // import "github.com/golang/protobuf/ptypes/struct"
import proto "github.com/golang/protobuf/proto"
import fmt "fmt"
@@ -50,8 +39,10 @@ var NullValue_value = map[string]int32{
func (x NullValue) String() string {
return proto.EnumName(NullValue_name, int32(x))
}
-func (NullValue) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
-func (NullValue) XXX_WellKnownType() string { return "NullValue" }
+func (NullValue) EnumDescriptor() ([]byte, []int) {
+ return fileDescriptor_struct_3a5a94e0c7801b27, []int{0}
+}
+func (NullValue) XXX_WellKnownType() string { return "NullValue" }
// `Struct` represents a structured data value, consisting of fields
// which map to dynamically typed values. In some languages, `Struct`
@@ -63,14 +54,36 @@ func (NullValue) XXX_WellKnownType() string { return "NullValue" }
// The JSON representation for `Struct` is JSON object.
type Struct struct {
// Unordered map of dynamically typed values.
- Fields map[string]*Value `protobuf:"bytes,1,rep,name=fields" json:"fields,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
+ Fields map[string]*Value `protobuf:"bytes,1,rep,name=fields" json:"fields,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
}
-func (m *Struct) Reset() { *m = Struct{} }
-func (m *Struct) String() string { return proto.CompactTextString(m) }
-func (*Struct) ProtoMessage() {}
-func (*Struct) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
-func (*Struct) XXX_WellKnownType() string { return "Struct" }
+func (m *Struct) Reset() { *m = Struct{} }
+func (m *Struct) String() string { return proto.CompactTextString(m) }
+func (*Struct) ProtoMessage() {}
+func (*Struct) Descriptor() ([]byte, []int) {
+ return fileDescriptor_struct_3a5a94e0c7801b27, []int{0}
+}
+func (*Struct) XXX_WellKnownType() string { return "Struct" }
+func (m *Struct) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_Struct.Unmarshal(m, b)
+}
+func (m *Struct) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_Struct.Marshal(b, m, deterministic)
+}
+func (dst *Struct) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Struct.Merge(dst, src)
+}
+func (m *Struct) XXX_Size() int {
+ return xxx_messageInfo_Struct.Size(m)
+}
+func (m *Struct) XXX_DiscardUnknown() {
+ xxx_messageInfo_Struct.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Struct proto.InternalMessageInfo
func (m *Struct) GetFields() map[string]*Value {
if m != nil {
@@ -95,14 +108,36 @@ type Value struct {
// *Value_BoolValue
// *Value_StructValue
// *Value_ListValue
- Kind isValue_Kind `protobuf_oneof:"kind"`
+ Kind isValue_Kind `protobuf_oneof:"kind"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *Value) Reset() { *m = Value{} }
+func (m *Value) String() string { return proto.CompactTextString(m) }
+func (*Value) ProtoMessage() {}
+func (*Value) Descriptor() ([]byte, []int) {
+ return fileDescriptor_struct_3a5a94e0c7801b27, []int{1}
+}
+func (*Value) XXX_WellKnownType() string { return "Value" }
+func (m *Value) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_Value.Unmarshal(m, b)
+}
+func (m *Value) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_Value.Marshal(b, m, deterministic)
+}
+func (dst *Value) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Value.Merge(dst, src)
+}
+func (m *Value) XXX_Size() int {
+ return xxx_messageInfo_Value.Size(m)
+}
+func (m *Value) XXX_DiscardUnknown() {
+ xxx_messageInfo_Value.DiscardUnknown(m)
}
-func (m *Value) Reset() { *m = Value{} }
-func (m *Value) String() string { return proto.CompactTextString(m) }
-func (*Value) ProtoMessage() {}
-func (*Value) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} }
-func (*Value) XXX_WellKnownType() string { return "Value" }
+var xxx_messageInfo_Value proto.InternalMessageInfo
type isValue_Kind interface {
isValue_Kind()
@@ -289,26 +324,26 @@ func _Value_OneofSizer(msg proto.Message) (n int) {
// kind
switch x := m.Kind.(type) {
case *Value_NullValue:
- n += proto.SizeVarint(1<<3 | proto.WireVarint)
+ n += 1 // tag and wire
n += proto.SizeVarint(uint64(x.NullValue))
case *Value_NumberValue:
- n += proto.SizeVarint(2<<3 | proto.WireFixed64)
+ n += 1 // tag and wire
n += 8
case *Value_StringValue:
- n += proto.SizeVarint(3<<3 | proto.WireBytes)
+ n += 1 // tag and wire
n += proto.SizeVarint(uint64(len(x.StringValue)))
n += len(x.StringValue)
case *Value_BoolValue:
- n += proto.SizeVarint(4<<3 | proto.WireVarint)
+ n += 1 // tag and wire
n += 1
case *Value_StructValue:
s := proto.Size(x.StructValue)
- n += proto.SizeVarint(5<<3 | proto.WireBytes)
+ n += 1 // tag and wire
n += proto.SizeVarint(uint64(s))
n += s
case *Value_ListValue:
s := proto.Size(x.ListValue)
- n += proto.SizeVarint(6<<3 | proto.WireBytes)
+ n += 1 // tag and wire
n += proto.SizeVarint(uint64(s))
n += s
case nil:
@@ -323,14 +358,36 @@ func _Value_OneofSizer(msg proto.Message) (n int) {
// The JSON representation for `ListValue` is JSON array.
type ListValue struct {
// Repeated field of dynamically typed values.
- Values []*Value `protobuf:"bytes,1,rep,name=values" json:"values,omitempty"`
+ Values []*Value `protobuf:"bytes,1,rep,name=values" json:"values,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
}
-func (m *ListValue) Reset() { *m = ListValue{} }
-func (m *ListValue) String() string { return proto.CompactTextString(m) }
-func (*ListValue) ProtoMessage() {}
-func (*ListValue) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} }
-func (*ListValue) XXX_WellKnownType() string { return "ListValue" }
+func (m *ListValue) Reset() { *m = ListValue{} }
+func (m *ListValue) String() string { return proto.CompactTextString(m) }
+func (*ListValue) ProtoMessage() {}
+func (*ListValue) Descriptor() ([]byte, []int) {
+ return fileDescriptor_struct_3a5a94e0c7801b27, []int{2}
+}
+func (*ListValue) XXX_WellKnownType() string { return "ListValue" }
+func (m *ListValue) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_ListValue.Unmarshal(m, b)
+}
+func (m *ListValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_ListValue.Marshal(b, m, deterministic)
+}
+func (dst *ListValue) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ListValue.Merge(dst, src)
+}
+func (m *ListValue) XXX_Size() int {
+ return xxx_messageInfo_ListValue.Size(m)
+}
+func (m *ListValue) XXX_DiscardUnknown() {
+ xxx_messageInfo_ListValue.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ListValue proto.InternalMessageInfo
func (m *ListValue) GetValues() []*Value {
if m != nil {
@@ -341,14 +398,17 @@ func (m *ListValue) GetValues() []*Value {
func init() {
proto.RegisterType((*Struct)(nil), "google.protobuf.Struct")
+ proto.RegisterMapType((map[string]*Value)(nil), "google.protobuf.Struct.FieldsEntry")
proto.RegisterType((*Value)(nil), "google.protobuf.Value")
proto.RegisterType((*ListValue)(nil), "google.protobuf.ListValue")
proto.RegisterEnum("google.protobuf.NullValue", NullValue_name, NullValue_value)
}
-func init() { proto.RegisterFile("google/protobuf/struct.proto", fileDescriptor0) }
+func init() {
+ proto.RegisterFile("google/protobuf/struct.proto", fileDescriptor_struct_3a5a94e0c7801b27)
+}
-var fileDescriptor0 = []byte{
+var fileDescriptor_struct_3a5a94e0c7801b27 = []byte{
// 417 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x92, 0x41, 0x8b, 0xd3, 0x40,
0x14, 0xc7, 0x3b, 0xc9, 0x36, 0x98, 0x17, 0x59, 0x97, 0x11, 0xb4, 0xac, 0xa2, 0xa1, 0x7b, 0x09,
diff --git a/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go b/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go
index e23e4a25..e9c22228 100644
--- a/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go
+++ b/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go
@@ -1,16 +1,7 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: google/protobuf/timestamp.proto
-/*
-Package timestamp is a generated protocol buffer package.
-
-It is generated from these files:
- google/protobuf/timestamp.proto
-
-It has these top-level messages:
- Timestamp
-*/
-package timestamp
+package timestamp // import "github.com/golang/protobuf/ptypes/timestamp"
import proto "github.com/golang/protobuf/proto"
import fmt "fmt"
@@ -101,7 +92,7 @@ const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
// to this format using [`strftime`](https://docs.python.org/2/library/time.html#time.strftime)
// with the time format spec '%Y-%m-%dT%H:%M:%S.%fZ'. Likewise, in Java, one
// can use the Joda Time's [`ISODateTimeFormat.dateTime()`](
-// http://joda-time.sourceforge.net/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime())
+// http://www.joda.org/joda-time/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime--)
// to obtain a formatter capable of generating timestamps in this format.
//
//
@@ -114,14 +105,36 @@ type Timestamp struct {
// second values with fractions must still have non-negative nanos values
// that count forward in time. Must be from 0 to 999,999,999
// inclusive.
- Nanos int32 `protobuf:"varint,2,opt,name=nanos" json:"nanos,omitempty"`
+ Nanos int32 `protobuf:"varint,2,opt,name=nanos" json:"nanos,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
}
-func (m *Timestamp) Reset() { *m = Timestamp{} }
-func (m *Timestamp) String() string { return proto.CompactTextString(m) }
-func (*Timestamp) ProtoMessage() {}
-func (*Timestamp) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
-func (*Timestamp) XXX_WellKnownType() string { return "Timestamp" }
+func (m *Timestamp) Reset() { *m = Timestamp{} }
+func (m *Timestamp) String() string { return proto.CompactTextString(m) }
+func (*Timestamp) ProtoMessage() {}
+func (*Timestamp) Descriptor() ([]byte, []int) {
+ return fileDescriptor_timestamp_b826e8e5fba671a8, []int{0}
+}
+func (*Timestamp) XXX_WellKnownType() string { return "Timestamp" }
+func (m *Timestamp) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_Timestamp.Unmarshal(m, b)
+}
+func (m *Timestamp) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_Timestamp.Marshal(b, m, deterministic)
+}
+func (dst *Timestamp) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Timestamp.Merge(dst, src)
+}
+func (m *Timestamp) XXX_Size() int {
+ return xxx_messageInfo_Timestamp.Size(m)
+}
+func (m *Timestamp) XXX_DiscardUnknown() {
+ xxx_messageInfo_Timestamp.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Timestamp proto.InternalMessageInfo
func (m *Timestamp) GetSeconds() int64 {
if m != nil {
@@ -141,9 +154,11 @@ func init() {
proto.RegisterType((*Timestamp)(nil), "google.protobuf.Timestamp")
}
-func init() { proto.RegisterFile("google/protobuf/timestamp.proto", fileDescriptor0) }
+func init() {
+ proto.RegisterFile("google/protobuf/timestamp.proto", fileDescriptor_timestamp_b826e8e5fba671a8)
+}
-var fileDescriptor0 = []byte{
+var fileDescriptor_timestamp_b826e8e5fba671a8 = []byte{
// 191 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4f, 0xcf, 0xcf, 0x4f,
0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x2f, 0xc9, 0xcc, 0x4d,
diff --git a/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.proto b/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.proto
index b7cbd175..06750ab1 100644
--- a/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.proto
+++ b/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.proto
@@ -114,7 +114,7 @@ option objc_class_prefix = "GPB";
// to this format using [`strftime`](https://docs.python.org/2/library/time.html#time.strftime)
// with the time format spec '%Y-%m-%dT%H:%M:%S.%fZ'. Likewise, in Java, one
// can use the Joda Time's [`ISODateTimeFormat.dateTime()`](
-// http://joda-time.sourceforge.net/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime())
+// http://www.joda.org/joda-time/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime--)
// to obtain a formatter capable of generating timestamps in this format.
//
//
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/collector.go b/vendor/github.com/prometheus/client_golang/prometheus/collector.go
index 623d3d83..3c9bae24 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/collector.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/collector.go
@@ -29,24 +29,35 @@ type Collector interface {
// collected by this Collector to the provided channel and returns once
// the last descriptor has been sent. The sent descriptors fulfill the
// consistency and uniqueness requirements described in the Desc
- // documentation. (It is valid if one and the same Collector sends
- // duplicate descriptors. Those duplicates are simply ignored. However,
- // two different Collectors must not send duplicate descriptors.) This
- // method idempotently sends the same descriptors throughout the
- // lifetime of the Collector. If a Collector encounters an error while
- // executing this method, it must send an invalid descriptor (created
- // with NewInvalidDesc) to signal the error to the registry.
+ // documentation.
+ //
+ // It is valid if one and the same Collector sends duplicate
+ // descriptors. Those duplicates are simply ignored. However, two
+ // different Collectors must not send duplicate descriptors.
+ //
+ // Sending no descriptor at all marks the Collector as “unchecked”,
+ // i.e. no checks will be performed at registration time, and the
+ // Collector may yield any Metric it sees fit in its Collect method.
+ //
+ // This method idempotently sends the same descriptors throughout the
+ // lifetime of the Collector.
+ //
+ // If a Collector encounters an error while executing this method, it
+ // must send an invalid descriptor (created with NewInvalidDesc) to
+ // signal the error to the registry.
Describe(chan<- *Desc)
// Collect is called by the Prometheus registry when collecting
// metrics. The implementation sends each collected metric via the
// provided channel and returns once the last metric has been sent. The
- // descriptor of each sent metric is one of those returned by
- // Describe. Returned metrics that share the same descriptor must differ
- // in their variable label values. This method may be called
- // concurrently and must therefore be implemented in a concurrency safe
- // way. Blocking occurs at the expense of total performance of rendering
- // all registered metrics. Ideally, Collector implementations support
- // concurrent readers.
+ // descriptor of each sent metric is one of those returned by Describe
+ // (unless the Collector is unchecked, see above). Returned metrics that
+ // share the same descriptor must differ in their variable label
+ // values.
+ //
+ // This method may be called concurrently and must therefore be
+ // implemented in a concurrency safe way. Blocking occurs at the expense
+ // of total performance of rendering all registered metrics. Ideally,
+ // Collector implementations support concurrent readers.
Collect(chan<- Metric)
}
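
A minimal custom Collector satisfying the contract documented above (a sketch, not part of the diff; queue_length and queueLength are made-up names): Describe idempotently sends the one descriptor, and Collect builds a fresh metric for it on every scrape.

package main

import "github.com/prometheus/client_golang/prometheus"

type queueCollector struct {
	desc *prometheus.Desc
}

func newQueueCollector() *queueCollector {
	return &queueCollector{
		desc: prometheus.NewDesc("queue_length", "Current length of the work queue.", nil, nil),
	}
}

func (c *queueCollector) Describe(ch chan<- *prometheus.Desc) {
	ch <- c.desc
}

func (c *queueCollector) Collect(ch chan<- prometheus.Metric) {
	ch <- prometheus.MustNewConstMetric(c.desc, prometheus.GaugeValue, queueLength())
}

func queueLength() float64 { return 42 } // stand-in for a real data source

func main() {
	prometheus.MustRegister(newQueueCollector())
}
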
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/counter.go b/vendor/github.com/prometheus/client_golang/prometheus/counter.go
index ee37949a..765e4550 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/counter.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/counter.go
@@ -15,6 +15,10 @@ package prometheus
import (
"errors"
+ "math"
+ "sync/atomic"
+
+ dto "github.com/prometheus/client_model/go"
)
// Counter is a Metric that represents a single numerical value that only ever
@@ -30,16 +34,8 @@ type Counter interface {
Metric
Collector
- // Set is used to set the Counter to an arbitrary value. It is only used
- // if you have to transfer a value from an external counter into this
- // Prometheus metric. Do not use it for regular handling of a
- // Prometheus counter (as it can be used to break the contract of
- // monotonically increasing values).
- //
- // Deprecated: Use NewConstMetric to create a counter for an external
- // value. A Counter should never be set.
- Set(float64)
- // Inc increments the counter by 1.
+ // Inc increments the counter by 1. Use Add to increment it by arbitrary
+ // non-negative values.
Inc()
// Add adds the given value to the counter. It panics if the value is <
// 0.
@@ -50,6 +46,14 @@ type Counter interface {
type CounterOpts Opts
// NewCounter creates a new Counter based on the provided CounterOpts.
+//
+// The returned implementation tracks the counter value in two separate
+// variables, a float64 and a uint64. The latter is used to track calls of the
+// Inc method and calls of the Add method with a value that can be represented
+// as a uint64. This allows atomic increments of the counter with optimal
+// performance. (It is common to have an Inc call in very hot execution paths.)
+// Both internal tracking values are added up in the Write method. This has to
+// be taken into account when it comes to precision and overflow behavior.
func NewCounter(opts CounterOpts) Counter {
desc := NewDesc(
BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
@@ -57,20 +61,58 @@ func NewCounter(opts CounterOpts) Counter {
nil,
opts.ConstLabels,
)
- result := &counter{value: value{desc: desc, valType: CounterValue, labelPairs: desc.constLabelPairs}}
+ result := &counter{desc: desc, labelPairs: desc.constLabelPairs}
result.init(result) // Init self-collection.
return result
}
type counter struct {
- value
+ // valBits contains the bits of the represented float64 value, while
+ // valInt stores values that are exact integers. Both have to go first
+ // in the struct to guarantee alignment for atomic operations.
+ // http://golang.org/pkg/sync/atomic/#pkg-note-BUG
+ valBits uint64
+ valInt uint64
+
+ selfCollector
+ desc *Desc
+
+ labelPairs []*dto.LabelPair
+}
+
+func (c *counter) Desc() *Desc {
+ return c.desc
}
func (c *counter) Add(v float64) {
if v < 0 {
panic(errors.New("counter cannot decrease in value"))
}
- c.value.Add(v)
+ ival := uint64(v)
+ if float64(ival) == v {
+ atomic.AddUint64(&c.valInt, ival)
+ return
+ }
+
+ for {
+ oldBits := atomic.LoadUint64(&c.valBits)
+ newBits := math.Float64bits(math.Float64frombits(oldBits) + v)
+ if atomic.CompareAndSwapUint64(&c.valBits, oldBits, newBits) {
+ return
+ }
+ }
+}
+
+func (c *counter) Inc() {
+ atomic.AddUint64(&c.valInt, 1)
+}
+
+func (c *counter) Write(out *dto.Metric) error {
+ fval := math.Float64frombits(atomic.LoadUint64(&c.valBits))
+ ival := atomic.LoadUint64(&c.valInt)
+ val := fval + float64(ival)
+
+ return populateMetric(CounterValue, val, c.labelPairs, out)
}
// CounterVec is a Collector that bundles a set of Counters that all share the
@@ -78,16 +120,12 @@ func (c *counter) Add(v float64) {
// if you want to count the same thing partitioned by various dimensions
// (e.g. number of HTTP requests, partitioned by response code and
// method). Create instances with NewCounterVec.
-//
-// CounterVec embeds MetricVec. See there for a full list of methods with
-// detailed documentation.
type CounterVec struct {
- *MetricVec
+ *metricVec
}
// NewCounterVec creates a new CounterVec based on the provided CounterOpts and
-// partitioned by the given label names. At least one label name must be
-// provided.
+// partitioned by the given label names.
func NewCounterVec(opts CounterOpts, labelNames []string) *CounterVec {
desc := NewDesc(
BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
@@ -96,34 +134,62 @@ func NewCounterVec(opts CounterOpts, labelNames []string) *CounterVec {
opts.ConstLabels,
)
return &CounterVec{
- MetricVec: newMetricVec(desc, func(lvs ...string) Metric {
- result := &counter{value: value{
- desc: desc,
- valType: CounterValue,
- labelPairs: makeLabelPairs(desc, lvs),
- }}
+ metricVec: newMetricVec(desc, func(lvs ...string) Metric {
+ if len(lvs) != len(desc.variableLabels) {
+ panic(errInconsistentCardinality)
+ }
+ result := &counter{desc: desc, labelPairs: makeLabelPairs(desc, lvs)}
result.init(result) // Init self-collection.
return result
}),
}
}
-// GetMetricWithLabelValues replaces the method of the same name in
-// MetricVec. The difference is that this method returns a Counter and not a
-// Metric so that no type conversion is required.
-func (m *CounterVec) GetMetricWithLabelValues(lvs ...string) (Counter, error) {
- metric, err := m.MetricVec.GetMetricWithLabelValues(lvs...)
+// GetMetricWithLabelValues returns the Counter for the given slice of label
+// values (same order as the VariableLabels in Desc). If that combination of
+// label values is accessed for the first time, a new Counter is created.
+//
+// It is possible to call this method without using the returned Counter to only
+// create the new Counter but leave it at its starting value 0. See also the
+// SummaryVec example.
+//
+// Keeping the Counter for later use is possible (and should be considered if
+// performance is critical), but keep in mind that Reset, DeleteLabelValues and
+// Delete can be used to delete the Counter from the CounterVec. In that case,
+// the Counter will still exist, but it will not be exported anymore, even if a
+// Counter with the same label values is created later.
+//
+// An error is returned if the number of label values is not the same as the
+// number of VariableLabels in Desc (minus any curried labels).
+//
+// Note that for more than one label value, this method is prone to mistakes
+// caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as
+// an alternative to avoid that type of mistake. For higher label numbers, the
+// latter has a much more readable (albeit more verbose) syntax, but it comes
+// with a performance overhead (for creating and processing the Labels map).
+// See also the GaugeVec example.
+func (v *CounterVec) GetMetricWithLabelValues(lvs ...string) (Counter, error) {
+ metric, err := v.metricVec.getMetricWithLabelValues(lvs...)
if metric != nil {
return metric.(Counter), err
}
return nil, err
}
-// GetMetricWith replaces the method of the same name in MetricVec. The
-// difference is that this method returns a Counter and not a Metric so that no
-// type conversion is required.
-func (m *CounterVec) GetMetricWith(labels Labels) (Counter, error) {
- metric, err := m.MetricVec.GetMetricWith(labels)
+// GetMetricWith returns the Counter for the given Labels map (the label names
+// must match those of the VariableLabels in Desc). If that label map is
+// accessed for the first time, a new Counter is created. Implications of
+// creating a Counter without using it and keeping the Counter for later use are
+// the same as for GetMetricWithLabelValues.
+//
+// An error is returned if the number and names of the Labels are inconsistent
+// with those of the VariableLabels in Desc (minus any curried labels).
+//
+// This method is used for the same purpose as
+// GetMetricWithLabelValues(...string). See there for pros and cons of the two
+// methods.
+func (v *CounterVec) GetMetricWith(labels Labels) (Counter, error) {
+ metric, err := v.metricVec.getMetricWith(labels)
if metric != nil {
return metric.(Counter), err
}
@@ -131,18 +197,57 @@ func (m *CounterVec) GetMetricWith(labels Labels) (Counter, error) {
}
// WithLabelValues works as GetMetricWithLabelValues, but panics where
-// GetMetricWithLabelValues would have returned an error. By not returning an
-// error, WithLabelValues allows shortcuts like
+// GetMetricWithLabelValues would have returned an error. Not returning an
+// error allows shortcuts like
// myVec.WithLabelValues("404", "GET").Add(42)
-func (m *CounterVec) WithLabelValues(lvs ...string) Counter {
- return m.MetricVec.WithLabelValues(lvs...).(Counter)
+func (v *CounterVec) WithLabelValues(lvs ...string) Counter {
+ c, err := v.GetMetricWithLabelValues(lvs...)
+ if err != nil {
+ panic(err)
+ }
+ return c
}
// With works as GetMetricWith, but panics where GetMetricWithLabels would have
-// returned an error. By not returning an error, With allows shortcuts like
-// myVec.With(Labels{"code": "404", "method": "GET"}).Add(42)
-func (m *CounterVec) With(labels Labels) Counter {
- return m.MetricVec.With(labels).(Counter)
+// returned an error. Not returning an error allows shortcuts like
+// myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Add(42)
+func (v *CounterVec) With(labels Labels) Counter {
+ c, err := v.GetMetricWith(labels)
+ if err != nil {
+ panic(err)
+ }
+ return c
+}
+
+// CurryWith returns a vector curried with the provided labels, i.e. the
+// returned vector has those labels pre-set for all labeled operations performed
+// on it. The cardinality of the curried vector is reduced accordingly. The
+// order of the remaining labels stays the same (just with the curried labels
+// taken out of the sequence – which is relevant for the
+// (GetMetric)WithLabelValues methods). It is possible to curry a curried
+// vector, but only with labels not yet used for currying before.
+//
+// The metrics contained in the CounterVec are shared between the curried and
+// uncurried vectors. They are just accessed differently. Curried and uncurried
+// vectors behave identically in terms of collection. Only one must be
+// registered with a given registry (usually the uncurried version). The Reset
+// method deletes all metrics, even if called on a curried vector.
+func (v *CounterVec) CurryWith(labels Labels) (*CounterVec, error) {
+ vec, err := v.curryWith(labels)
+ if vec != nil {
+ return &CounterVec{vec}, err
+ }
+ return nil, err
+}
+
+// MustCurryWith works as CurryWith but panics where CurryWith would have
+// returned an error.
+func (v *CounterVec) MustCurryWith(labels Labels) *CounterVec {
+ vec, err := v.CurryWith(labels)
+ if err != nil {
+ panic(err)
+ }
+ return vec
}
// CounterFunc is a Counter whose value is determined at collect time by calling a
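
Editor's note: a usage sketch of the CounterVec methods documented in the hunks above (WithLabelValues, GetMetricWith, MustCurryWith). The metric name and label values are illustrative only.

	package main

	import (
		"fmt"

		"github.com/prometheus/client_golang/prometheus"
	)

	func main() {
		requests := prometheus.NewCounterVec(
			prometheus.CounterOpts{
				Name: "example_http_requests_total", // hypothetical name
				Help: "HTTP requests, partitioned by status code and method.",
			},
			[]string{"code", "method"},
		)
		prometheus.MustRegister(requests)

		// Shortcut form; panics where GetMetricWithLabelValues would return an error.
		requests.WithLabelValues("404", "GET").Add(42)

		// Error-returning form of the same lookup.
		if c, err := requests.GetMetricWith(prometheus.Labels{"code": "200", "method": "POST"}); err == nil {
			c.Inc()
		}

		// Currying pre-sets one label; the order of the remaining labels stays the same.
		getOnly := requests.MustCurryWith(prometheus.Labels{"method": "GET"})
		getOnly.WithLabelValues("500").Inc()

		fmt.Println("counters updated")
	}
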
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/desc.go b/vendor/github.com/prometheus/client_golang/prometheus/desc.go
index 0593be50..4a755b0f 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/desc.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/desc.go
@@ -16,33 +16,15 @@ package prometheus
import (
"errors"
"fmt"
- "regexp"
"sort"
"strings"
"github.com/golang/protobuf/proto"
+ "github.com/prometheus/common/model"
dto "github.com/prometheus/client_model/go"
)
-var (
- metricNameRE = regexp.MustCompile(`^[a-zA-Z_][a-zA-Z0-9_:]*$`)
- labelNameRE = regexp.MustCompile("^[a-zA-Z_][a-zA-Z0-9_]*$")
-)
-
-// reservedLabelPrefix is a prefix which is not legal in user-supplied
-// label names.
-const reservedLabelPrefix = "__"
-
-// Labels represents a collection of label name -> value mappings. This type is
-// commonly used with the With(Labels) and GetMetricWith(Labels) methods of
-// metric vector Collectors, e.g.:
-// myVec.With(Labels{"code": "404", "method": "GET"}).Add(42)
-//
-// The other use-case is the specification of constant label pairs in Opts or to
-// create a Desc.
-type Labels map[string]string
-
// Desc is the descriptor used by every Prometheus Metric. It is essentially
// the immutable meta-data of a Metric. The normal Metric implementations
// included in this package manage their Desc under the hood. Users only have to
@@ -91,8 +73,7 @@ type Desc struct {
// and therefore not part of the Desc. (They are managed within the Metric.)
//
// For constLabels, the label values are constant. Therefore, they are fully
-// specified in the Desc. See the Opts documentation for the implications of
-// constant labels.
+// specified in the Desc. See the Collector example for a usage pattern.
func NewDesc(fqName, help string, variableLabels []string, constLabels Labels) *Desc {
d := &Desc{
fqName: fqName,
@@ -103,7 +84,7 @@ func NewDesc(fqName, help string, variableLabels []string, constLabels Labels) *
d.err = errors.New("empty help string")
return d
}
- if !metricNameRE.MatchString(fqName) {
+ if !model.IsValidMetricName(model.LabelValue(fqName)) {
d.err = fmt.Errorf("%q is not a valid metric name", fqName)
return d
}
@@ -127,6 +108,12 @@ func NewDesc(fqName, help string, variableLabels []string, constLabels Labels) *
for _, labelName := range labelNames {
labelValues = append(labelValues, constLabels[labelName])
}
+ // Validate the const label values. They can't have a wrong cardinality, so
+ // use len(labelValues) as expectedNumberOfValues.
+ if err := validateLabelValues(labelValues, len(labelValues)); err != nil {
+ d.err = err
+ return d
+ }
// Now add the variable label names, but prefix them with something that
// cannot be in a regular label name. That prevents matching the label
// dimension with a different mix between preset and variable labels.
@@ -142,6 +129,7 @@ func NewDesc(fqName, help string, variableLabels []string, constLabels Labels) *
d.err = errors.New("duplicate label names")
return d
}
+
vh := hashNew()
for _, val := range labelValues {
vh = hashAdd(vh, val)
@@ -198,8 +186,3 @@ func (d *Desc) String() string {
d.variableLabels,
)
}
-
-func checkLabelName(l string) bool {
- return labelNameRE.MatchString(l) &&
- !strings.HasPrefix(l, reservedLabelPrefix)
-}
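
Editor's note: the hunk above swaps the local metricNameRE regexp for model.IsValidMetricName from github.com/prometheus/common/model. A tiny standalone sketch of that helper (example names are arbitrary):

	package main

	import (
		"fmt"

		"github.com/prometheus/common/model"
	)

	func main() {
		for _, name := range []string{"http_requests_total", "0bad-name"} {
			// IsValidMetricName applies the same rule the removed regexp encoded.
			fmt.Println(name, model.IsValidMetricName(model.LabelValue(name)))
		}
	}
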
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/doc.go b/vendor/github.com/prometheus/client_golang/prometheus/doc.go
index b15a2d3b..5d9525de 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/doc.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/doc.go
@@ -11,13 +11,15 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-// Package prometheus provides metrics primitives to instrument code for
-// monitoring. It also offers a registry for metrics. Sub-packages allow to
-// expose the registered metrics via HTTP (package promhttp) or push them to a
-// Pushgateway (package push).
+// Package prometheus is the core instrumentation package. It provides metrics
+// primitives to instrument code for monitoring. It also offers a registry for
+// metrics. Sub-packages allow to expose the registered metrics via HTTP
+// (package promhttp) or push them to a Pushgateway (package push). There is
+// also a sub-package promauto, which provides metrics constructors with
+// automatic registration.
//
// All exported functions and methods are safe to be used concurrently unless
-//specified otherwise.
+// specified otherwise.
//
// A Basic Example
//
@@ -26,6 +28,7 @@
// package main
//
// import (
+// "log"
// "net/http"
//
// "github.com/prometheus/client_golang/prometheus"
@@ -59,7 +62,7 @@
// // The Handler function provides a default handler to expose metrics
// // via an HTTP server. "/metrics" is the usual endpoint for that.
// http.Handle("/metrics", promhttp.Handler())
-// http.ListenAndServe(":8080", nil)
+// log.Fatal(http.ListenAndServe(":8080", nil))
// }
//
//
@@ -69,9 +72,12 @@
// Metrics
//
// The number of exported identifiers in this package might appear a bit
-// overwhelming. Hovever, in addition to the basic plumbing shown in the example
+// overwhelming. However, in addition to the basic plumbing shown in the example
// above, you only need to understand the different metric types and their
-// vector versions for basic usage.
+// vector versions for basic usage. Furthermore, if you are not concerned with
+// fine-grained control of when and how to register metrics with the registry,
+// have a look at the promauto package, which will effectively allow you to
+// ignore registration altogether in simple cases.
//
// Above, you have already touched the Counter and the Gauge. There are two more
// advanced metric types: the Summary and Histogram. A more thorough description
@@ -95,8 +101,8 @@
// SummaryVec, HistogramVec, and UntypedVec are not.
//
// To create instances of Metrics and their vector versions, you need a suitable
-// …Opts struct, i.e. GaugeOpts, CounterOpts, SummaryOpts,
-// HistogramOpts, or UntypedOpts.
+// …Opts struct, i.e. GaugeOpts, CounterOpts, SummaryOpts, HistogramOpts, or
+// UntypedOpts.
//
// Custom Collectors and constant Metrics
//
@@ -114,8 +120,18 @@
// Metric instances “on the fly” using NewConstMetric, NewConstHistogram, and
// NewConstSummary (and their respective Must… versions). That will happen in
// the Collect method. The Describe method has to return separate Desc
-// instances, representative of the “throw-away” metrics to be created
-// later. NewDesc comes in handy to create those Desc instances.
+// instances, representative of the “throw-away” metrics to be created later.
+// NewDesc comes in handy to create those Desc instances. Alternatively, you
+// could return no Desc at all, which will mark the Collector “unchecked”. No
+// checks are performed at registration time, but metric consistency will still
+// be ensured at scrape time, i.e. any inconsistencies will lead to scrape
+// errors. Thus, with unchecked Collectors, the responsibility to not collect
+// metrics that lead to inconsistencies in the total scrape result lies with the
+// implementer of the Collector. While this is not a desirable state, it is
+// sometimes necessary. The typical use case is a situation where the exact
+// metrics to be returned by a Collector cannot be predicted at registration
+// time, but the implementer has sufficient knowledge of the whole system to
+// guarantee metric consistency.
//
// The Collector example illustrates the use case. You can also look at the
// source code of the processCollector (mirroring process metrics), the
@@ -129,34 +145,34 @@
// Advanced Uses of the Registry
//
// While MustRegister is the by far most common way of registering a Collector,
-// sometimes you might want to handle the errors the registration might
-// cause. As suggested by the name, MustRegister panics if an error occurs. With
-// the Register function, the error is returned and can be handled.
+// sometimes you might want to handle the errors the registration might cause.
+// As suggested by the name, MustRegister panics if an error occurs. With the
+// Register function, the error is returned and can be handled.
//
// An error is returned if the registered Collector is incompatible or
// inconsistent with already registered metrics. The registry aims for
-// consistency of the collected metrics according to the Prometheus data
-// model. Inconsistencies are ideally detected at registration time, not at
-// collect time. The former will usually be detected at start-up time of a
-// program, while the latter will only happen at scrape time, possibly not even
-// on the first scrape if the inconsistency only becomes relevant later. That is
-// the main reason why a Collector and a Metric have to describe themselves to
-// the registry.
+// consistency of the collected metrics according to the Prometheus data model.
+// Inconsistencies are ideally detected at registration time, not at collect
+// time. The former will usually be detected at start-up time of a program,
+// while the latter will only happen at scrape time, possibly not even on the
+// first scrape if the inconsistency only becomes relevant later. That is the
+// main reason why a Collector and a Metric have to describe themselves to the
+// registry.
//
// So far, everything we did operated on the so-called default registry, as it
-// can be found in the global DefaultRegistry variable. With NewRegistry, you
+// can be found in the global DefaultRegisterer variable. With NewRegistry, you
// can create a custom registry, or you can even implement the Registerer or
-// Gatherer interfaces yourself. The methods Register and Unregister work in
-// the same way on a custom registry as the global functions Register and
-// Unregister on the default registry.
-//
-// There are a number of uses for custom registries: You can use registries
-// with special properties, see NewPedanticRegistry. You can avoid global state,
-// as it is imposed by the DefaultRegistry. You can use multiple registries at
-// the same time to expose different metrics in different ways. You can use
+// Gatherer interfaces yourself. The methods Register and Unregister work in the
+// same way on a custom registry as the global functions Register and Unregister
+// on the default registry.
+//
+// There are a number of uses for custom registries: You can use registries with
+// special properties, see NewPedanticRegistry. You can avoid global state, as
+// it is imposed by the DefaultRegisterer. You can use multiple registries at
+// the same time to expose different metrics in different ways. You can use
// separate registries for testing purposes.
//
-// Also note that the DefaultRegistry comes registered with a Collector for Go
+// Also note that the DefaultRegisterer comes registered with a Collector for Go
// runtime metrics (via NewGoCollector) and a Collector for process metrics (via
// NewProcessCollector). With a custom registry, you are in control and decide
// yourself about the Collectors to register.
@@ -166,16 +182,20 @@
// The Registry implements the Gatherer interface. The caller of the Gather
// method can then expose the gathered metrics in some way. Usually, the metrics
// are served via HTTP on the /metrics endpoint. That's happening in the example
-// above. The tools to expose metrics via HTTP are in the promhttp
-// sub-package. (The top-level functions in the prometheus package are
-// deprecated.)
+// above. The tools to expose metrics via HTTP are in the promhttp sub-package.
+// (The top-level functions in the prometheus package are deprecated.)
//
// Pushing to the Pushgateway
//
// Function for pushing to the Pushgateway can be found in the push sub-package.
//
+// Graphite Bridge
+//
+// Functions and examples to push metrics from a Gatherer to Graphite can be
+// found in the graphite sub-package.
+//
// Other Means of Exposition
//
-// More ways of exposing metrics can easily be added. Sending metrics to
-// Graphite would be an example that will soon be implemented.
+// More ways of exposing metrics can easily be added by following the approaches
+// of the existing implementations.
package prometheus
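
Editor's note: the package doc above recommends custom registries over the default registerer; a sketch of that pattern wired to promhttp.HandlerFor. Metric name and port are illustrative assumptions.

	package main

	import (
		"log"
		"net/http"

		"github.com/prometheus/client_golang/prometheus"
		"github.com/prometheus/client_golang/prometheus/promhttp"
	)

	func main() {
		// A custom registry avoids the global state of the default registerer.
		reg := prometheus.NewRegistry()
		reg.MustRegister(prometheus.NewGoCollector())

		inFlight := prometheus.NewGauge(prometheus.GaugeOpts{
			Name: "example_in_flight_requests", // hypothetical name
			Help: "Requests currently being served.",
		})
		reg.MustRegister(inFlight)

		// Expose only the metrics gathered by this registry.
		http.Handle("/metrics", promhttp.HandlerFor(reg, promhttp.HandlerOpts{}))
		log.Fatal(http.ListenAndServe(":8080", nil))
	}
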
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/gauge.go b/vendor/github.com/prometheus/client_golang/prometheus/gauge.go
index 8b70e514..17c72d7e 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/gauge.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/gauge.go
@@ -13,6 +13,14 @@
package prometheus
+import (
+ "math"
+ "sync/atomic"
+ "time"
+
+ dto "github.com/prometheus/client_model/go"
+)
+
// Gauge is a Metric that represents a single numerical value that can
// arbitrarily go up and down.
//
@@ -27,29 +35,95 @@ type Gauge interface {
// Set sets the Gauge to an arbitrary value.
Set(float64)
- // Inc increments the Gauge by 1.
+ // Inc increments the Gauge by 1. Use Add to increment it by arbitrary
+ // values.
Inc()
- // Dec decrements the Gauge by 1.
+ // Dec decrements the Gauge by 1. Use Sub to decrement it by arbitrary
+ // values.
Dec()
- // Add adds the given value to the Gauge. (The value can be
- // negative, resulting in a decrease of the Gauge.)
+ // Add adds the given value to the Gauge. (The value can be negative,
+ // resulting in a decrease of the Gauge.)
Add(float64)
// Sub subtracts the given value from the Gauge. (The value can be
// negative, resulting in an increase of the Gauge.)
Sub(float64)
+
+ // SetToCurrentTime sets the Gauge to the current Unix time in seconds.
+ SetToCurrentTime()
}
// GaugeOpts is an alias for Opts. See there for doc comments.
type GaugeOpts Opts
// NewGauge creates a new Gauge based on the provided GaugeOpts.
+//
+// The returned implementation is optimized for a fast Set method. If you have a
+// choice for managing the value of a Gauge via Set vs. Inc/Dec/Add/Sub, pick
+// the former. For example, the Inc method of the returned Gauge is slower than
+// the Inc method of a Counter returned by NewCounter. This matches the typical
+// scenarios for Gauges and Counters, where the former tends to be Set-heavy and
+// the latter Inc-heavy.
func NewGauge(opts GaugeOpts) Gauge {
- return newValue(NewDesc(
+ desc := NewDesc(
BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
opts.Help,
nil,
opts.ConstLabels,
- ), GaugeValue, 0)
+ )
+ result := &gauge{desc: desc, labelPairs: desc.constLabelPairs}
+ result.init(result) // Init self-collection.
+ return result
+}
+
+type gauge struct {
+ // valBits contains the bits of the represented float64 value. It has
+ // to go first in the struct to guarantee alignment for atomic
+ // operations. http://golang.org/pkg/sync/atomic/#pkg-note-BUG
+ valBits uint64
+
+ selfCollector
+
+ desc *Desc
+ labelPairs []*dto.LabelPair
+}
+
+func (g *gauge) Desc() *Desc {
+ return g.desc
+}
+
+func (g *gauge) Set(val float64) {
+ atomic.StoreUint64(&g.valBits, math.Float64bits(val))
+}
+
+func (g *gauge) SetToCurrentTime() {
+ g.Set(float64(time.Now().UnixNano()) / 1e9)
+}
+
+func (g *gauge) Inc() {
+ g.Add(1)
+}
+
+func (g *gauge) Dec() {
+ g.Add(-1)
+}
+
+func (g *gauge) Add(val float64) {
+ for {
+ oldBits := atomic.LoadUint64(&g.valBits)
+ newBits := math.Float64bits(math.Float64frombits(oldBits) + val)
+ if atomic.CompareAndSwapUint64(&g.valBits, oldBits, newBits) {
+ return
+ }
+ }
+}
+
+func (g *gauge) Sub(val float64) {
+ g.Add(val * -1)
+}
+
+func (g *gauge) Write(out *dto.Metric) error {
+ val := math.Float64frombits(atomic.LoadUint64(&g.valBits))
+ return populateMetric(GaugeValue, val, g.labelPairs, out)
}
// GaugeVec is a Collector that bundles a set of Gauges that all share the same
@@ -58,12 +132,11 @@ func NewGauge(opts GaugeOpts) Gauge {
// (e.g. number of operations queued, partitioned by user and operation
// type). Create instances with NewGaugeVec.
type GaugeVec struct {
- *MetricVec
+ *metricVec
}
// NewGaugeVec creates a new GaugeVec based on the provided GaugeOpts and
-// partitioned by the given label names. At least one label name must be
-// provided.
+// partitioned by the given label names.
func NewGaugeVec(opts GaugeOpts, labelNames []string) *GaugeVec {
desc := NewDesc(
BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
@@ -72,28 +145,62 @@ func NewGaugeVec(opts GaugeOpts, labelNames []string) *GaugeVec {
opts.ConstLabels,
)
return &GaugeVec{
- MetricVec: newMetricVec(desc, func(lvs ...string) Metric {
- return newValue(desc, GaugeValue, 0, lvs...)
+ metricVec: newMetricVec(desc, func(lvs ...string) Metric {
+ if len(lvs) != len(desc.variableLabels) {
+ panic(errInconsistentCardinality)
+ }
+ result := &gauge{desc: desc, labelPairs: makeLabelPairs(desc, lvs)}
+ result.init(result) // Init self-collection.
+ return result
}),
}
}
-// GetMetricWithLabelValues replaces the method of the same name in
-// MetricVec. The difference is that this method returns a Gauge and not a
-// Metric so that no type conversion is required.
-func (m *GaugeVec) GetMetricWithLabelValues(lvs ...string) (Gauge, error) {
- metric, err := m.MetricVec.GetMetricWithLabelValues(lvs...)
+// GetMetricWithLabelValues returns the Gauge for the given slice of label
+// values (same order as the VariableLabels in Desc). If that combination of
+// label values is accessed for the first time, a new Gauge is created.
+//
+// It is possible to call this method without using the returned Gauge to only
+// create the new Gauge but leave it at its starting value 0. See also the
+// SummaryVec example.
+//
+// Keeping the Gauge for later use is possible (and should be considered if
+// performance is critical), but keep in mind that Reset, DeleteLabelValues and
+// Delete can be used to delete the Gauge from the GaugeVec. In that case, the
+// Gauge will still exist, but it will not be exported anymore, even if a
+// Gauge with the same label values is created later. See also the CounterVec
+// example.
+//
+// An error is returned if the number of label values is not the same as the
+// number of VariableLabels in Desc (minus any curried labels).
+//
+// Note that for more than one label value, this method is prone to mistakes
+// caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as
+// an alternative to avoid that type of mistake. For higher label numbers, the
+// latter has a much more readable (albeit more verbose) syntax, but it comes
+// with a performance overhead (for creating and processing the Labels map).
+func (v *GaugeVec) GetMetricWithLabelValues(lvs ...string) (Gauge, error) {
+ metric, err := v.metricVec.getMetricWithLabelValues(lvs...)
if metric != nil {
return metric.(Gauge), err
}
return nil, err
}
-// GetMetricWith replaces the method of the same name in MetricVec. The
-// difference is that this method returns a Gauge and not a Metric so that no
-// type conversion is required.
-func (m *GaugeVec) GetMetricWith(labels Labels) (Gauge, error) {
- metric, err := m.MetricVec.GetMetricWith(labels)
+// GetMetricWith returns the Gauge for the given Labels map (the label names
+// must match those of the VariableLabels in Desc). If that label map is
+// accessed for the first time, a new Gauge is created. Implications of
+// creating a Gauge without using it and keeping the Gauge for later use are
+// the same as for GetMetricWithLabelValues.
+//
+// An error is returned if the number and names of the Labels are inconsistent
+// with those of the VariableLabels in Desc (minus any curried labels).
+//
+// This method is used for the same purpose as
+// GetMetricWithLabelValues(...string). See there for pros and cons of the two
+// methods.
+func (v *GaugeVec) GetMetricWith(labels Labels) (Gauge, error) {
+ metric, err := v.metricVec.getMetricWith(labels)
if metric != nil {
return metric.(Gauge), err
}
@@ -101,18 +208,57 @@ func (m *GaugeVec) GetMetricWith(labels Labels) (Gauge, error) {
}
// WithLabelValues works as GetMetricWithLabelValues, but panics where
-// GetMetricWithLabelValues would have returned an error. By not returning an
-// error, WithLabelValues allows shortcuts like
+// GetMetricWithLabelValues would have returned an error. Not returning an
+// error allows shortcuts like
// myVec.WithLabelValues("404", "GET").Add(42)
-func (m *GaugeVec) WithLabelValues(lvs ...string) Gauge {
- return m.MetricVec.WithLabelValues(lvs...).(Gauge)
+func (v *GaugeVec) WithLabelValues(lvs ...string) Gauge {
+ g, err := v.GetMetricWithLabelValues(lvs...)
+ if err != nil {
+ panic(err)
+ }
+ return g
}
// With works as GetMetricWith, but panics where GetMetricWithLabels would have
-// returned an error. By not returning an error, With allows shortcuts like
-// myVec.With(Labels{"code": "404", "method": "GET"}).Add(42)
-func (m *GaugeVec) With(labels Labels) Gauge {
- return m.MetricVec.With(labels).(Gauge)
+// returned an error. Not returning an error allows shortcuts like
+// myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Add(42)
+func (v *GaugeVec) With(labels Labels) Gauge {
+ g, err := v.GetMetricWith(labels)
+ if err != nil {
+ panic(err)
+ }
+ return g
+}
+
+// CurryWith returns a vector curried with the provided labels, i.e. the
+// returned vector has those labels pre-set for all labeled operations performed
+// on it. The cardinality of the curried vector is reduced accordingly. The
+// order of the remaining labels stays the same (just with the curried labels
+// taken out of the sequence – which is relevant for the
+// (GetMetric)WithLabelValues methods). It is possible to curry a curried
+// vector, but only with labels not yet used for currying before.
+//
+// The metrics contained in the GaugeVec are shared between the curried and
+// uncurried vectors. They are just accessed differently. Curried and uncurried
+// vectors behave identically in terms of collection. Only one must be
+// registered with a given registry (usually the uncurried version). The Reset
+// method deletes all metrics, even if called on a curried vector.
+func (v *GaugeVec) CurryWith(labels Labels) (*GaugeVec, error) {
+ vec, err := v.curryWith(labels)
+ if vec != nil {
+ return &GaugeVec{vec}, err
+ }
+ return nil, err
+}
+
+// MustCurryWith works as CurryWith but panics where CurryWith would have
+// returned an error.
+func (v *GaugeVec) MustCurryWith(labels Labels) *GaugeVec {
+ vec, err := v.CurryWith(labels)
+ if err != nil {
+ panic(err)
+ }
+ return vec
}
// GaugeFunc is a Gauge whose value is determined at collect time by calling a
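
Editor's note: both the counter and gauge implementations above store the float64 value as its bit pattern and update it with a compare-and-swap loop. A standalone sketch of that technique, independent of this package:

	package main

	import (
		"fmt"
		"math"
		"sync/atomic"
	)

	// atomicFloat64 stores a float64 as its IEEE-754 bit pattern so it can be
	// updated with the atomic uint64 primitives.
	type atomicFloat64 struct {
		bits uint64 // must come first to guarantee alignment for atomic ops
	}

	func (f *atomicFloat64) Add(delta float64) {
		for {
			oldBits := atomic.LoadUint64(&f.bits)
			newBits := math.Float64bits(math.Float64frombits(oldBits) + delta)
			// Retry if another goroutine updated the value in between.
			if atomic.CompareAndSwapUint64(&f.bits, oldBits, newBits) {
				return
			}
		}
	}

	func (f *atomicFloat64) Load() float64 {
		return math.Float64frombits(atomic.LoadUint64(&f.bits))
	}

	func main() {
		var v atomicFloat64
		v.Add(1.5)
		v.Add(2.25)
		fmt.Println(v.Load()) // 3.75
	}
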
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go b/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go
index abc9d4ec..0440bd12 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go
@@ -8,26 +8,39 @@ import (
)
type goCollector struct {
- goroutines Gauge
- gcDesc *Desc
+ goroutinesDesc *Desc
+ threadsDesc *Desc
+ gcDesc *Desc
+ goInfoDesc *Desc
// metrics to describe and collect
metrics memStatsMetrics
}
-// NewGoCollector returns a collector which exports metrics about the current
-// go process.
+// NewGoCollector returns a collector which exports metrics about the current Go
+// process. This includes memory stats. To collect those, runtime.ReadMemStats
+// is called. This causes a stop-the-world, which is very short with Go1.9+
+// (~25µs). However, with older Go versions, the stop-the-world duration depends
+// on the heap size and can be quite significant (~1.7 ms/GiB as per
+// https://go-review.googlesource.com/c/go/+/34937).
func NewGoCollector() Collector {
return &goCollector{
- goroutines: NewGauge(GaugeOpts{
- Namespace: "go",
- Name: "goroutines",
- Help: "Number of goroutines that currently exist.",
- }),
+ goroutinesDesc: NewDesc(
+ "go_goroutines",
+ "Number of goroutines that currently exist.",
+ nil, nil),
+ threadsDesc: NewDesc(
+ "go_threads",
+ "Number of OS threads created.",
+ nil, nil),
gcDesc: NewDesc(
"go_gc_duration_seconds",
"A summary of the GC invocation durations.",
nil, nil),
+ goInfoDesc: NewDesc(
+ "go_info",
+ "Information about the Go environment.",
+ nil, Labels{"version": runtime.Version()}),
metrics: memStatsMetrics{
{
desc: NewDesc(
@@ -48,7 +61,7 @@ func NewGoCollector() Collector {
}, {
desc: NewDesc(
memstatNamespace("sys_bytes"),
- "Number of bytes obtained by system. Sum of all system allocations.",
+ "Number of bytes obtained from system.",
nil, nil,
),
eval: func(ms *runtime.MemStats) float64 { return float64(ms.Sys) },
@@ -111,12 +124,12 @@ func NewGoCollector() Collector {
valType: GaugeValue,
}, {
desc: NewDesc(
- memstatNamespace("heap_released_bytes_total"),
- "Total number of heap bytes released to OS.",
+ memstatNamespace("heap_released_bytes"),
+ "Number of heap bytes released to OS.",
nil, nil,
),
eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapReleased) },
- valType: CounterValue,
+ valType: GaugeValue,
}, {
desc: NewDesc(
memstatNamespace("heap_objects"),
@@ -213,6 +226,14 @@ func NewGoCollector() Collector {
),
eval: func(ms *runtime.MemStats) float64 { return float64(ms.LastGC) / 1e9 },
valType: GaugeValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("gc_cpu_fraction"),
+ "The fraction of this program's available CPU time used by the GC since the program started.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return ms.GCCPUFraction },
+ valType: GaugeValue,
},
},
}
@@ -224,9 +245,10 @@ func memstatNamespace(s string) string {
// Describe returns all descriptions of the collector.
func (c *goCollector) Describe(ch chan<- *Desc) {
- ch <- c.goroutines.Desc()
+ ch <- c.goroutinesDesc
+ ch <- c.threadsDesc
ch <- c.gcDesc
-
+ ch <- c.goInfoDesc
for _, i := range c.metrics {
ch <- i.desc
}
@@ -234,8 +256,9 @@ func (c *goCollector) Describe(ch chan<- *Desc) {
// Collect returns the current state of all metrics of the collector.
func (c *goCollector) Collect(ch chan<- Metric) {
- c.goroutines.Set(float64(runtime.NumGoroutine()))
- ch <- c.goroutines
+ ch <- MustNewConstMetric(c.goroutinesDesc, GaugeValue, float64(runtime.NumGoroutine()))
+ n, _ := runtime.ThreadCreateProfile(nil)
+ ch <- MustNewConstMetric(c.threadsDesc, GaugeValue, float64(n))
var stats debug.GCStats
stats.PauseQuantiles = make([]time.Duration, 5)
@@ -246,7 +269,9 @@ func (c *goCollector) Collect(ch chan<- Metric) {
quantiles[float64(idx+1)/float64(len(stats.PauseQuantiles)-1)] = pq.Seconds()
}
quantiles[0.0] = stats.PauseQuantiles[0].Seconds()
- ch <- MustNewConstSummary(c.gcDesc, uint64(stats.NumGC), float64(stats.PauseTotal.Seconds()), quantiles)
+ ch <- MustNewConstSummary(c.gcDesc, uint64(stats.NumGC), stats.PauseTotal.Seconds(), quantiles)
+
+ ch <- MustNewConstMetric(c.goInfoDesc, GaugeValue, 1)
ms := &runtime.MemStats{}
runtime.ReadMemStats(ms)
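
Editor's note: a small sketch of collecting the Go runtime metrics described above without an HTTP server, by gathering from a registry directly. Output handling is illustrative.

	package main

	import (
		"fmt"
		"log"

		"github.com/prometheus/client_golang/prometheus"
	)

	func main() {
		reg := prometheus.NewRegistry()
		reg.MustRegister(prometheus.NewGoCollector())

		// Gather triggers Collect, which calls runtime.ReadMemStats once.
		mfs, err := reg.Gather()
		if err != nil {
			log.Fatal(err)
		}
		for _, mf := range mfs {
			fmt.Println(mf.GetName()) // e.g. go_goroutines, go_threads, go_gc_duration_seconds, go_info
		}
	}
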
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/histogram.go b/vendor/github.com/prometheus/client_golang/prometheus/histogram.go
index 9719e8fa..331783a7 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/histogram.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/histogram.go
@@ -126,23 +126,16 @@ type HistogramOpts struct {
// string.
Help string
- // ConstLabels are used to attach fixed labels to this
- // Histogram. Histograms with the same fully-qualified name must have the
- // same label names in their ConstLabels.
+ // ConstLabels are used to attach fixed labels to this metric. Metrics
+ // with the same fully-qualified name must have the same label names in
+ // their ConstLabels.
//
- // Note that in most cases, labels have a value that varies during the
- // lifetime of a process. Those labels are usually managed with a
- // HistogramVec. ConstLabels serve only special purposes. One is for the
- // special case where the value of a label does not change during the
- // lifetime of a process, e.g. if the revision of the running binary is
- // put into a label. Another, more advanced purpose is if more than one
- // Collector needs to collect Histograms with the same fully-qualified
- // name. In that case, those Summaries must differ in the values of
- // their ConstLabels. See the Collector examples.
- //
- // If the value of a label never changes (not even between binaries),
- // that label most likely should not be a label at all (but part of the
- // metric name).
+ // ConstLabels are only used rarely. In particular, do not use them to
+ // attach the same labels to all your metrics. Those use cases are
+ // better covered by target labels set by the scraping Prometheus
+ // server, or by one specific metric (e.g. a build_info or a
+ // machine_role metric). See also
+ // https://prometheus.io/docs/instrumenting/writing_exporters/#target-labels,-not-static-scraped-labels
ConstLabels Labels
// Buckets defines the buckets into which observations are counted. Each
@@ -287,12 +280,11 @@ func (h *histogram) Write(out *dto.Metric) error {
// (e.g. HTTP request latencies, partitioned by status code and method). Create
// instances with NewHistogramVec.
type HistogramVec struct {
- *MetricVec
+ *metricVec
}
// NewHistogramVec creates a new HistogramVec based on the provided HistogramOpts and
-// partitioned by the given label names. At least one label name must be
-// provided.
+// partitioned by the given label names.
func NewHistogramVec(opts HistogramOpts, labelNames []string) *HistogramVec {
desc := NewDesc(
BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
@@ -301,47 +293,116 @@ func NewHistogramVec(opts HistogramOpts, labelNames []string) *HistogramVec {
opts.ConstLabels,
)
return &HistogramVec{
- MetricVec: newMetricVec(desc, func(lvs ...string) Metric {
+ metricVec: newMetricVec(desc, func(lvs ...string) Metric {
return newHistogram(desc, opts, lvs...)
}),
}
}
-// GetMetricWithLabelValues replaces the method of the same name in
-// MetricVec. The difference is that this method returns a Histogram and not a
-// Metric so that no type conversion is required.
-func (m *HistogramVec) GetMetricWithLabelValues(lvs ...string) (Histogram, error) {
- metric, err := m.MetricVec.GetMetricWithLabelValues(lvs...)
+// GetMetricWithLabelValues returns the Histogram for the given slice of label
+// values (same order as the VariableLabels in Desc). If that combination of
+// label values is accessed for the first time, a new Histogram is created.
+//
+// It is possible to call this method without using the returned Histogram to only
+// create the new Histogram but leave it at its starting value, a Histogram without
+// any observations.
+//
+// Keeping the Histogram for later use is possible (and should be considered if
+// performance is critical), but keep in mind that Reset, DeleteLabelValues and
+// Delete can be used to delete the Histogram from the HistogramVec. In that case, the
+// Histogram will still exist, but it will not be exported anymore, even if a
+// Histogram with the same label values is created later. See also the CounterVec
+// example.
+//
+// An error is returned if the number of label values is not the same as the
+// number of VariableLabels in Desc (minus any curried labels).
+//
+// Note that for more than one label value, this method is prone to mistakes
+// caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as
+// an alternative to avoid that type of mistake. For higher label numbers, the
+// latter has a much more readable (albeit more verbose) syntax, but it comes
+// with a performance overhead (for creating and processing the Labels map).
+// See also the GaugeVec example.
+func (v *HistogramVec) GetMetricWithLabelValues(lvs ...string) (Observer, error) {
+ metric, err := v.metricVec.getMetricWithLabelValues(lvs...)
if metric != nil {
- return metric.(Histogram), err
+ return metric.(Observer), err
}
return nil, err
}
-// GetMetricWith replaces the method of the same name in MetricVec. The
-// difference is that this method returns a Histogram and not a Metric so that no
-// type conversion is required.
-func (m *HistogramVec) GetMetricWith(labels Labels) (Histogram, error) {
- metric, err := m.MetricVec.GetMetricWith(labels)
+// GetMetricWith returns the Histogram for the given Labels map (the label names
+// must match those of the VariableLabels in Desc). If that label map is
+// accessed for the first time, a new Histogram is created. Implications of
+// creating a Histogram without using it and keeping the Histogram for later use
+// are the same as for GetMetricWithLabelValues.
+//
+// An error is returned if the number and names of the Labels are inconsistent
+// with those of the VariableLabels in Desc (minus any curried labels).
+//
+// This method is used for the same purpose as
+// GetMetricWithLabelValues(...string). See there for pros and cons of the two
+// methods.
+func (v *HistogramVec) GetMetricWith(labels Labels) (Observer, error) {
+ metric, err := v.metricVec.getMetricWith(labels)
if metric != nil {
- return metric.(Histogram), err
+ return metric.(Observer), err
}
return nil, err
}
// WithLabelValues works as GetMetricWithLabelValues, but panics where
-// GetMetricWithLabelValues would have returned an error. By not returning an
-// error, WithLabelValues allows shortcuts like
+// GetMetricWithLabelValues would have returned an error. Not returning an
+// error allows shortcuts like
// myVec.WithLabelValues("404", "GET").Observe(42.21)
-func (m *HistogramVec) WithLabelValues(lvs ...string) Histogram {
- return m.MetricVec.WithLabelValues(lvs...).(Histogram)
+func (v *HistogramVec) WithLabelValues(lvs ...string) Observer {
+ h, err := v.GetMetricWithLabelValues(lvs...)
+ if err != nil {
+ panic(err)
+ }
+ return h
+}
+
+// With works as GetMetricWith but panics where GetMetricWithLabels would have
+// returned an error. Not returning an error allows shortcuts like
+// myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Observe(42.21)
+func (v *HistogramVec) With(labels Labels) Observer {
+ h, err := v.GetMetricWith(labels)
+ if err != nil {
+ panic(err)
+ }
+ return h
}
-// With works as GetMetricWith, but panics where GetMetricWithLabels would have
-// returned an error. By not returning an error, With allows shortcuts like
-// myVec.With(Labels{"code": "404", "method": "GET"}).Observe(42.21)
-func (m *HistogramVec) With(labels Labels) Histogram {
- return m.MetricVec.With(labels).(Histogram)
+// CurryWith returns a vector curried with the provided labels, i.e. the
+// returned vector has those labels pre-set for all labeled operations performed
+// on it. The cardinality of the curried vector is reduced accordingly. The
+// order of the remaining labels stays the same (just with the curried labels
+// taken out of the sequence – which is relevant for the
+// (GetMetric)WithLabelValues methods). It is possible to curry a curried
+// vector, but only with labels not yet used for currying before.
+//
+// The metrics contained in the HistogramVec are shared between the curried and
+// uncurried vectors. They are just accessed differently. Curried and uncurried
+// vectors behave identically in terms of collection. Only one must be
+// registered with a given registry (usually the uncurried version). The Reset
+// method deletes all metrics, even if called on a curried vector.
+func (v *HistogramVec) CurryWith(labels Labels) (ObserverVec, error) {
+ vec, err := v.curryWith(labels)
+ if vec != nil {
+ return &HistogramVec{vec}, err
+ }
+ return nil, err
+}
+
+// MustCurryWith works as CurryWith but panics where CurryWith would have
+// returned an error.
+func (v *HistogramVec) MustCurryWith(labels Labels) ObserverVec {
+ vec, err := v.CurryWith(labels)
+ if err != nil {
+ panic(err)
+ }
+ return vec
}
type constHistogram struct {
@@ -401,8 +462,8 @@ func NewConstHistogram(
buckets map[float64]uint64,
labelValues ...string,
) (Metric, error) {
- if len(desc.variableLabels) != len(labelValues) {
- return nil, errInconsistentCardinality
+ if err := validateLabelValues(labelValues, len(desc.variableLabels)); err != nil {
+ return nil, err
}
return &constHistogram{
desc: desc,
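
Editor's note: a usage sketch of the Observer-returning HistogramVec methods documented above. Bucket layout, metric name, and label values are illustrative assumptions.

	package main

	import (
		"time"

		"github.com/prometheus/client_golang/prometheus"
	)

	func main() {
		latency := prometheus.NewHistogramVec(
			prometheus.HistogramOpts{
				Name:    "example_request_duration_seconds", // hypothetical name
				Help:    "Request latency, partitioned by handler and method.",
				Buckets: prometheus.DefBuckets,
			},
			[]string{"handler", "method"},
		)
		prometheus.MustRegister(latency)

		// WithLabelValues now returns an Observer rather than a Histogram.
		start := time.Now()
		// ... handle the request ...
		latency.WithLabelValues("search", "GET").Observe(time.Since(start).Seconds())

		// Currying fixes the handler label once and yields an ObserverVec.
		searchOnly := latency.MustCurryWith(prometheus.Labels{"handler": "search"})
		searchOnly.WithLabelValues("POST").Observe(0.123)
	}
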
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/http.go b/vendor/github.com/prometheus/client_golang/prometheus/http.go
index b2b00196..3f1fa151 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/http.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/http.go
@@ -61,8 +61,8 @@ func giveBuf(buf *bytes.Buffer) {
// name).
//
// Deprecated: Please note the issues described in the doc comment of
-// InstrumentHandler. You might want to consider using promhttp.Handler instead
-// (which is non instrumented).
+// InstrumentHandler. You might want to consider using
+// promhttp.InstrumentedHandler instead.
func Handler() http.Handler {
return InstrumentHandler("prometheus", UninstrumentedHandler())
}
@@ -95,7 +95,7 @@ func UninstrumentedHandler() http.Handler {
closer.Close()
}
if lastErr != nil && buf.Len() == 0 {
- http.Error(w, "No metrics encoded, last error:\n\n"+err.Error(), http.StatusInternalServerError)
+ http.Error(w, "No metrics encoded, last error:\n\n"+lastErr.Error(), http.StatusInternalServerError)
return
}
header := w.Header()
@@ -115,7 +115,7 @@ func decorateWriter(request *http.Request, writer io.Writer) (io.Writer, string)
header := request.Header.Get(acceptEncodingHeader)
parts := strings.Split(header, ",")
for _, part := range parts {
- part := strings.TrimSpace(part)
+ part = strings.TrimSpace(part)
if part == "gzip" || strings.HasPrefix(part, "gzip;") {
return gzip.NewWriter(writer), "gzip"
}
@@ -139,16 +139,6 @@ var now nower = nowFunc(func() time.Time {
return time.Now()
})
-func nowSeries(t ...time.Time) nower {
- return nowFunc(func() time.Time {
- defer func() {
- t = t[1:]
- }()
-
- return t[0]
- })
-}
-
// InstrumentHandler wraps the given HTTP handler for instrumentation. It
// registers four metric collectors (if not already done) and reports HTTP
// metrics to the (newly or already) registered collectors: http_requests_total
@@ -158,7 +148,8 @@ func nowSeries(t ...time.Time) nower {
// value. http_requests_total is a metric vector partitioned by HTTP method
// (label name "method") and HTTP status code (label name "code").
//
-// Deprecated: InstrumentHandler has several issues:
+// Deprecated: InstrumentHandler has several issues. Use the tooling provided in
+// package promhttp instead. The issues are the following:
//
// - It uses Summaries rather than Histograms. Summaries are not useful if
// aggregation across multiple instances is required.
@@ -172,9 +163,8 @@ func nowSeries(t ...time.Time) nower {
// httputil.ReverseProxy is a prominent example for a handler
// performing such writes.
//
-// Upcoming versions of this package will provide ways of instrumenting HTTP
-// handlers that are more flexible and have fewer issues. Please prefer direct
-// instrumentation in the meantime.
+// - It has additional issues with HTTP/2, cf.
+// https://github.com/prometheus/client_golang/issues/272.
func InstrumentHandler(handlerName string, handler http.Handler) http.HandlerFunc {
return InstrumentHandlerFunc(handlerName, handler.ServeHTTP)
}
@@ -184,12 +174,13 @@ func InstrumentHandler(handlerName string, handler http.Handler) http.HandlerFun
// issues).
//
// Deprecated: InstrumentHandlerFunc is deprecated for the same reasons as
-// InstrumentHandler is.
+// InstrumentHandler is. Use the tooling provided in package promhttp instead.
func InstrumentHandlerFunc(handlerName string, handlerFunc func(http.ResponseWriter, *http.Request)) http.HandlerFunc {
return InstrumentHandlerFuncWithOpts(
SummaryOpts{
Subsystem: "http",
ConstLabels: Labels{"handler": handlerName},
+ Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001},
},
handlerFunc,
)
@@ -222,7 +213,7 @@ func InstrumentHandlerFunc(handlerName string, handlerFunc func(http.ResponseWri
// SummaryOpts.
//
// Deprecated: InstrumentHandlerWithOpts is deprecated for the same reasons as
-// InstrumentHandler is.
+// InstrumentHandler is. Use the tooling provided in package promhttp instead.
func InstrumentHandlerWithOpts(opts SummaryOpts, handler http.Handler) http.HandlerFunc {
return InstrumentHandlerFuncWithOpts(opts, handler.ServeHTTP)
}
@@ -233,7 +224,7 @@ func InstrumentHandlerWithOpts(opts SummaryOpts, handler http.Handler) http.Hand
// SummaryOpts are used.
//
// Deprecated: InstrumentHandlerFuncWithOpts is deprecated for the same reasons
-// as InstrumentHandler is.
+// as InstrumentHandler is. Use the tooling provided in package promhttp instead.
func InstrumentHandlerFuncWithOpts(opts SummaryOpts, handlerFunc func(http.ResponseWriter, *http.Request)) http.HandlerFunc {
reqCnt := NewCounterVec(
CounterOpts{
@@ -245,23 +236,46 @@ func InstrumentHandlerFuncWithOpts(opts SummaryOpts, handlerFunc func(http.Respo
},
instLabels,
)
+ if err := Register(reqCnt); err != nil {
+ if are, ok := err.(AlreadyRegisteredError); ok {
+ reqCnt = are.ExistingCollector.(*CounterVec)
+ } else {
+ panic(err)
+ }
+ }
opts.Name = "request_duration_microseconds"
opts.Help = "The HTTP request latencies in microseconds."
reqDur := NewSummary(opts)
+ if err := Register(reqDur); err != nil {
+ if are, ok := err.(AlreadyRegisteredError); ok {
+ reqDur = are.ExistingCollector.(Summary)
+ } else {
+ panic(err)
+ }
+ }
opts.Name = "request_size_bytes"
opts.Help = "The HTTP request sizes in bytes."
reqSz := NewSummary(opts)
+ if err := Register(reqSz); err != nil {
+ if are, ok := err.(AlreadyRegisteredError); ok {
+ reqSz = are.ExistingCollector.(Summary)
+ } else {
+ panic(err)
+ }
+ }
opts.Name = "response_size_bytes"
opts.Help = "The HTTP response sizes in bytes."
resSz := NewSummary(opts)
-
- regReqCnt := MustRegisterOrGet(reqCnt).(*CounterVec)
- regReqDur := MustRegisterOrGet(reqDur).(Summary)
- regReqSz := MustRegisterOrGet(reqSz).(Summary)
- regResSz := MustRegisterOrGet(resSz).(Summary)
+ if err := Register(resSz); err != nil {
+ if are, ok := err.(AlreadyRegisteredError); ok {
+ resSz = are.ExistingCollector.(Summary)
+ } else {
+ panic(err)
+ }
+ }
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
now := time.Now()
@@ -285,15 +299,15 @@ func InstrumentHandlerFuncWithOpts(opts SummaryOpts, handlerFunc func(http.Respo
method := sanitizeMethod(r.Method)
code := sanitizeCode(delegate.status)
- regReqCnt.WithLabelValues(method, code).Inc()
- regReqDur.Observe(elapsed)
- regResSz.Observe(float64(delegate.written))
- regReqSz.Observe(float64(<-out))
+ reqCnt.WithLabelValues(method, code).Inc()
+ reqDur.Observe(elapsed)
+ resSz.Observe(float64(delegate.written))
+ reqSz.Observe(float64(<-out))
})
}
func computeApproximateRequestSize(r *http.Request) <-chan int {
- // Get URL length in current go routine for avoiding a race condition.
+ // Get URL length in current goroutine for avoiding a race condition.
// HandlerFunc that runs in parallel may modify the URL.
s := 0
if r.URL != nil {
@@ -328,10 +342,9 @@ func computeApproximateRequestSize(r *http.Request) <-chan int {
type responseWriterDelegator struct {
http.ResponseWriter
- handler, method string
- status int
- written int64
- wroteHeader bool
+ status int
+ written int64
+ wroteHeader bool
}
func (r *responseWriterDelegator) WriteHeader(code int) {
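
Editor's note: the hunks above replace MustRegisterOrGet with the Register / AlreadyRegisteredError idiom. A standalone sketch of that pattern; the counter name is a hypothetical placeholder.

	package main

	import "github.com/prometheus/client_golang/prometheus"

	func registerOrReuse(c prometheus.Counter) prometheus.Counter {
		if err := prometheus.Register(c); err != nil {
			if are, ok := err.(prometheus.AlreadyRegisteredError); ok {
				// Reuse the collector that is already registered under this Desc.
				return are.ExistingCollector.(prometheus.Counter)
			}
			panic(err)
		}
		return c
	}

	func main() {
		opts := prometheus.CounterOpts{
			Name: "example_events_total", // hypothetical name
			Help: "Total number of handled events.",
		}
		a := registerOrReuse(prometheus.NewCounter(opts))
		b := registerOrReuse(prometheus.NewCounter(opts)) // yields the already registered counter
		a.Inc()
		b.Inc()
	}
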
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/labels.go b/vendor/github.com/prometheus/client_golang/prometheus/labels.go
new file mode 100644
index 00000000..2502e373
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/labels.go
@@ -0,0 +1,57 @@
+package prometheus
+
+import (
+ "errors"
+ "fmt"
+ "strings"
+ "unicode/utf8"
+
+ "github.com/prometheus/common/model"
+)
+
+// Labels represents a collection of label name -> value mappings. This type is
+// commonly used with the With(Labels) and GetMetricWith(Labels) methods of
+// metric vector Collectors, e.g.:
+// myVec.With(Labels{"code": "404", "method": "GET"}).Add(42)
+//
+// The other use-case is the specification of constant label pairs in Opts or to
+// create a Desc.
+type Labels map[string]string
+
+// reservedLabelPrefix is a prefix which is not legal in user-supplied
+// label names.
+const reservedLabelPrefix = "__"
+
+var errInconsistentCardinality = errors.New("inconsistent label cardinality")
+
+func validateValuesInLabels(labels Labels, expectedNumberOfValues int) error {
+ if len(labels) != expectedNumberOfValues {
+ return errInconsistentCardinality
+ }
+
+ for name, val := range labels {
+ if !utf8.ValidString(val) {
+ return fmt.Errorf("label %s: value %q is not valid UTF-8", name, val)
+ }
+ }
+
+ return nil
+}
+
+func validateLabelValues(vals []string, expectedNumberOfValues int) error {
+ if len(vals) != expectedNumberOfValues {
+ return errInconsistentCardinality
+ }
+
+ for _, val := range vals {
+ if !utf8.ValidString(val) {
+ return fmt.Errorf("label value %q is not valid UTF-8", val)
+ }
+ }
+
+ return nil
+}
+
+func checkLabelName(l string) bool {
+ return model.LabelName(l).IsValid() && !strings.HasPrefix(l, reservedLabelPrefix)
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/metric.go b/vendor/github.com/prometheus/client_golang/prometheus/metric.go
index d4063d98..76035bca 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/metric.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/metric.go
@@ -79,20 +79,12 @@ type Opts struct {
// with the same fully-qualified name must have the same label names in
// their ConstLabels.
//
- // Note that in most cases, labels have a value that varies during the
- // lifetime of a process. Those labels are usually managed with a metric
- // vector collector (like CounterVec, GaugeVec, UntypedVec). ConstLabels
- // serve only special purposes. One is for the special case where the
- // value of a label does not change during the lifetime of a process,
- // e.g. if the revision of the running binary is put into a
- // label. Another, more advanced purpose is if more than one Collector
- // needs to collect Metrics with the same fully-qualified name. In that
- // case, those Metrics must differ in the values of their
- // ConstLabels. See the Collector examples.
- //
- // If the value of a label never changes (not even between binaries),
- // that label most likely should not be a label at all (but part of the
- // metric name).
+ // ConstLabels are only used rarely. In particular, do not use them to
+ // attach the same labels to all your metrics. Those use cases are
+ // better covered by target labels set by the scraping Prometheus
+ // server, or by one specific metric (e.g. a build_info or a
+ // machine_role metric). See also
+ // https://prometheus.io/docs/instrumenting/writing_exporters/#target-labels,-not-static-scraped-labels
ConstLabels Labels
}
@@ -135,20 +127,6 @@ func (s LabelPairSorter) Less(i, j int) bool {
return s[i].GetName() < s[j].GetName()
}
-type hashSorter []uint64
-
-func (s hashSorter) Len() int {
- return len(s)
-}
-
-func (s hashSorter) Swap(i, j int) {
- s[i], s[j] = s[j], s[i]
-}
-
-func (s hashSorter) Less(i, j int) bool {
- return s[i] < s[j]
-}
-
type invalidMetric struct {
desc *Desc
err error
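
A sketch of the build_info pattern the ConstLabels comment recommends (illustrative metric and label values, not part of this diff):

package main

import "github.com/prometheus/client_golang/prometheus"

// buildInfo keeps process-constant facts in ConstLabels on a gauge fixed at 1,
// instead of attaching the same labels to every metric the process exports.
var buildInfo = prometheus.NewGauge(prometheus.GaugeOpts{
	Name: "myapp_build_info",
	Help: "Build information; constant 1, labeled with version and revision.",
	ConstLabels: prometheus.Labels{
		"version":  "1.2.3",
		"revision": "abc1234",
	},
})

func main() {
	prometheus.MustRegister(buildInfo)
	buildInfo.Set(1)
}
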
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/observer.go b/vendor/github.com/prometheus/client_golang/prometheus/observer.go
new file mode 100644
index 00000000..5806cd09
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/observer.go
@@ -0,0 +1,52 @@
+// Copyright 2017 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+// Observer is the interface that wraps the Observe method, which is used by
+// Histogram and Summary to add observations.
+type Observer interface {
+ Observe(float64)
+}
+
+// The ObserverFunc type is an adapter to allow the use of ordinary
+// functions as Observers. If f is a function with the appropriate
+// signature, ObserverFunc(f) is an Observer that calls f.
+//
+// This adapter is usually used in connection with the Timer type, and there are
+// two general use cases:
+//
+// The most common one is to use a Gauge as the Observer for a Timer.
+// See the "Gauge" Timer example.
+//
+// The more advanced use case is to create a function that dynamically decides
+// which Observer to use for observing the duration. See the "Complex" Timer
+// example.
+type ObserverFunc func(float64)
+
+// Observe calls f(value). It implements Observer.
+func (f ObserverFunc) Observe(value float64) {
+ f(value)
+}
+
+// ObserverVec is an interface implemented by `HistogramVec` and `SummaryVec`.
+type ObserverVec interface {
+ GetMetricWith(Labels) (Observer, error)
+ GetMetricWithLabelValues(lvs ...string) (Observer, error)
+ With(Labels) Observer
+ WithLabelValues(...string) Observer
+ CurryWith(Labels) (ObserverVec, error)
+ MustCurryWith(Labels) ObserverVec
+
+ Collector
+}
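
A sketch of the "Gauge as Observer" use case mentioned above, assuming the package's Timer type and a hypothetical gauge (not part of this diff):

package main

import (
	"time"

	"github.com/prometheus/client_golang/prometheus"
)

var lastRunDuration = prometheus.NewGauge(prometheus.GaugeOpts{
	Name: "myapp_last_run_duration_seconds",
	Help: "Duration of the last run.",
})

func run() {
	// ObserverFunc adapts the gauge's Set method into an Observer, so the
	// Timer writes the elapsed seconds into the gauge on ObserveDuration.
	timer := prometheus.NewTimer(prometheus.ObserverFunc(lastRunDuration.Set))
	defer timer.ObserveDuration()

	time.Sleep(10 * time.Millisecond) // stand-in for real work
}

func main() {
	prometheus.MustRegister(lastRunDuration)
	run()
}
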
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go
index e31e62e7..5ab2b1c9 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go
@@ -16,18 +16,21 @@ package prometheus
import "github.com/prometheus/procfs"
type processCollector struct {
- pid int
collectFn func(chan<- Metric)
pidFn func() (int, error)
- cpuTotal Counter
- openFDs, maxFDs Gauge
- vsize, rss Gauge
- startTime Gauge
+ cpuTotal *Desc
+ openFDs, maxFDs *Desc
+ vsize, maxVsize *Desc
+ rss *Desc
+ startTime *Desc
}
// NewProcessCollector returns a collector which exports the current state of
-// process metrics including cpu, memory and file descriptor usage as well as
-// the process start time for the given process id under the given namespace.
+// process metrics including CPU, memory and file descriptor usage as well as
+// the process start time for the given process ID under the given namespace.
+//
+// Currently, the collector depends on a Linux-style proc filesystem and
+// therefore only exports metrics for Linux.
func NewProcessCollector(pid int, namespace string) Collector {
return NewProcessCollectorPIDFn(
func() (int, error) { return pid, nil },
@@ -35,49 +38,56 @@ func NewProcessCollector(pid int, namespace string) Collector {
)
}
-// NewProcessCollectorPIDFn returns a collector which exports the current state
-// of process metrics including cpu, memory and file descriptor usage as well
-// as the process start time under the given namespace. The given pidFn is
-// called on each collect and is used to determine the process to export
-// metrics for.
+// NewProcessCollectorPIDFn works like NewProcessCollector but the process ID is
+// determined anew on each collect by calling the given pidFn function.
func NewProcessCollectorPIDFn(
pidFn func() (int, error),
namespace string,
) Collector {
+ ns := ""
+ if len(namespace) > 0 {
+ ns = namespace + "_"
+ }
+
c := processCollector{
pidFn: pidFn,
collectFn: func(chan<- Metric) {},
- cpuTotal: NewCounter(CounterOpts{
- Namespace: namespace,
- Name: "process_cpu_seconds_total",
- Help: "Total user and system CPU time spent in seconds.",
- }),
- openFDs: NewGauge(GaugeOpts{
- Namespace: namespace,
- Name: "process_open_fds",
- Help: "Number of open file descriptors.",
- }),
- maxFDs: NewGauge(GaugeOpts{
- Namespace: namespace,
- Name: "process_max_fds",
- Help: "Maximum number of open file descriptors.",
- }),
- vsize: NewGauge(GaugeOpts{
- Namespace: namespace,
- Name: "process_virtual_memory_bytes",
- Help: "Virtual memory size in bytes.",
- }),
- rss: NewGauge(GaugeOpts{
- Namespace: namespace,
- Name: "process_resident_memory_bytes",
- Help: "Resident memory size in bytes.",
- }),
- startTime: NewGauge(GaugeOpts{
- Namespace: namespace,
- Name: "process_start_time_seconds",
- Help: "Start time of the process since unix epoch in seconds.",
- }),
+ cpuTotal: NewDesc(
+ ns+"process_cpu_seconds_total",
+ "Total user and system CPU time spent in seconds.",
+ nil, nil,
+ ),
+ openFDs: NewDesc(
+ ns+"process_open_fds",
+ "Number of open file descriptors.",
+ nil, nil,
+ ),
+ maxFDs: NewDesc(
+ ns+"process_max_fds",
+ "Maximum number of open file descriptors.",
+ nil, nil,
+ ),
+ vsize: NewDesc(
+ ns+"process_virtual_memory_bytes",
+ "Virtual memory size in bytes.",
+ nil, nil,
+ ),
+ maxVsize: NewDesc(
+ ns+"process_virtual_memory_max_bytes",
+ "Maximum amount of virtual memory available in bytes.",
+ nil, nil,
+ ),
+ rss: NewDesc(
+ ns+"process_resident_memory_bytes",
+ "Resident memory size in bytes.",
+ nil, nil,
+ ),
+ startTime: NewDesc(
+ ns+"process_start_time_seconds",
+ "Start time of the process since unix epoch in seconds.",
+ nil, nil,
+ ),
}
// Set up process metric collection if supported by the runtime.
@@ -90,12 +100,13 @@ func NewProcessCollectorPIDFn(
// Describe returns all descriptions of the collector.
func (c *processCollector) Describe(ch chan<- *Desc) {
- ch <- c.cpuTotal.Desc()
- ch <- c.openFDs.Desc()
- ch <- c.maxFDs.Desc()
- ch <- c.vsize.Desc()
- ch <- c.rss.Desc()
- ch <- c.startTime.Desc()
+ ch <- c.cpuTotal
+ ch <- c.openFDs
+ ch <- c.maxFDs
+ ch <- c.vsize
+ ch <- c.maxVsize
+ ch <- c.rss
+ ch <- c.startTime
}
// Collect returns the current state of all metrics of the collector.
@@ -117,26 +128,20 @@ func (c *processCollector) processCollect(ch chan<- Metric) {
}
if stat, err := p.NewStat(); err == nil {
- c.cpuTotal.Set(stat.CPUTime())
- ch <- c.cpuTotal
- c.vsize.Set(float64(stat.VirtualMemory()))
- ch <- c.vsize
- c.rss.Set(float64(stat.ResidentMemory()))
- ch <- c.rss
-
+ ch <- MustNewConstMetric(c.cpuTotal, CounterValue, stat.CPUTime())
+ ch <- MustNewConstMetric(c.vsize, GaugeValue, float64(stat.VirtualMemory()))
+ ch <- MustNewConstMetric(c.rss, GaugeValue, float64(stat.ResidentMemory()))
if startTime, err := stat.StartTime(); err == nil {
- c.startTime.Set(startTime)
- ch <- c.startTime
+ ch <- MustNewConstMetric(c.startTime, GaugeValue, startTime)
}
}
if fds, err := p.FileDescriptorsLen(); err == nil {
- c.openFDs.Set(float64(fds))
- ch <- c.openFDs
+ ch <- MustNewConstMetric(c.openFDs, GaugeValue, float64(fds))
}
if limits, err := p.NewLimits(); err == nil {
- c.maxFDs.Set(float64(limits.OpenFiles))
- ch <- c.maxFDs
+ ch <- MustNewConstMetric(c.maxFDs, GaugeValue, float64(limits.OpenFiles))
+ ch <- MustNewConstMetric(c.maxVsize, GaugeValue, float64(limits.AddressSpace))
}
}
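
A sketch of wiring the process collector into a custom registry (the namespace and port are illustrative assumptions). A separate registry avoids colliding with the process collector the default registry already holds:

package main

import (
	"net/http"
	"os"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	// The default registry already holds a process collector for this pid,
	// so a duplicate there would fail; a fresh registry sidesteps that.
	reg := prometheus.NewRegistry()
	reg.MustRegister(prometheus.NewProcessCollector(os.Getpid(), "myapp"))

	http.Handle("/metrics", promhttp.HandlerFor(reg, promhttp.HandlerOpts{}))
	http.ListenAndServe(":8080", nil)
}
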
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go
new file mode 100644
index 00000000..67b56d37
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go
@@ -0,0 +1,199 @@
+// Copyright 2017 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package promhttp
+
+import (
+ "bufio"
+ "io"
+ "net"
+ "net/http"
+)
+
+const (
+ closeNotifier = 1 << iota
+ flusher
+ hijacker
+ readerFrom
+ pusher
+)
+
+type delegator interface {
+ http.ResponseWriter
+
+ Status() int
+ Written() int64
+}
+
+type responseWriterDelegator struct {
+ http.ResponseWriter
+
+ handler, method string
+ status int
+ written int64
+ wroteHeader bool
+ observeWriteHeader func(int)
+}
+
+func (r *responseWriterDelegator) Status() int {
+ return r.status
+}
+
+func (r *responseWriterDelegator) Written() int64 {
+ return r.written
+}
+
+func (r *responseWriterDelegator) WriteHeader(code int) {
+ r.status = code
+ r.wroteHeader = true
+ r.ResponseWriter.WriteHeader(code)
+ if r.observeWriteHeader != nil {
+ r.observeWriteHeader(code)
+ }
+}
+
+func (r *responseWriterDelegator) Write(b []byte) (int, error) {
+ if !r.wroteHeader {
+ r.WriteHeader(http.StatusOK)
+ }
+ n, err := r.ResponseWriter.Write(b)
+ r.written += int64(n)
+ return n, err
+}
+
+type closeNotifierDelegator struct{ *responseWriterDelegator }
+type flusherDelegator struct{ *responseWriterDelegator }
+type hijackerDelegator struct{ *responseWriterDelegator }
+type readerFromDelegator struct{ *responseWriterDelegator }
+
+func (d closeNotifierDelegator) CloseNotify() <-chan bool {
+ return d.ResponseWriter.(http.CloseNotifier).CloseNotify()
+}
+func (d flusherDelegator) Flush() {
+ d.ResponseWriter.(http.Flusher).Flush()
+}
+func (d hijackerDelegator) Hijack() (net.Conn, *bufio.ReadWriter, error) {
+ return d.ResponseWriter.(http.Hijacker).Hijack()
+}
+func (d readerFromDelegator) ReadFrom(re io.Reader) (int64, error) {
+ if !d.wroteHeader {
+ d.WriteHeader(http.StatusOK)
+ }
+ n, err := d.ResponseWriter.(io.ReaderFrom).ReadFrom(re)
+ d.written += n
+ return n, err
+}
+
+var pickDelegator = make([]func(*responseWriterDelegator) delegator, 32)
+
+func init() {
+ // TODO(beorn7): Code generation would help here.
+ pickDelegator[0] = func(d *responseWriterDelegator) delegator { // 0
+ return d
+ }
+ pickDelegator[closeNotifier] = func(d *responseWriterDelegator) delegator { // 1
+ return closeNotifierDelegator{d}
+ }
+ pickDelegator[flusher] = func(d *responseWriterDelegator) delegator { // 2
+ return flusherDelegator{d}
+ }
+ pickDelegator[flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 3
+ return struct {
+ *responseWriterDelegator
+ http.Flusher
+ http.CloseNotifier
+ }{d, flusherDelegator{d}, closeNotifierDelegator{d}}
+ }
+ pickDelegator[hijacker] = func(d *responseWriterDelegator) delegator { // 4
+ return hijackerDelegator{d}
+ }
+ pickDelegator[hijacker+closeNotifier] = func(d *responseWriterDelegator) delegator { // 5
+ return struct {
+ *responseWriterDelegator
+ http.Hijacker
+ http.CloseNotifier
+ }{d, hijackerDelegator{d}, closeNotifierDelegator{d}}
+ }
+ pickDelegator[hijacker+flusher] = func(d *responseWriterDelegator) delegator { // 6
+ return struct {
+ *responseWriterDelegator
+ http.Hijacker
+ http.Flusher
+ }{d, hijackerDelegator{d}, flusherDelegator{d}}
+ }
+ pickDelegator[hijacker+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 7
+ return struct {
+ *responseWriterDelegator
+ http.Hijacker
+ http.Flusher
+ http.CloseNotifier
+ }{d, hijackerDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}}
+ }
+ pickDelegator[readerFrom] = func(d *responseWriterDelegator) delegator { // 8
+ return readerFromDelegator{d}
+ }
+ pickDelegator[readerFrom+closeNotifier] = func(d *responseWriterDelegator) delegator { // 9
+ return struct {
+ *responseWriterDelegator
+ io.ReaderFrom
+ http.CloseNotifier
+ }{d, readerFromDelegator{d}, closeNotifierDelegator{d}}
+ }
+ pickDelegator[readerFrom+flusher] = func(d *responseWriterDelegator) delegator { // 10
+ return struct {
+ *responseWriterDelegator
+ io.ReaderFrom
+ http.Flusher
+ }{d, readerFromDelegator{d}, flusherDelegator{d}}
+ }
+ pickDelegator[readerFrom+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 11
+ return struct {
+ *responseWriterDelegator
+ io.ReaderFrom
+ http.Flusher
+ http.CloseNotifier
+ }{d, readerFromDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}}
+ }
+ pickDelegator[readerFrom+hijacker] = func(d *responseWriterDelegator) delegator { // 12
+ return struct {
+ *responseWriterDelegator
+ io.ReaderFrom
+ http.Hijacker
+ }{d, readerFromDelegator{d}, hijackerDelegator{d}}
+ }
+ pickDelegator[readerFrom+hijacker+closeNotifier] = func(d *responseWriterDelegator) delegator { // 13
+ return struct {
+ *responseWriterDelegator
+ io.ReaderFrom
+ http.Hijacker
+ http.CloseNotifier
+ }{d, readerFromDelegator{d}, hijackerDelegator{d}, closeNotifierDelegator{d}}
+ }
+ pickDelegator[readerFrom+hijacker+flusher] = func(d *responseWriterDelegator) delegator { // 14
+ return struct {
+ *responseWriterDelegator
+ io.ReaderFrom
+ http.Hijacker
+ http.Flusher
+ }{d, readerFromDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}}
+ }
+ pickDelegator[readerFrom+hijacker+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 15
+ return struct {
+ *responseWriterDelegator
+ io.ReaderFrom
+ http.Hijacker
+ http.Flusher
+ http.CloseNotifier
+ }{d, readerFromDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}}
+ }
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator_1_8.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator_1_8.go
new file mode 100644
index 00000000..31a70695
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator_1_8.go
@@ -0,0 +1,181 @@
+// Copyright 2017 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build go1.8
+
+package promhttp
+
+import (
+ "io"
+ "net/http"
+)
+
+type pusherDelegator struct{ *responseWriterDelegator }
+
+func (d pusherDelegator) Push(target string, opts *http.PushOptions) error {
+ return d.ResponseWriter.(http.Pusher).Push(target, opts)
+}
+
+func init() {
+ pickDelegator[pusher] = func(d *responseWriterDelegator) delegator { // 16
+ return pusherDelegator{d}
+ }
+ pickDelegator[pusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 17
+ return struct {
+ *responseWriterDelegator
+ http.Pusher
+ http.CloseNotifier
+ }{d, pusherDelegator{d}, closeNotifierDelegator{d}}
+ }
+ pickDelegator[pusher+flusher] = func(d *responseWriterDelegator) delegator { // 18
+ return struct {
+ *responseWriterDelegator
+ http.Pusher
+ http.Flusher
+ }{d, pusherDelegator{d}, flusherDelegator{d}}
+ }
+ pickDelegator[pusher+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 19
+ return struct {
+ *responseWriterDelegator
+ http.Pusher
+ http.Flusher
+ http.CloseNotifier
+ }{d, pusherDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}}
+ }
+ pickDelegator[pusher+hijacker] = func(d *responseWriterDelegator) delegator { // 20
+ return struct {
+ *responseWriterDelegator
+ http.Pusher
+ http.Hijacker
+ }{d, pusherDelegator{d}, hijackerDelegator{d}}
+ }
+ pickDelegator[pusher+hijacker+closeNotifier] = func(d *responseWriterDelegator) delegator { // 21
+ return struct {
+ *responseWriterDelegator
+ http.Pusher
+ http.Hijacker
+ http.CloseNotifier
+ }{d, pusherDelegator{d}, hijackerDelegator{d}, closeNotifierDelegator{d}}
+ }
+ pickDelegator[pusher+hijacker+flusher] = func(d *responseWriterDelegator) delegator { // 22
+ return struct {
+ *responseWriterDelegator
+ http.Pusher
+ http.Hijacker
+ http.Flusher
+ }{d, pusherDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}}
+ }
+ pickDelegator[pusher+hijacker+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { //23
+ return struct {
+ *responseWriterDelegator
+ http.Pusher
+ http.Hijacker
+ http.Flusher
+ http.CloseNotifier
+ }{d, pusherDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}}
+ }
+ pickDelegator[pusher+readerFrom] = func(d *responseWriterDelegator) delegator { // 24
+ return struct {
+ *responseWriterDelegator
+ http.Pusher
+ io.ReaderFrom
+ }{d, pusherDelegator{d}, readerFromDelegator{d}}
+ }
+ pickDelegator[pusher+readerFrom+closeNotifier] = func(d *responseWriterDelegator) delegator { // 25
+ return struct {
+ *responseWriterDelegator
+ http.Pusher
+ io.ReaderFrom
+ http.CloseNotifier
+ }{d, pusherDelegator{d}, readerFromDelegator{d}, closeNotifierDelegator{d}}
+ }
+ pickDelegator[pusher+readerFrom+flusher] = func(d *responseWriterDelegator) delegator { // 26
+ return struct {
+ *responseWriterDelegator
+ http.Pusher
+ io.ReaderFrom
+ http.Flusher
+ }{d, pusherDelegator{d}, readerFromDelegator{d}, flusherDelegator{d}}
+ }
+ pickDelegator[pusher+readerFrom+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 27
+ return struct {
+ *responseWriterDelegator
+ http.Pusher
+ io.ReaderFrom
+ http.Flusher
+ http.CloseNotifier
+ }{d, pusherDelegator{d}, readerFromDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}}
+ }
+ pickDelegator[pusher+readerFrom+hijacker] = func(d *responseWriterDelegator) delegator { // 28
+ return struct {
+ *responseWriterDelegator
+ http.Pusher
+ io.ReaderFrom
+ http.Hijacker
+ }{d, pusherDelegator{d}, readerFromDelegator{d}, hijackerDelegator{d}}
+ }
+ pickDelegator[pusher+readerFrom+hijacker+closeNotifier] = func(d *responseWriterDelegator) delegator { // 29
+ return struct {
+ *responseWriterDelegator
+ http.Pusher
+ io.ReaderFrom
+ http.Hijacker
+ http.CloseNotifier
+ }{d, pusherDelegator{d}, readerFromDelegator{d}, hijackerDelegator{d}, closeNotifierDelegator{d}}
+ }
+ pickDelegator[pusher+readerFrom+hijacker+flusher] = func(d *responseWriterDelegator) delegator { // 30
+ return struct {
+ *responseWriterDelegator
+ http.Pusher
+ io.ReaderFrom
+ http.Hijacker
+ http.Flusher
+ }{d, pusherDelegator{d}, readerFromDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}}
+ }
+ pickDelegator[pusher+readerFrom+hijacker+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 31
+ return struct {
+ *responseWriterDelegator
+ http.Pusher
+ io.ReaderFrom
+ http.Hijacker
+ http.Flusher
+ http.CloseNotifier
+ }{d, pusherDelegator{d}, readerFromDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}}
+ }
+}
+
+func newDelegator(w http.ResponseWriter, observeWriteHeaderFunc func(int)) delegator {
+ d := &responseWriterDelegator{
+ ResponseWriter: w,
+ observeWriteHeader: observeWriteHeaderFunc,
+ }
+
+ id := 0
+ if _, ok := w.(http.CloseNotifier); ok {
+ id += closeNotifier
+ }
+ if _, ok := w.(http.Flusher); ok {
+ id += flusher
+ }
+ if _, ok := w.(http.Hijacker); ok {
+ id += hijacker
+ }
+ if _, ok := w.(io.ReaderFrom); ok {
+ id += readerFrom
+ }
+ if _, ok := w.(http.Pusher); ok {
+ id += pusher
+ }
+
+ return pickDelegator[id](d)
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator_pre_1_8.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator_pre_1_8.go
new file mode 100644
index 00000000..8bb9b8b6
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator_pre_1_8.go
@@ -0,0 +1,44 @@
+// Copyright 2017 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build !go1.8
+
+package promhttp
+
+import (
+ "io"
+ "net/http"
+)
+
+func newDelegator(w http.ResponseWriter, observeWriteHeaderFunc func(int)) delegator {
+ d := &responseWriterDelegator{
+ ResponseWriter: w,
+ observeWriteHeader: observeWriteHeaderFunc,
+ }
+
+ id := 0
+ if _, ok := w.(http.CloseNotifier); ok {
+ id += closeNotifier
+ }
+ if _, ok := w.(http.Flusher); ok {
+ id += flusher
+ }
+ if _, ok := w.(http.Hijacker); ok {
+ id += hijacker
+ }
+ if _, ok := w.(io.ReaderFrom); ok {
+ id += readerFrom
+ }
+
+ return pickDelegator[id](d)
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go
index b6dd5a26..01357374 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go
@@ -11,21 +11,24 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-// Copyright (c) 2013, The Prometheus Authors
-// All rights reserved.
+// Package promhttp provides tooling around HTTP servers and clients.
//
-// Use of this source code is governed by a BSD-style license that can be found
-// in the LICENSE file.
-
-// Package promhttp contains functions to create http.Handler instances to
-// expose Prometheus metrics via HTTP. In later versions of this package, it
-// will also contain tooling to instrument instances of http.Handler and
-// http.RoundTripper.
+// First, the package allows the creation of http.Handler instances to expose
+// Prometheus metrics via HTTP. promhttp.Handler acts on the
+// prometheus.DefaultGatherer. With HandlerFor, you can create a handler for a
+// custom registry or anything that implements the Gatherer interface. It also
+// allows the creation of handlers that act differently on errors or allow to
+// log errors.
+//
+// Second, the package provides tooling to instrument instances of http.Handler
+// via middleware. Middleware wrappers follow the naming scheme
+// InstrumentHandlerX, where X describes the intended use of the middleware.
+// See each function's doc comment for specific details.
//
-// promhttp.Handler acts on the prometheus.DefaultGatherer. With HandlerFor,
-// you can create a handler for a custom registry or anything that implements
-// the Gatherer interface. It also allows to create handlers that act
-// differently on errors or allow to log errors.
+// Finally, the package allows for an http.RoundTripper to be instrumented via
+// middleware. Middleware wrappers follow the naming scheme
+// InstrumentRoundTripperX, where X describes the intended use of the
+// middleware. See each function's doc comment for specific details.
package promhttp
import (
@@ -36,6 +39,7 @@ import (
"net/http"
"strings"
"sync"
+ "time"
"github.com/prometheus/common/expfmt"
@@ -64,21 +68,51 @@ func giveBuf(buf *bytes.Buffer) {
bufPool.Put(buf)
}
-// Handler returns an HTTP handler for the prometheus.DefaultGatherer. The
-// Handler uses the default HandlerOpts, i.e. report the first error as an HTTP
-// error, no error logging, and compression if requested by the client.
+// Handler returns an http.Handler for the prometheus.DefaultGatherer, using
+// default HandlerOpts, i.e. it reports the first error as an HTTP error, it has
+// no error logging, and it applies compression if requested by the client.
+//
+// The returned http.Handler is already instrumented using the
+// InstrumentMetricHandler function and the prometheus.DefaultRegisterer. If you
+// create multiple http.Handlers by separate calls of the Handler function, the
+// metrics used for instrumentation will be shared between them, providing
+// global scrape counts.
//
-// If you want to create a Handler for the DefaultGatherer with different
-// HandlerOpts, create it with HandlerFor with prometheus.DefaultGatherer and
-// your desired HandlerOpts.
+// This function is meant to cover the bulk of basic use cases. If you are doing
+// anything that requires more customization (including using a non-default
+// Gatherer, different instrumentation, and non-default HandlerOpts), use the
+// HandlerFor function. See there for details.
func Handler() http.Handler {
- return HandlerFor(prometheus.DefaultGatherer, HandlerOpts{})
+ return InstrumentMetricHandler(
+ prometheus.DefaultRegisterer, HandlerFor(prometheus.DefaultGatherer, HandlerOpts{}),
+ )
}
-// HandlerFor returns an http.Handler for the provided Gatherer. The behavior
-// of the Handler is defined by the provided HandlerOpts.
+// HandlerFor returns an uninstrumented http.Handler for the provided
+// Gatherer. The behavior of the Handler is defined by the provided
+// HandlerOpts. Thus, HandlerFor is useful to create http.Handlers for custom
+// Gatherers, with non-default HandlerOpts, and/or with custom (or no)
+// instrumentation. Use the InstrumentMetricHandler function to apply the same
+// kind of instrumentation as is used by the Handler function.
func HandlerFor(reg prometheus.Gatherer, opts HandlerOpts) http.Handler {
- return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
+ var inFlightSem chan struct{}
+ if opts.MaxRequestsInFlight > 0 {
+ inFlightSem = make(chan struct{}, opts.MaxRequestsInFlight)
+ }
+
+ h := http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
+ if inFlightSem != nil {
+ select {
+ case inFlightSem <- struct{}{}: // All good, carry on.
+ defer func() { <-inFlightSem }()
+ default:
+ http.Error(w, fmt.Sprintf(
+ "Limit of concurrent requests reached (%d), try again later.", opts.MaxRequestsInFlight,
+ ), http.StatusServiceUnavailable)
+ return
+ }
+ }
+
mfs, err := reg.Gather()
if err != nil {
if opts.ErrorLog != nil {
@@ -125,7 +159,7 @@ func HandlerFor(reg prometheus.Gatherer, opts HandlerOpts) http.Handler {
closer.Close()
}
if lastErr != nil && buf.Len() == 0 {
- http.Error(w, "No metrics encoded, last error:\n\n"+err.Error(), http.StatusInternalServerError)
+ http.Error(w, "No metrics encoded, last error:\n\n"+lastErr.Error(), http.StatusInternalServerError)
return
}
header := w.Header()
@@ -134,9 +168,70 @@ func HandlerFor(reg prometheus.Gatherer, opts HandlerOpts) http.Handler {
if encoding != "" {
header.Set(contentEncodingHeader, encoding)
}
- w.Write(buf.Bytes())
+ if _, err := w.Write(buf.Bytes()); err != nil && opts.ErrorLog != nil {
+ opts.ErrorLog.Println("error while sending encoded metrics:", err)
+ }
// TODO(beorn7): Consider streaming serving of metrics.
})
+
+ if opts.Timeout <= 0 {
+ return h
+ }
+ return http.TimeoutHandler(h, opts.Timeout, fmt.Sprintf(
+ "Exceeded configured timeout of %v.\n",
+ opts.Timeout,
+ ))
+}
+
+// InstrumentMetricHandler is usually used with an http.Handler returned by the
+// HandlerFor function. It instruments the provided http.Handler with two
+// metrics: A counter vector "promhttp_metric_handler_requests_total" to count
+// scrapes partitioned by HTTP status code, and a gauge
+// "promhttp_metric_handler_requests_in_flight" to track the number of
+// simultaneous scrapes. This function idempotently registers collectors for
+// both metrics with the provided Registerer. It panics if the registration
+// fails. The provided metrics are useful to see how many scrapes hit the
+// monitored target (which could be from different Prometheus servers or other
+// scrapers), and how often they overlap (which would result in more than one
+// scrape in flight at the same time). Note that the scrapes-in-flight gauge
+// will contain the scrape by which it is exposed, while the scrape counter will
+// only get incremented after the scrape is complete (as only then the status
+// code is known). For tracking scrape durations, use the
+// "scrape_duration_seconds" gauge created by the Prometheus server upon each
+// scrape.
+func InstrumentMetricHandler(reg prometheus.Registerer, handler http.Handler) http.Handler {
+ cnt := prometheus.NewCounterVec(
+ prometheus.CounterOpts{
+ Name: "promhttp_metric_handler_requests_total",
+ Help: "Total number of scrapes by HTTP status code.",
+ },
+ []string{"code"},
+ )
+ // Initialize the most likely HTTP status codes.
+ cnt.WithLabelValues("200")
+ cnt.WithLabelValues("500")
+ cnt.WithLabelValues("503")
+ if err := reg.Register(cnt); err != nil {
+ if are, ok := err.(prometheus.AlreadyRegisteredError); ok {
+ cnt = are.ExistingCollector.(*prometheus.CounterVec)
+ } else {
+ panic(err)
+ }
+ }
+
+ gge := prometheus.NewGauge(prometheus.GaugeOpts{
+ Name: "promhttp_metric_handler_requests_in_flight",
+ Help: "Current number of scrapes being served.",
+ })
+ if err := reg.Register(gge); err != nil {
+ if are, ok := err.(prometheus.AlreadyRegisteredError); ok {
+ gge = are.ExistingCollector.(prometheus.Gauge)
+ } else {
+ panic(err)
+ }
+ }
+
+ return InstrumentHandlerCounter(cnt, InstrumentHandlerInFlight(gge, handler))
}
// HandlerErrorHandling defines how a Handler serving metrics will handle
@@ -180,6 +275,21 @@ type HandlerOpts struct {
// If DisableCompression is true, the handler will never compress the
// response, even if requested by the client.
DisableCompression bool
+ // The number of concurrent HTTP requests is limited to
+ // MaxRequestsInFlight. Additional requests are responded to with 503
+ // Service Unavailable and a suitable message in the body. If
+ // MaxRequestsInFlight is 0 or negative, no limit is applied.
+ MaxRequestsInFlight int
+ // If handling a request takes longer than Timeout, it is responded to
+ // with 503 Service Unavailable and a suitable message. No timeout is
+ // applied if Timeout is 0 or negative. Note that with the current
+ // implementation, reaching the timeout simply ends the HTTP request as
+ // described above (and even that only if sending of the body hasn't
+ // started yet), while the bulk work of gathering all the metrics keeps
+ // running in the background (with the eventual result thrown
+ // away). Until the implementation is improved, it is recommended to
+ // implement a separate timeout in potentially slow Collectors.
+ Timeout time.Duration
}
// decorateWriter wraps a writer to handle gzip compression if requested. It
@@ -192,7 +302,7 @@ func decorateWriter(request *http.Request, writer io.Writer, compressionDisabled
header := request.Header.Get(acceptEncodingHeader)
parts := strings.Split(header, ",")
for _, part := range parts {
- part := strings.TrimSpace(part)
+ part = strings.TrimSpace(part)
if part == "gzip" || strings.HasPrefix(part, "gzip;") {
return gzip.NewWriter(writer), "gzip"
}
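
A sketch of serving metrics with the new MaxRequestsInFlight and Timeout options, instrumented like the plain Handler (limit values and port are illustrative):

package main

import (
	"net/http"
	"time"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	// At most 3 scrapes in flight, 10s per scrape; further requests get 503.
	h := promhttp.HandlerFor(prometheus.DefaultGatherer, promhttp.HandlerOpts{
		MaxRequestsInFlight: 3,
		Timeout:             10 * time.Second,
	})
	// Add the same scrape counter and in-flight gauge the plain Handler would have.
	http.Handle("/metrics", promhttp.InstrumentMetricHandler(prometheus.DefaultRegisterer, h))
	http.ListenAndServe(":8080", nil)
}
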
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client.go
new file mode 100644
index 00000000..86fd5644
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client.go
@@ -0,0 +1,97 @@
+// Copyright 2017 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package promhttp
+
+import (
+ "net/http"
+ "time"
+
+ "github.com/prometheus/client_golang/prometheus"
+)
+
+// The RoundTripperFunc type is an adapter to allow the use of ordinary
+// functions as RoundTrippers. If f is a function with the appropriate
+// signature, RoundTripperFunc(f) is a RoundTripper that calls f.
+type RoundTripperFunc func(req *http.Request) (*http.Response, error)
+
+// RoundTrip implements the RoundTripper interface.
+func (rt RoundTripperFunc) RoundTrip(r *http.Request) (*http.Response, error) {
+ return rt(r)
+}
+
+// InstrumentRoundTripperInFlight is a middleware that wraps the provided
+// http.RoundTripper. It sets the provided prometheus.Gauge to the number of
+// requests currently handled by the wrapped http.RoundTripper.
+//
+// See the example for ExampleInstrumentRoundTripperDuration for example usage.
+func InstrumentRoundTripperInFlight(gauge prometheus.Gauge, next http.RoundTripper) RoundTripperFunc {
+ return RoundTripperFunc(func(r *http.Request) (*http.Response, error) {
+ gauge.Inc()
+ defer gauge.Dec()
+ return next.RoundTrip(r)
+ })
+}
+
+// InstrumentRoundTripperCounter is a middleware that wraps the provided
+// http.RoundTripper to observe the request result with the provided CounterVec.
+// The CounterVec must have zero, one, or two non-const non-curried labels. For
+// those, the only allowed label names are "code" and "method". The function
+// panics otherwise. Partitioning of the CounterVec happens by HTTP status code
+// and/or HTTP method if the respective instance label names are present in the
+// CounterVec. For unpartitioned counting, use a CounterVec with zero labels.
+//
+// If the wrapped RoundTripper panics or returns a non-nil error, the Counter
+// is not incremented.
+//
+// See the example for ExampleInstrumentRoundTripperDuration for example usage.
+func InstrumentRoundTripperCounter(counter *prometheus.CounterVec, next http.RoundTripper) RoundTripperFunc {
+ code, method := checkLabels(counter)
+
+ return RoundTripperFunc(func(r *http.Request) (*http.Response, error) {
+ resp, err := next.RoundTrip(r)
+ if err == nil {
+ counter.With(labels(code, method, r.Method, resp.StatusCode)).Inc()
+ }
+ return resp, err
+ })
+}
+
+// InstrumentRoundTripperDuration is a middleware that wraps the provided
+// http.RoundTripper to observe the request duration with the provided
+// ObserverVec. The ObserverVec must have zero, one, or two non-const
+// non-curried labels. For those, the only allowed label names are "code" and
+// "method". The function panics otherwise. The Observe method of the Observer
+// in the ObserverVec is called with the request duration in
+// seconds. Partitioning happens by HTTP status code and/or HTTP method if the
+// respective instance label names are present in the ObserverVec. For
+// unpartitioned observations, use an ObserverVec with zero labels. Note that
+// partitioning of Histograms is expensive and should be used judiciously.
+//
+// If the wrapped RoundTripper panics or returns a non-nil error, no values are
+// reported.
+//
+// Note that this method is only guaranteed to never observe negative durations
+// if used with Go1.9+.
+func InstrumentRoundTripperDuration(obs prometheus.ObserverVec, next http.RoundTripper) RoundTripperFunc {
+ code, method := checkLabels(obs)
+
+ return RoundTripperFunc(func(r *http.Request) (*http.Response, error) {
+ start := time.Now()
+ resp, err := next.RoundTrip(r)
+ if err == nil {
+ obs.With(labels(code, method, r.Method, resp.StatusCode)).Observe(time.Since(start).Seconds())
+ }
+ return resp, err
+ })
+}
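
A sketch chaining the three RoundTripper middlewares above around http.DefaultTransport (metric names are illustrative):

package main

import (
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

var (
	clientInFlight = prometheus.NewGauge(prometheus.GaugeOpts{
		Name: "client_in_flight_requests",
		Help: "Outgoing requests currently in flight.",
	})
	clientRequests = prometheus.NewCounterVec(
		prometheus.CounterOpts{
			Name: "client_requests_total",
			Help: "Outgoing requests by status code and method.",
		},
		[]string{"code", "method"},
	)
	clientDuration = prometheus.NewHistogramVec(
		prometheus.HistogramOpts{
			Name:    "client_request_duration_seconds",
			Help:    "Outgoing request latencies.",
			Buckets: prometheus.DefBuckets,
		},
		[]string{"method"},
	)
)

func main() {
	prometheus.MustRegister(clientInFlight, clientRequests, clientDuration)

	// Wrap the default transport inside-out: duration, then counter, then
	// the in-flight gauge, so every request passes through all three.
	rt := promhttp.InstrumentRoundTripperInFlight(clientInFlight,
		promhttp.InstrumentRoundTripperCounter(clientRequests,
			promhttp.InstrumentRoundTripperDuration(clientDuration, http.DefaultTransport),
		),
	)

	client := &http.Client{Transport: rt}
	client.Get("http://example.com")
}
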
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client_1_8.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client_1_8.go
new file mode 100644
index 00000000..a034d1ec
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client_1_8.go
@@ -0,0 +1,144 @@
+// Copyright 2017 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build go1.8
+
+package promhttp
+
+import (
+ "context"
+ "crypto/tls"
+ "net/http"
+ "net/http/httptrace"
+ "time"
+)
+
+// InstrumentTrace is used to offer flexibility in instrumenting the available
+// httptrace.ClientTrace hook functions. Each function is passed a float64
+// representing the time in seconds since the start of the HTTP request. A user
+// may choose to use separately bucketed Histograms, or implement custom
+// instance labels on a per-function basis.
+type InstrumentTrace struct {
+ GotConn func(float64)
+ PutIdleConn func(float64)
+ GotFirstResponseByte func(float64)
+ Got100Continue func(float64)
+ DNSStart func(float64)
+ DNSDone func(float64)
+ ConnectStart func(float64)
+ ConnectDone func(float64)
+ TLSHandshakeStart func(float64)
+ TLSHandshakeDone func(float64)
+ WroteHeaders func(float64)
+ Wait100Continue func(float64)
+ WroteRequest func(float64)
+}
+
+// InstrumentRoundTripperTrace is a middleware that wraps the provided
+// RoundTripper and reports times to hook functions provided in the
+// InstrumentTrace struct. Hook functions that are not present in the provided
+// InstrumentTrace struct are ignored. Times reported to the hook functions are
+// time since the start of the request. Only with Go1.9+ are those times
+// guaranteed to never be negative. (Earlier Go versions do not use a
+// monotonic clock.) Note that partitioning of Histograms is expensive and
+// should be used judiciously.
+//
+// For hook functions that receive an error as an argument, no observations are
+// made in the event of a non-nil error value.
+//
+// See the example for ExampleInstrumentRoundTripperDuration for example usage.
+func InstrumentRoundTripperTrace(it *InstrumentTrace, next http.RoundTripper) RoundTripperFunc {
+ return RoundTripperFunc(func(r *http.Request) (*http.Response, error) {
+ start := time.Now()
+
+ trace := &httptrace.ClientTrace{
+ GotConn: func(_ httptrace.GotConnInfo) {
+ if it.GotConn != nil {
+ it.GotConn(time.Since(start).Seconds())
+ }
+ },
+ PutIdleConn: func(err error) {
+ if err != nil {
+ return
+ }
+ if it.PutIdleConn != nil {
+ it.PutIdleConn(time.Since(start).Seconds())
+ }
+ },
+ DNSStart: func(_ httptrace.DNSStartInfo) {
+ if it.DNSStart != nil {
+ it.DNSStart(time.Since(start).Seconds())
+ }
+ },
+ DNSDone: func(_ httptrace.DNSDoneInfo) {
+ if it.DNSDone != nil {
+ it.DNSDone(time.Since(start).Seconds())
+ }
+ },
+ ConnectStart: func(_, _ string) {
+ if it.ConnectStart != nil {
+ it.ConnectStart(time.Since(start).Seconds())
+ }
+ },
+ ConnectDone: func(_, _ string, err error) {
+ if err != nil {
+ return
+ }
+ if it.ConnectDone != nil {
+ it.ConnectDone(time.Since(start).Seconds())
+ }
+ },
+ GotFirstResponseByte: func() {
+ if it.GotFirstResponseByte != nil {
+ it.GotFirstResponseByte(time.Since(start).Seconds())
+ }
+ },
+ Got100Continue: func() {
+ if it.Got100Continue != nil {
+ it.Got100Continue(time.Since(start).Seconds())
+ }
+ },
+ TLSHandshakeStart: func() {
+ if it.TLSHandshakeStart != nil {
+ it.TLSHandshakeStart(time.Since(start).Seconds())
+ }
+ },
+ TLSHandshakeDone: func(_ tls.ConnectionState, err error) {
+ if err != nil {
+ return
+ }
+ if it.TLSHandshakeDone != nil {
+ it.TLSHandshakeDone(time.Since(start).Seconds())
+ }
+ },
+ WroteHeaders: func() {
+ if it.WroteHeaders != nil {
+ it.WroteHeaders(time.Since(start).Seconds())
+ }
+ },
+ Wait100Continue: func() {
+ if it.Wait100Continue != nil {
+ it.Wait100Continue(time.Since(start).Seconds())
+ }
+ },
+ WroteRequest: func(_ httptrace.WroteRequestInfo) {
+ if it.WroteRequest != nil {
+ it.WroteRequest(time.Since(start).Seconds())
+ }
+ },
+ }
+ r = r.WithContext(httptrace.WithClientTrace(context.Background(), trace))
+
+ return next.RoundTrip(r)
+ })
+}
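
A sketch (Go 1.8+) feeding one InstrumentTrace hook into a plain Histogram; the metric name and buckets are illustrative:

// +build go1.8

package main

import (
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

var dnsDone = prometheus.NewHistogram(prometheus.HistogramOpts{
	Name:    "client_dns_done_seconds",
	Help:    "Time until DNS resolution completed, from request start.",
	Buckets: []float64{.005, .01, .025, .05, .1, .25, .5},
})

func main() {
	prometheus.MustRegister(dnsDone)

	// Only the hooks that are set fire; unset hooks are simply ignored.
	trace := &promhttp.InstrumentTrace{
		DNSDone: func(t float64) { dnsDone.Observe(t) },
	}

	client := &http.Client{
		Transport: promhttp.InstrumentRoundTripperTrace(trace, http.DefaultTransport),
	}
	client.Get("http://example.com")
}
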
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go
new file mode 100644
index 00000000..9db24380
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go
@@ -0,0 +1,447 @@
+// Copyright 2017 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package promhttp
+
+import (
+ "errors"
+ "net/http"
+ "strconv"
+ "strings"
+ "time"
+
+ dto "github.com/prometheus/client_model/go"
+
+ "github.com/prometheus/client_golang/prometheus"
+)
+
+// magicString is used for the hacky label test in checkLabels. Remove once fixed.
+const magicString = "zZgWfBxLqvG8kc8IMv3POi2Bb0tZI3vAnBx+gBaFi9FyPzB/CzKUer1yufDa"
+
+// InstrumentHandlerInFlight is a middleware that wraps the provided
+// http.Handler. It sets the provided prometheus.Gauge to the number of
+// requests currently handled by the wrapped http.Handler.
+//
+// See the example for InstrumentHandlerDuration for example usage.
+func InstrumentHandlerInFlight(g prometheus.Gauge, next http.Handler) http.Handler {
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ g.Inc()
+ defer g.Dec()
+ next.ServeHTTP(w, r)
+ })
+}
+
+// InstrumentHandlerDuration is a middleware that wraps the provided
+// http.Handler to observe the request duration with the provided ObserverVec.
+// The ObserverVec must have zero, one, or two non-const non-curried labels. For
+// those, the only allowed label names are "code" and "method". The function
+// panics otherwise. The Observe method of the Observer in the ObserverVec is
+// called with the request duration in seconds. Partitioning happens by HTTP
+// status code and/or HTTP method if the respective instance label names are
+// present in the ObserverVec. For unpartitioned observations, use an
+// ObserverVec with zero labels. Note that partitioning of Histograms is
+// expensive and should be used judiciously.
+//
+// If the wrapped Handler does not set a status code, a status code of 200 is assumed.
+//
+// If the wrapped Handler panics, no values are reported.
+//
+// Note that this method is only guaranteed to never observe negative durations
+// if used with Go1.9+.
+func InstrumentHandlerDuration(obs prometheus.ObserverVec, next http.Handler) http.HandlerFunc {
+ code, method := checkLabels(obs)
+
+ if code {
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ now := time.Now()
+ d := newDelegator(w, nil)
+ next.ServeHTTP(d, r)
+
+ obs.With(labels(code, method, r.Method, d.Status())).Observe(time.Since(now).Seconds())
+ })
+ }
+
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ now := time.Now()
+ next.ServeHTTP(w, r)
+ obs.With(labels(code, method, r.Method, 0)).Observe(time.Since(now).Seconds())
+ })
+}
+
+// InstrumentHandlerCounter is a middleware that wraps the provided http.Handler
+// to observe the request result with the provided CounterVec. The CounterVec
+// must have zero, one, or two non-const non-curried labels. For those, the only
+// allowed label names are "code" and "method". The function panics
+// otherwise. Partitioning of the CounterVec happens by HTTP status code and/or
+// HTTP method if the respective instance label names are present in the
+// CounterVec. For unpartitioned counting, use a CounterVec with zero labels.
+//
+// If the wrapped Handler does not set a status code, a status code of 200 is assumed.
+//
+// If the wrapped Handler panics, the Counter is not incremented.
+//
+// See the example for InstrumentHandlerDuration for example usage.
+func InstrumentHandlerCounter(counter *prometheus.CounterVec, next http.Handler) http.HandlerFunc {
+ code, method := checkLabels(counter)
+
+ if code {
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ d := newDelegator(w, nil)
+ next.ServeHTTP(d, r)
+ counter.With(labels(code, method, r.Method, d.Status())).Inc()
+ })
+ }
+
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ next.ServeHTTP(w, r)
+ counter.With(labels(code, method, r.Method, 0)).Inc()
+ })
+}
+
+// InstrumentHandlerTimeToWriteHeader is a middleware that wraps the provided
+// http.Handler to observe with the provided ObserverVec the request duration
+// until the response headers are written. The ObserverVec must have zero, one,
+// or two non-const non-curried labels. For those, the only allowed label names
+// are "code" and "method". The function panics otherwise. The Observe method of
+// the Observer in the ObserverVec is called with the request duration in
+// seconds. Partitioning happens by HTTP status code and/or HTTP method if the
+// respective instance label names are present in the ObserverVec. For
+// unpartitioned observations, use an ObserverVec with zero labels. Note that
+// partitioning of Histograms is expensive and should be used judiciously.
+//
+// If the wrapped Handler panics before calling WriteHeader, no value is
+// reported.
+//
+// Note that this method is only guaranteed to never observe negative durations
+// if used with Go1.9+.
+//
+// See the example for InstrumentHandlerDuration for example usage.
+func InstrumentHandlerTimeToWriteHeader(obs prometheus.ObserverVec, next http.Handler) http.HandlerFunc {
+ code, method := checkLabels(obs)
+
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ now := time.Now()
+ d := newDelegator(w, func(status int) {
+ obs.With(labels(code, method, r.Method, status)).Observe(time.Since(now).Seconds())
+ })
+ next.ServeHTTP(d, r)
+ })
+}
+
+// InstrumentHandlerRequestSize is a middleware that wraps the provided
+// http.Handler to observe the request size with the provided ObserverVec. The
+// ObserverVec must have zero, one, or two non-const non-curried labels. For
+// those, the only allowed label names are "code" and "method". The function
+// panics otherwise. The Observe method of the Observer in the ObserverVec is
+// called with the request size in bytes. Partitioning happens by HTTP status
+// code and/or HTTP method if the respective instance label names are present in
+// the ObserverVec. For unpartitioned observations, use an ObserverVec with zero
+// labels. Note that partitioning of Histograms is expensive and should be used
+// judiciously.
+//
+// If the wrapped Handler does not set a status code, a status code of 200 is assumed.
+//
+// If the wrapped Handler panics, no values are reported.
+//
+// See the example for InstrumentHandlerDuration for example usage.
+func InstrumentHandlerRequestSize(obs prometheus.ObserverVec, next http.Handler) http.HandlerFunc {
+ code, method := checkLabels(obs)
+
+ if code {
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ d := newDelegator(w, nil)
+ next.ServeHTTP(d, r)
+ size := computeApproximateRequestSize(r)
+ obs.With(labels(code, method, r.Method, d.Status())).Observe(float64(size))
+ })
+ }
+
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ next.ServeHTTP(w, r)
+ size := computeApproximateRequestSize(r)
+ obs.With(labels(code, method, r.Method, 0)).Observe(float64(size))
+ })
+}
+
+// InstrumentHandlerResponseSize is a middleware that wraps the provided
+// http.Handler to observe the response size with the provided ObserverVec. The
+// ObserverVec must have zero, one, or two non-const non-curried labels. For
+// those, the only allowed label names are "code" and "method". The function
+// panics otherwise. The Observe method of the Observer in the ObserverVec is
+// called with the response size in bytes. Partitioning happens by HTTP status
+// code and/or HTTP method if the respective instance label names are present in
+// the ObserverVec. For unpartitioned observations, use an ObserverVec with zero
+// labels. Note that partitioning of Histograms is expensive and should be used
+// judiciously.
+//
+// If the wrapped Handler does not set a status code, a status code of 200 is assumed.
+//
+// If the wrapped Handler panics, no values are reported.
+//
+// See the example for InstrumentHandlerDuration for example usage.
+func InstrumentHandlerResponseSize(obs prometheus.ObserverVec, next http.Handler) http.Handler {
+ code, method := checkLabels(obs)
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ d := newDelegator(w, nil)
+ next.ServeHTTP(d, r)
+ obs.With(labels(code, method, r.Method, d.Status())).Observe(float64(d.Written()))
+ })
+}
+
+func checkLabels(c prometheus.Collector) (code bool, method bool) {
+ // TODO(beorn7): Remove this hacky way to check for instance labels
+ // once Descriptors can have their dimensionality queried.
+ var (
+ desc *prometheus.Desc
+ m prometheus.Metric
+ pm dto.Metric
+ lvs []string
+ )
+
+ // Get the Desc from the Collector.
+ descc := make(chan *prometheus.Desc, 1)
+ c.Describe(descc)
+
+ select {
+ case desc = <-descc:
+ default:
+ panic("no description provided by collector")
+ }
+ select {
+ case <-descc:
+ panic("more than one description provided by collector")
+ default:
+ }
+
+ close(descc)
+
+ // Create a ConstMetric with the Desc. Since we don't know how many
+ // variable labels there are, try for as long as it needs.
+ for err := errors.New("dummy"); err != nil; lvs = append(lvs, magicString) {
+ m, err = prometheus.NewConstMetric(desc, prometheus.UntypedValue, 0, lvs...)
+ }
+
+ // Write out the metric into a proto message and look at the labels.
+ // If the value is not the magicString, it is a constLabel, which doesn't interest us.
+ // If the label is curried, it doesn't interest us.
+ // In all other cases, only "code" or "method" is allowed.
+ if err := m.Write(&pm); err != nil {
+ panic("error checking metric for labels")
+ }
+ for _, label := range pm.Label {
+ name, value := label.GetName(), label.GetValue()
+ if value != magicString || isLabelCurried(c, name) {
+ continue
+ }
+ switch name {
+ case "code":
+ code = true
+ case "method":
+ method = true
+ default:
+ panic("metric partitioned with non-supported labels")
+ }
+ }
+ return
+}
+
+func isLabelCurried(c prometheus.Collector, label string) bool {
+ // This is even hackier than the label test above.
+ // We essentially try to curry again and see if it works.
+ // But for that, we need to type-convert to the two
+ // types we use here, ObserverVec or *CounterVec.
+ switch v := c.(type) {
+ case *prometheus.CounterVec:
+ if _, err := v.CurryWith(prometheus.Labels{label: "dummy"}); err == nil {
+ return false
+ }
+ case prometheus.ObserverVec:
+ if _, err := v.CurryWith(prometheus.Labels{label: "dummy"}); err == nil {
+ return false
+ }
+ default:
+ panic("unsupported metric vec type")
+ }
+ return true
+}
+
+// emptyLabels is a one-time allocation for non-partitioned metrics to avoid
+// unnecessary allocations on each request.
+var emptyLabels = prometheus.Labels{}
+
+func labels(code, method bool, reqMethod string, status int) prometheus.Labels {
+ if !(code || method) {
+ return emptyLabels
+ }
+ labels := prometheus.Labels{}
+
+ if code {
+ labels["code"] = sanitizeCode(status)
+ }
+ if method {
+ labels["method"] = sanitizeMethod(reqMethod)
+ }
+
+ return labels
+}
+
+func computeApproximateRequestSize(r *http.Request) int {
+ s := 0
+ if r.URL != nil {
+ s += len(r.URL.String())
+ }
+
+ s += len(r.Method)
+ s += len(r.Proto)
+ for name, values := range r.Header {
+ s += len(name)
+ for _, value := range values {
+ s += len(value)
+ }
+ }
+ s += len(r.Host)
+
+ // N.B. r.Form and r.MultipartForm are assumed to be included in r.URL.
+
+ if r.ContentLength != -1 {
+ s += int(r.ContentLength)
+ }
+ return s
+}
+
+func sanitizeMethod(m string) string {
+ switch m {
+ case "GET", "get":
+ return "get"
+ case "PUT", "put":
+ return "put"
+ case "HEAD", "head":
+ return "head"
+ case "POST", "post":
+ return "post"
+ case "DELETE", "delete":
+ return "delete"
+ case "CONNECT", "connect":
+ return "connect"
+ case "OPTIONS", "options":
+ return "options"
+ case "NOTIFY", "notify":
+ return "notify"
+ default:
+ return strings.ToLower(m)
+ }
+}
+
+// If the wrapped http.Handler has not set a status code, i.e. the value is
+// currently 0, sanitizeCode will return 200, for consistency with behavior in
+// the stdlib.
+func sanitizeCode(s int) string {
+ switch s {
+ case 100:
+ return "100"
+ case 101:
+ return "101"
+
+ case 200, 0:
+ return "200"
+ case 201:
+ return "201"
+ case 202:
+ return "202"
+ case 203:
+ return "203"
+ case 204:
+ return "204"
+ case 205:
+ return "205"
+ case 206:
+ return "206"
+
+ case 300:
+ return "300"
+ case 301:
+ return "301"
+ case 302:
+ return "302"
+ case 304:
+ return "304"
+ case 305:
+ return "305"
+ case 307:
+ return "307"
+
+ case 400:
+ return "400"
+ case 401:
+ return "401"
+ case 402:
+ return "402"
+ case 403:
+ return "403"
+ case 404:
+ return "404"
+ case 405:
+ return "405"
+ case 406:
+ return "406"
+ case 407:
+ return "407"
+ case 408:
+ return "408"
+ case 409:
+ return "409"
+ case 410:
+ return "410"
+ case 411:
+ return "411"
+ case 412:
+ return "412"
+ case 413:
+ return "413"
+ case 414:
+ return "414"
+ case 415:
+ return "415"
+ case 416:
+ return "416"
+ case 417:
+ return "417"
+ case 418:
+ return "418"
+
+ case 500:
+ return "500"
+ case 501:
+ return "501"
+ case 502:
+ return "502"
+ case 503:
+ return "503"
+ case 504:
+ return "504"
+ case 505:
+ return "505"
+
+ case 428:
+ return "428"
+ case 429:
+ return "429"
+ case 431:
+ return "431"
+ case 511:
+ return "511"
+
+ default:
+ return strconv.Itoa(s)
+ }
+}
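
A sketch chaining the handler middlewares above around an ordinary handler (metric names, path, and port are illustrative):

package main

import (
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

var (
	inFlight = prometheus.NewGauge(prometheus.GaugeOpts{
		Name: "http_in_flight_requests",
		Help: "Requests currently being served.",
	})
	duration = prometheus.NewHistogramVec(
		prometheus.HistogramOpts{
			Name:    "http_request_duration_seconds",
			Help:    "Request latencies by code and method.",
			Buckets: prometheus.DefBuckets,
		},
		[]string{"code", "method"},
	)
)

func main() {
	prometheus.MustRegister(inFlight, duration)

	hello := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte("hello"))
	})

	// Chain the middlewares: the gauge tracks concurrency, the histogram
	// observes the full request duration partitioned by code and method.
	http.Handle("/", promhttp.InstrumentHandlerInFlight(inFlight,
		promhttp.InstrumentHandlerDuration(duration, hello)))
	http.Handle("/metrics", promhttp.Handler())
	http.ListenAndServe(":8080", nil)
}
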
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/registry.go b/vendor/github.com/prometheus/client_golang/prometheus/registry.go
index 405d6ddf..896838f1 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/registry.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/registry.go
@@ -15,11 +15,13 @@ package prometheus
import (
"bytes"
- "errors"
"fmt"
"os"
+ "runtime"
"sort"
+ "strings"
"sync"
+ "unicode/utf8"
"github.com/golang/protobuf/proto"
@@ -35,13 +37,14 @@ const (
// DefaultRegisterer and DefaultGatherer are the implementations of the
// Registerer and Gatherer interface a number of convenience functions in this
// package act on. Initially, both variables point to the same Registry, which
-// has a process collector (see NewProcessCollector) and a Go collector (see
-// NewGoCollector) already registered. This approach to keep default instances
-// as global state mirrors the approach of other packages in the Go standard
-// library. Note that there are caveats. Change the variables with caution and
-// only if you understand the consequences. Users who want to avoid global state
-// altogether should not use the convenience function and act on custom
-// instances instead.
+// has a process collector (currently on Linux only, see NewProcessCollector)
+// and a Go collector (see NewGoCollector, in particular the note about
+// stop-the-world implication with Go versions older than 1.9) already
+// registered. This approach to keep default instances as global state mirrors
+// the approach of other packages in the Go standard library. Note that there
+// are caveats. Change the variables with caution and only if you understand the
+// consequences. Users who want to avoid global state altogether should not use
+// the convenience functions and act on custom instances instead.
var (
defaultRegistry = NewRegistry()
DefaultRegisterer Registerer = defaultRegistry
@@ -65,7 +68,8 @@ func NewRegistry() *Registry {
// NewPedanticRegistry returns a registry that checks during collection if each
// collected Metric is consistent with its reported Desc, and if the Desc has
-// actually been registered with the registry.
+// actually been registered with the registry. Unchecked Collectors (those whose
+// Describe method does not yield any descriptors) are excluded from the check.
//
// Usually, a Registry will be happy as long as the union of all collected
// Metrics is consistent and valid even if some metrics are not consistent with
@@ -80,7 +84,7 @@ func NewPedanticRegistry() *Registry {
// Registerer is the interface for the part of a registry in charge of
// registering and unregistering. Users of custom registries should use
-// Registerer as type for registration purposes (rather then the Registry type
+// Registerer as type for registration purposes (rather than the Registry type
// directly). In that way, they are free to use custom Registerer implementation
// (e.g. for testing purposes).
type Registerer interface {
@@ -95,6 +99,14 @@ type Registerer interface {
// returned error is an instance of AlreadyRegisteredError, which
// contains the previously registered Collector.
//
+ // A Collector whose Describe method does not yield any Desc is treated
+ // as unchecked. Registration will always succeed. No check for
+ // re-registering (see previous paragraph) is performed. Thus, the
+ // caller is responsible for not double-registering the same unchecked
+ // Collector, and for providing a Collector that will not cause
+ // inconsistent metrics on collection. (This would lead to scrape
+ // errors.)
+ //
// It is in general not safe to register the same Collector multiple
// times concurrently.
Register(Collector) error
@@ -105,7 +117,9 @@ type Registerer interface {
// Unregister unregisters the Collector that equals the Collector passed
// in as an argument. (Two Collectors are considered equal if their
// Describe method yields the same set of descriptors.) The function
- // returns whether a Collector was unregistered.
+ // returns whether a Collector was unregistered. Note that an unchecked
+ // Collector cannot be unregistered (as its Describe method does not
+ // yield any descriptor).
//
// Note that even after unregistering, it will not be possible to
// register a new Collector that is inconsistent with the unregistered
@@ -123,15 +137,23 @@ type Registerer interface {
type Gatherer interface {
// Gather calls the Collect method of the registered Collectors and then
// gathers the collected metrics into a lexicographically sorted slice
- // of MetricFamily protobufs. Even if an error occurs, Gather attempts
- // to gather as many metrics as possible. Hence, if a non-nil error is
- // returned, the returned MetricFamily slice could be nil (in case of a
- // fatal error that prevented any meaningful metric collection) or
- // contain a number of MetricFamily protobufs, some of which might be
- // incomplete, and some might be missing altogether. The returned error
- // (which might be a MultiError) explains the details. In scenarios
- // where complete collection is critical, the returned MetricFamily
- // protobufs should be disregarded if the returned error is non-nil.
+ // of uniquely named MetricFamily protobufs. Gather ensures that the
+ // returned slice is valid and self-consistent so that it can be used
+ // for valid exposition. As an exception to the strict consistency
+ // requirements described for metric.Desc, Gather will tolerate
+ // different sets of label names for metrics of the same metric family.
+ //
+ // Even if an error occurs, Gather attempts to gather as many metrics as
+ // possible. Hence, if a non-nil error is returned, the returned
+ // MetricFamily slice could be nil (in case of a fatal error that
+ // prevented any meaningful metric collection) or contain a number of
+ // MetricFamily protobufs, some of which might be incomplete, and some
+ // might be missing altogether. The returned error (which might be a
+ // MultiError) explains the details. Note that this is mostly useful for
+ // debugging purposes. If the gathered protobufs are to be used for
+ // exposition in actual monitoring, it is almost always better to not
+ // expose an incomplete result and instead disregard the returned
+ // MetricFamily protobufs in case the returned error is non-nil.
Gather() ([]*dto.MetricFamily, error)
}
@@ -152,38 +174,6 @@ func MustRegister(cs ...Collector) {
DefaultRegisterer.MustRegister(cs...)
}
-// RegisterOrGet registers the provided Collector with the DefaultRegisterer and
-// returns the Collector, unless an equal Collector was registered before, in
-// which case that Collector is returned.
-//
-// Deprecated: RegisterOrGet is merely a convenience function for the
-// implementation as described in the documentation for
-// AlreadyRegisteredError. As the use case is relatively rare, this function
-// will be removed in a future version of this package to clean up the
-// namespace.
-func RegisterOrGet(c Collector) (Collector, error) {
- if err := Register(c); err != nil {
- if are, ok := err.(AlreadyRegisteredError); ok {
- return are.ExistingCollector, nil
- }
- return nil, err
- }
- return c, nil
-}
-
-// MustRegisterOrGet behaves like RegisterOrGet but panics instead of returning
-// an error.
-//
-// Deprecated: This is deprecated for the same reason RegisterOrGet is. See
-// there for details.
-func MustRegisterOrGet(c Collector) Collector {
- c, err := RegisterOrGet(c)
- if err != nil {
- panic(err)
- }
- return c
-}
-
// Unregister removes the registration of the provided Collector from the
// DefaultRegisterer.
//
@@ -201,25 +191,6 @@ func (gf GathererFunc) Gather() ([]*dto.MetricFamily, error) {
return gf()
}
-// SetMetricFamilyInjectionHook replaces the DefaultGatherer with one that
-// gathers from the previous DefaultGatherers but then merges the MetricFamily
-// protobufs returned from the provided hook function with the MetricFamily
-// protobufs returned from the original DefaultGatherer.
-//
-// Deprecated: This function manipulates the DefaultGatherer variable. Consider
-// the implications, i.e. don't do this concurrently with any uses of the
-// DefaultGatherer. In the rare cases where you need to inject MetricFamily
-// protobufs directly, it is recommended to use a custom Registry and combine it
-// with a custom Gatherer using the Gatherers type (see
-// there). SetMetricFamilyInjectionHook only exists for compatibility reasons
-// with previous versions of this package.
-func SetMetricFamilyInjectionHook(hook func() []*dto.MetricFamily) {
- DefaultGatherer = Gatherers{
- DefaultGatherer,
- GathererFunc(func() ([]*dto.MetricFamily, error) { return hook(), nil }),
- }
-}
-
// AlreadyRegisteredError is returned by the Register method if the Collector to
// be registered has already been registered before, or a different Collector
// that collects the same metrics has been registered before. Registration fails
@@ -252,6 +223,13 @@ func (errs MultiError) Error() string {
return buf.String()
}
+// Append appends the provided error if it is not nil.
+func (errs *MultiError) Append(err error) {
+ if err != nil {
+ *errs = append(*errs, err)
+ }
+}
+
// MaybeUnwrap returns nil if len(errs) is 0. It returns the first and only
// contained error as error if len(errs is 1). In all other cases, it returns
// the MultiError directly. This is helpful for returning a MultiError in a way
@@ -276,6 +254,7 @@ type Registry struct {
collectorsByID map[uint64]Collector // ID is a hash of the descIDs.
descIDs map[uint64]struct{}
dimHashesByName map[string]uint64
+ uncheckedCollectors []Collector
pedanticChecksEnabled bool
}
@@ -294,7 +273,7 @@ func (r *Registry) Register(c Collector) error {
}()
r.mtx.Lock()
defer r.mtx.Unlock()
- // Coduct various tests...
+ // Conduct various tests...
for desc := range descChan {
// Is the descriptor valid at all?
@@ -333,9 +312,10 @@ func (r *Registry) Register(c Collector) error {
}
}
}
- // Did anything happen at all?
+ // A Collector yielding no Desc at all is considered unchecked.
if len(newDescIDs) == 0 {
- return errors.New("collector has no descriptors")
+ r.uncheckedCollectors = append(r.uncheckedCollectors, c)
+ return nil
}
if existing, exists := r.collectorsByID[collectorID]; exists {
return AlreadyRegisteredError{
@@ -409,31 +389,25 @@ func (r *Registry) MustRegister(cs ...Collector) {
// Gather implements Gatherer.
func (r *Registry) Gather() ([]*dto.MetricFamily, error) {
var (
- metricChan = make(chan Metric, capMetricChan)
- metricHashes = map[uint64]struct{}{}
- dimHashes = map[string]uint64{}
- wg sync.WaitGroup
- errs MultiError // The collected errors to return in the end.
- registeredDescIDs map[uint64]struct{} // Only used for pedantic checks
+ checkedMetricChan = make(chan Metric, capMetricChan)
+ uncheckedMetricChan = make(chan Metric, capMetricChan)
+ metricHashes = map[uint64]struct{}{}
+ wg sync.WaitGroup
+ errs MultiError // The collected errors to return in the end.
+ registeredDescIDs map[uint64]struct{} // Only used for pedantic checks
)
r.mtx.RLock()
+ goroutineBudget := len(r.collectorsByID) + len(r.uncheckedCollectors)
metricFamiliesByName := make(map[string]*dto.MetricFamily, len(r.dimHashesByName))
-
- // Scatter.
- // (Collectors could be complex and slow, so we call them all at once.)
- wg.Add(len(r.collectorsByID))
- go func() {
- wg.Wait()
- close(metricChan)
- }()
+ checkedCollectors := make(chan Collector, len(r.collectorsByID))
+ uncheckedCollectors := make(chan Collector, len(r.uncheckedCollectors))
for _, collector := range r.collectorsByID {
- go func(collector Collector) {
- defer wg.Done()
- collector.Collect(metricChan)
- }(collector)
+ checkedCollectors <- collector
+ }
+ for _, collector := range r.uncheckedCollectors {
+ uncheckedCollectors <- collector
}
-
// In case pedantic checks are enabled, we have to copy the map before
// giving up the RLock.
if r.pedanticChecksEnabled {
@@ -442,127 +416,221 @@ func (r *Registry) Gather() ([]*dto.MetricFamily, error) {
registeredDescIDs[id] = struct{}{}
}
}
-
r.mtx.RUnlock()
- // Drain metricChan in case of premature return.
+ wg.Add(goroutineBudget)
+
+ collectWorker := func() {
+ for {
+ select {
+ case collector := <-checkedCollectors:
+ collector.Collect(checkedMetricChan)
+ case collector := <-uncheckedCollectors:
+ collector.Collect(uncheckedMetricChan)
+ default:
+ return
+ }
+ wg.Done()
+ }
+ }
+
+ // Start the first worker now to make sure at least one is running.
+ go collectWorker()
+ goroutineBudget--
+
+ // Close checkedMetricChan and uncheckedMetricChan once all collectors
+ // are collected.
+ go func() {
+ wg.Wait()
+ close(checkedMetricChan)
+ close(uncheckedMetricChan)
+ }()
+
+ // Drain checkedMetricChan and uncheckedMetricChan in case of premature return.
defer func() {
- for range metricChan {
+ if checkedMetricChan != nil {
+ for range checkedMetricChan {
+ }
+ }
+ if uncheckedMetricChan != nil {
+ for range uncheckedMetricChan {
+ }
}
}()
- // Gather.
- for metric := range metricChan {
- // This could be done concurrently, too, but it required locking
- // of metricFamiliesByName (and of metricHashes if checks are
- // enabled). Most likely not worth it.
- desc := metric.Desc()
- dtoMetric := &dto.Metric{}
- if err := metric.Write(dtoMetric); err != nil {
- errs = append(errs, fmt.Errorf(
- "error collecting metric %v: %s", desc, err,
+ // Copy the channel references so we can nil them out later to remove
+ // them from the select statements below.
+ cmc := checkedMetricChan
+ umc := uncheckedMetricChan
+
+ for {
+ select {
+ case metric, ok := <-cmc:
+ if !ok {
+ cmc = nil
+ break
+ }
+ errs.Append(processMetric(
+ metric, metricFamiliesByName,
+ metricHashes,
+ registeredDescIDs,
))
- continue
- }
- metricFamily, ok := metricFamiliesByName[desc.fqName]
- if ok {
- if metricFamily.GetHelp() != desc.help {
- errs = append(errs, fmt.Errorf(
- "collected metric %s %s has help %q but should have %q",
- desc.fqName, dtoMetric, desc.help, metricFamily.GetHelp(),
- ))
- continue
+ case metric, ok := <-umc:
+ if !ok {
+ umc = nil
+ break
}
- // TODO(beorn7): Simplify switch once Desc has type.
- switch metricFamily.GetType() {
- case dto.MetricType_COUNTER:
- if dtoMetric.Counter == nil {
- errs = append(errs, fmt.Errorf(
- "collected metric %s %s should be a Counter",
- desc.fqName, dtoMetric,
- ))
- continue
- }
- case dto.MetricType_GAUGE:
- if dtoMetric.Gauge == nil {
- errs = append(errs, fmt.Errorf(
- "collected metric %s %s should be a Gauge",
- desc.fqName, dtoMetric,
- ))
- continue
- }
- case dto.MetricType_SUMMARY:
- if dtoMetric.Summary == nil {
- errs = append(errs, fmt.Errorf(
- "collected metric %s %s should be a Summary",
- desc.fqName, dtoMetric,
- ))
- continue
- }
- case dto.MetricType_UNTYPED:
- if dtoMetric.Untyped == nil {
- errs = append(errs, fmt.Errorf(
- "collected metric %s %s should be Untyped",
- desc.fqName, dtoMetric,
+ errs.Append(processMetric(
+ metric, metricFamiliesByName,
+ metricHashes,
+ nil,
+ ))
+ default:
+ if goroutineBudget <= 0 || len(checkedCollectors)+len(uncheckedCollectors) == 0 {
+ // All collectors are already being worked on or
+ // we have already as many goroutines started as
+			// we have already started as many goroutines as
+ // just without the default.
+ select {
+ case metric, ok := <-cmc:
+ if !ok {
+ cmc = nil
+ break
+ }
+ errs.Append(processMetric(
+ metric, metricFamiliesByName,
+ metricHashes,
+ registeredDescIDs,
))
- continue
- }
- case dto.MetricType_HISTOGRAM:
- if dtoMetric.Histogram == nil {
- errs = append(errs, fmt.Errorf(
- "collected metric %s %s should be a Histogram",
- desc.fqName, dtoMetric,
+ case metric, ok := <-umc:
+ if !ok {
+ umc = nil
+ break
+ }
+ errs.Append(processMetric(
+ metric, metricFamiliesByName,
+ metricHashes,
+ nil,
))
- continue
}
- default:
- panic("encountered MetricFamily with invalid type")
+ break
}
- } else {
- metricFamily = &dto.MetricFamily{}
- metricFamily.Name = proto.String(desc.fqName)
- metricFamily.Help = proto.String(desc.help)
- // TODO(beorn7): Simplify switch once Desc has type.
- switch {
- case dtoMetric.Gauge != nil:
- metricFamily.Type = dto.MetricType_GAUGE.Enum()
- case dtoMetric.Counter != nil:
- metricFamily.Type = dto.MetricType_COUNTER.Enum()
- case dtoMetric.Summary != nil:
- metricFamily.Type = dto.MetricType_SUMMARY.Enum()
- case dtoMetric.Untyped != nil:
- metricFamily.Type = dto.MetricType_UNTYPED.Enum()
- case dtoMetric.Histogram != nil:
- metricFamily.Type = dto.MetricType_HISTOGRAM.Enum()
- default:
- errs = append(errs, fmt.Errorf(
- "empty metric collected: %s", dtoMetric,
- ))
- continue
- }
- metricFamiliesByName[desc.fqName] = metricFamily
+ // Start more workers.
+ go collectWorker()
+ goroutineBudget--
+ runtime.Gosched()
}
- if err := checkMetricConsistency(metricFamily, dtoMetric, metricHashes, dimHashes); err != nil {
- errs = append(errs, err)
- continue
+	// Once both checkedMetricChan and uncheckedMetricChan are closed
+ // and drained, the contraption above will nil out cmc and umc,
+ // and then we can leave the collect loop here.
+ if cmc == nil && umc == nil {
+ break
}
- if r.pedanticChecksEnabled {
- // Is the desc registered at all?
- if _, exist := registeredDescIDs[desc.id]; !exist {
- errs = append(errs, fmt.Errorf(
- "collected metric %s %s with unregistered descriptor %s",
- metricFamily.GetName(), dtoMetric, desc,
- ))
- continue
+ }
+ return normalizeMetricFamilies(metricFamiliesByName), errs.MaybeUnwrap()
+}
+
+// processMetric is an internal helper method only used by the Gather method.
+func processMetric(
+ metric Metric,
+ metricFamiliesByName map[string]*dto.MetricFamily,
+ metricHashes map[uint64]struct{},
+ registeredDescIDs map[uint64]struct{},
+) error {
+ desc := metric.Desc()
+ dtoMetric := &dto.Metric{}
+ if err := metric.Write(dtoMetric); err != nil {
+ return fmt.Errorf("error collecting metric %v: %s", desc, err)
+ }
+ metricFamily, ok := metricFamiliesByName[desc.fqName]
+ if ok { // Existing name.
+ if metricFamily.GetHelp() != desc.help {
+ return fmt.Errorf(
+ "collected metric %s %s has help %q but should have %q",
+ desc.fqName, dtoMetric, desc.help, metricFamily.GetHelp(),
+ )
+ }
+ // TODO(beorn7): Simplify switch once Desc has type.
+ switch metricFamily.GetType() {
+ case dto.MetricType_COUNTER:
+ if dtoMetric.Counter == nil {
+ return fmt.Errorf(
+ "collected metric %s %s should be a Counter",
+ desc.fqName, dtoMetric,
+ )
+ }
+ case dto.MetricType_GAUGE:
+ if dtoMetric.Gauge == nil {
+ return fmt.Errorf(
+ "collected metric %s %s should be a Gauge",
+ desc.fqName, dtoMetric,
+ )
+ }
+ case dto.MetricType_SUMMARY:
+ if dtoMetric.Summary == nil {
+ return fmt.Errorf(
+ "collected metric %s %s should be a Summary",
+ desc.fqName, dtoMetric,
+ )
}
- if err := checkDescConsistency(metricFamily, dtoMetric, desc); err != nil {
- errs = append(errs, err)
- continue
+ case dto.MetricType_UNTYPED:
+ if dtoMetric.Untyped == nil {
+ return fmt.Errorf(
+ "collected metric %s %s should be Untyped",
+ desc.fqName, dtoMetric,
+ )
}
+ case dto.MetricType_HISTOGRAM:
+ if dtoMetric.Histogram == nil {
+ return fmt.Errorf(
+ "collected metric %s %s should be a Histogram",
+ desc.fqName, dtoMetric,
+ )
+ }
+ default:
+ panic("encountered MetricFamily with invalid type")
+ }
+ } else { // New name.
+ metricFamily = &dto.MetricFamily{}
+ metricFamily.Name = proto.String(desc.fqName)
+ metricFamily.Help = proto.String(desc.help)
+ // TODO(beorn7): Simplify switch once Desc has type.
+ switch {
+ case dtoMetric.Gauge != nil:
+ metricFamily.Type = dto.MetricType_GAUGE.Enum()
+ case dtoMetric.Counter != nil:
+ metricFamily.Type = dto.MetricType_COUNTER.Enum()
+ case dtoMetric.Summary != nil:
+ metricFamily.Type = dto.MetricType_SUMMARY.Enum()
+ case dtoMetric.Untyped != nil:
+ metricFamily.Type = dto.MetricType_UNTYPED.Enum()
+ case dtoMetric.Histogram != nil:
+ metricFamily.Type = dto.MetricType_HISTOGRAM.Enum()
+ default:
+ return fmt.Errorf("empty metric collected: %s", dtoMetric)
}
- metricFamily.Metric = append(metricFamily.Metric, dtoMetric)
+ if err := checkSuffixCollisions(metricFamily, metricFamiliesByName); err != nil {
+ return err
+ }
+ metricFamiliesByName[desc.fqName] = metricFamily
}
- return normalizeMetricFamilies(metricFamiliesByName), errs.MaybeUnwrap()
+ if err := checkMetricConsistency(metricFamily, dtoMetric, metricHashes); err != nil {
+ return err
+ }
+ if registeredDescIDs != nil {
+ // Is the desc registered at all?
+ if _, exist := registeredDescIDs[desc.id]; !exist {
+ return fmt.Errorf(
+ "collected metric %s %s with unregistered descriptor %s",
+ metricFamily.GetName(), dtoMetric, desc,
+ )
+ }
+ if err := checkDescConsistency(metricFamily, dtoMetric, desc); err != nil {
+ return err
+ }
+ }
+ metricFamily.Metric = append(metricFamily.Metric, dtoMetric)
+ return nil
}
// Gatherers is a slice of Gatherer instances that implements the Gatherer
@@ -588,7 +656,6 @@ func (gs Gatherers) Gather() ([]*dto.MetricFamily, error) {
var (
metricFamiliesByName = map[string]*dto.MetricFamily{}
metricHashes = map[uint64]struct{}{}
- dimHashes = map[string]uint64{}
errs MultiError // The collected errors to return in the end.
)
@@ -625,10 +692,14 @@ func (gs Gatherers) Gather() ([]*dto.MetricFamily, error) {
existingMF.Name = mf.Name
existingMF.Help = mf.Help
existingMF.Type = mf.Type
+ if err := checkSuffixCollisions(existingMF, metricFamiliesByName); err != nil {
+ errs = append(errs, err)
+ continue
+ }
metricFamiliesByName[mf.GetName()] = existingMF
}
for _, m := range mf.Metric {
- if err := checkMetricConsistency(existingMF, m, metricHashes, dimHashes); err != nil {
+ if err := checkMetricConsistency(existingMF, m, metricHashes); err != nil {
errs = append(errs, err)
continue
}
@@ -704,19 +775,74 @@ func normalizeMetricFamilies(metricFamiliesByName map[string]*dto.MetricFamily)
return result
}
+// checkSuffixCollisions checks for collisions with the “magic” suffixes that the
+// Prometheus text format and the internal metric representation of the
+// Prometheus server add while flattening Summaries and Histograms.
+func checkSuffixCollisions(mf *dto.MetricFamily, mfs map[string]*dto.MetricFamily) error {
+ var (
+ newName = mf.GetName()
+ newType = mf.GetType()
+ newNameWithoutSuffix = ""
+ )
+ switch {
+ case strings.HasSuffix(newName, "_count"):
+ newNameWithoutSuffix = newName[:len(newName)-6]
+ case strings.HasSuffix(newName, "_sum"):
+ newNameWithoutSuffix = newName[:len(newName)-4]
+ case strings.HasSuffix(newName, "_bucket"):
+ newNameWithoutSuffix = newName[:len(newName)-7]
+ }
+ if newNameWithoutSuffix != "" {
+ if existingMF, ok := mfs[newNameWithoutSuffix]; ok {
+ switch existingMF.GetType() {
+ case dto.MetricType_SUMMARY:
+ if !strings.HasSuffix(newName, "_bucket") {
+ return fmt.Errorf(
+ "collected metric named %q collides with previously collected summary named %q",
+ newName, newNameWithoutSuffix,
+ )
+ }
+ case dto.MetricType_HISTOGRAM:
+ return fmt.Errorf(
+ "collected metric named %q collides with previously collected histogram named %q",
+ newName, newNameWithoutSuffix,
+ )
+ }
+ }
+ }
+ if newType == dto.MetricType_SUMMARY || newType == dto.MetricType_HISTOGRAM {
+ if _, ok := mfs[newName+"_count"]; ok {
+ return fmt.Errorf(
+ "collected histogram or summary named %q collides with previously collected metric named %q",
+ newName, newName+"_count",
+ )
+ }
+ if _, ok := mfs[newName+"_sum"]; ok {
+ return fmt.Errorf(
+ "collected histogram or summary named %q collides with previously collected metric named %q",
+ newName, newName+"_sum",
+ )
+ }
+ }
+ if newType == dto.MetricType_HISTOGRAM {
+ if _, ok := mfs[newName+"_bucket"]; ok {
+ return fmt.Errorf(
+ "collected histogram named %q collides with previously collected metric named %q",
+ newName, newName+"_bucket",
+ )
+ }
+ }
+ return nil
+}
+
// checkMetricConsistency checks if the provided Metric is consistent with the
-// provided MetricFamily. It also hashed the Metric labels and the MetricFamily
-// name. If the resulting hash is alread in the provided metricHashes, an error
-// is returned. If not, it is added to metricHashes. The provided dimHashes maps
-// MetricFamily names to their dimHash (hashed sorted label names). If dimHashes
-// doesn't yet contain a hash for the provided MetricFamily, it is
-// added. Otherwise, an error is returned if the existing dimHashes in not equal
-// the calculated dimHash.
+// provided MetricFamily. It also hashes the Metric labels and the MetricFamily
+// name. If the resulting hash is already in the provided metricHashes, an error
+// is returned. If not, it is added to metricHashes.
func checkMetricConsistency(
metricFamily *dto.MetricFamily,
dtoMetric *dto.Metric,
metricHashes map[uint64]struct{},
- dimHashes map[string]uint64,
) error {
// Type consistency with metric family.
if metricFamily.GetType() == dto.MetricType_GAUGE && dtoMetric.Gauge == nil ||
@@ -725,41 +851,50 @@ func checkMetricConsistency(
metricFamily.GetType() == dto.MetricType_HISTOGRAM && dtoMetric.Histogram == nil ||
metricFamily.GetType() == dto.MetricType_UNTYPED && dtoMetric.Untyped == nil {
return fmt.Errorf(
- "collected metric %s %s is not a %s",
+ "collected metric %q { %s} is not a %s",
metricFamily.GetName(), dtoMetric, metricFamily.GetType(),
)
}
- // Is the metric unique (i.e. no other metric with the same name and the same label values)?
+ for _, labelPair := range dtoMetric.GetLabel() {
+ if !checkLabelName(labelPair.GetName()) {
+ return fmt.Errorf(
+ "collected metric %q { %s} has a label with an invalid name: %s",
+ metricFamily.GetName(), dtoMetric, labelPair.GetName(),
+ )
+ }
+ if dtoMetric.Summary != nil && labelPair.GetName() == quantileLabel {
+ return fmt.Errorf(
+ "collected metric %q { %s} must not have an explicit %q label",
+ metricFamily.GetName(), dtoMetric, quantileLabel,
+ )
+ }
+ if !utf8.ValidString(labelPair.GetValue()) {
+ return fmt.Errorf(
+ "collected metric %q { %s} has a label named %q whose value is not utf8: %#v",
+ metricFamily.GetName(), dtoMetric, labelPair.GetName(), labelPair.GetValue())
+ }
+ }
+
+ // Is the metric unique (i.e. no other metric with the same name and the same labels)?
h := hashNew()
h = hashAdd(h, metricFamily.GetName())
h = hashAddByte(h, separatorByte)
- dh := hashNew()
// Make sure label pairs are sorted. We depend on it for the consistency
// check.
sort.Sort(LabelPairSorter(dtoMetric.Label))
for _, lp := range dtoMetric.Label {
+ h = hashAdd(h, lp.GetName())
+ h = hashAddByte(h, separatorByte)
h = hashAdd(h, lp.GetValue())
h = hashAddByte(h, separatorByte)
- dh = hashAdd(dh, lp.GetName())
- dh = hashAddByte(dh, separatorByte)
}
if _, exists := metricHashes[h]; exists {
return fmt.Errorf(
- "collected metric %s %s was collected before with the same name and label values",
+ "collected metric %q { %s} was collected before with the same name and label values",
metricFamily.GetName(), dtoMetric,
)
}
- if dimHash, ok := dimHashes[metricFamily.GetName()]; ok {
- if dimHash != dh {
- return fmt.Errorf(
- "collected metric %s %s has label dimensions inconsistent with previously collected metrics in the same metric family",
- metricFamily.GetName(), dtoMetric,
- )
- }
- } else {
- dimHashes[metricFamily.GetName()] = dh
- }
metricHashes[h] = struct{}{}
return nil
}
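
The registry changes above introduce unchecked Collectors: if Describe yields no Desc, registration always succeeds and Gather routes the Collector through uncheckedMetricChan without pedantic checks. Below is a minimal sketch of such a Collector, assuming a hypothetical fetch function that returns queue lengths from an external system; the caller stays responsible for keeping names and label sets consistent between scrapes.

package metricsdemo

import "github.com/prometheus/client_golang/prometheus"

// queueLengthCollector mirrors values from an external source. fetch is a
// hypothetical stand-in for whatever supplies the data.
type queueLengthCollector struct {
	fetch func() map[string]float64 // queue name -> current length
}

// Describe intentionally sends nothing, which marks the Collector as
// unchecked when it is registered.
func (c queueLengthCollector) Describe(ch chan<- *prometheus.Desc) {}

// Collect builds const metrics on the fly at scrape time.
func (c queueLengthCollector) Collect(ch chan<- prometheus.Metric) {
	for queue, length := range c.fetch() {
		ch <- prometheus.MustNewConstMetric(
			prometheus.NewDesc(
				"external_queue_length",
				"Current length of an external queue.",
				[]string{"queue"}, nil,
			),
			prometheus.GaugeValue, length, queue,
		)
	}
}

Registering it is then a single call, e.g. prometheus.MustRegister(queueLengthCollector{fetch: readQueueLengths}), where readQueueLengths is again a hypothetical name.
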
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/summary.go b/vendor/github.com/prometheus/client_golang/prometheus/summary.go
index bce05bf9..83b403c5 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/summary.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/summary.go
@@ -36,7 +36,10 @@ const quantileLabel = "quantile"
//
// A typical use-case is the observation of request latencies. By default, a
// Summary provides the median, the 90th and the 99th percentile of the latency
-// as rank estimations.
+// as rank estimations. However, the default behavior will change in the
+// upcoming v0.10 of the library. There will be no rank estimations at all by
+// default. For a sane transition, it is recommended to set the desired rank
+// estimations explicitly.
//
// Note that the rank estimations cannot be aggregated in a meaningful way with
// the Prometheus query language (i.e. you cannot average or add them). If you
@@ -54,6 +57,9 @@ type Summary interface {
}
// DefObjectives are the default Summary quantile values.
+//
+// Deprecated: DefObjectives will not be used as the default objectives in
+// v0.10 of the library. The default Summary will have no quantiles then.
var (
DefObjectives = map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001}
@@ -75,8 +81,10 @@ const (
)
// SummaryOpts bundles the options for creating a Summary metric. It is
-// mandatory to set Name and Help to a non-empty string. All other fields are
-// optional and can safely be left at their zero value.
+// mandatory to set Name and Help to a non-empty string. While all other fields
+// are optional and can safely be left at their zero value, it is recommended to
+// explicitly set the Objectives field to the desired value as the default value
+// will change in the upcoming v0.10 of the library.
type SummaryOpts struct {
// Namespace, Subsystem, and Name are components of the fully-qualified
// name of the Summary (created by joining these components with
@@ -93,29 +101,33 @@ type SummaryOpts struct {
// string.
Help string
- // ConstLabels are used to attach fixed labels to this
- // Summary. Summaries with the same fully-qualified name must have the
- // same label names in their ConstLabels.
+ // ConstLabels are used to attach fixed labels to this metric. Metrics
+ // with the same fully-qualified name must have the same label names in
+ // their ConstLabels.
//
- // Note that in most cases, labels have a value that varies during the
- // lifetime of a process. Those labels are usually managed with a
- // SummaryVec. ConstLabels serve only special purposes. One is for the
- // special case where the value of a label does not change during the
- // lifetime of a process, e.g. if the revision of the running binary is
- // put into a label. Another, more advanced purpose is if more than one
- // Collector needs to collect Summaries with the same fully-qualified
- // name. In that case, those Summaries must differ in the values of
- // their ConstLabels. See the Collector examples.
+ // Due to the way a Summary is represented in the Prometheus text format
+ // and how it is handled by the Prometheus server internally, “quantile”
+ // is an illegal label name. Construction of a Summary or SummaryVec
+ // will panic if this label name is used in ConstLabels.
//
- // If the value of a label never changes (not even between binaries),
- // that label most likely should not be a label at all (but part of the
- // metric name).
+ // ConstLabels are only used rarely. In particular, do not use them to
+ // attach the same labels to all your metrics. Those use cases are
+ // better covered by target labels set by the scraping Prometheus
+ // server, or by one specific metric (e.g. a build_info or a
+ // machine_role metric). See also
+ // https://prometheus.io/docs/instrumenting/writing_exporters/#target-labels,-not-static-scraped-labels
ConstLabels Labels
// Objectives defines the quantile rank estimates with their respective
- // absolute error. If Objectives[q] = e, then the value reported
- // for q will be the φ-quantile value for some φ between q-e and q+e.
- // The default value is DefObjectives.
+ // absolute error. If Objectives[q] = e, then the value reported for q
+ // will be the φ-quantile value for some φ between q-e and q+e. The
+ // default value is DefObjectives. It is used if Objectives is left at
+ // its zero value (i.e. nil). To create a Summary without Objectives,
+ // set it to an empty map (i.e. map[float64]float64{}).
+ //
+ // Deprecated: Note that the current value of DefObjectives is
+ // deprecated. It will be replaced by an empty map in v0.10 of the
+ // library. Please explicitly set Objectives to the desired value.
Objectives map[float64]float64
// MaxAge defines the duration for which an observation stays relevant
@@ -183,7 +195,7 @@ func newSummary(desc *Desc, opts SummaryOpts, labelValues ...string) Summary {
}
}
- if len(opts.Objectives) == 0 {
+ if opts.Objectives == nil {
opts.Objectives = DefObjectives
}
@@ -390,13 +402,21 @@ func (s quantSort) Less(i, j int) bool {
// (e.g. HTTP request latencies, partitioned by status code and method). Create
// instances with NewSummaryVec.
type SummaryVec struct {
- *MetricVec
+ *metricVec
}
// NewSummaryVec creates a new SummaryVec based on the provided SummaryOpts and
-// partitioned by the given label names. At least one label name must be
-// provided.
+// partitioned by the given label names.
+//
+// Due to the way a Summary is represented in the Prometheus text format and how
+// it is handled by the Prometheus server internally, “quantile” is an illegal
+// label name. NewSummaryVec will panic if this label name is used.
func NewSummaryVec(opts SummaryOpts, labelNames []string) *SummaryVec {
+ for _, ln := range labelNames {
+ if ln == quantileLabel {
+ panic(errQuantileLabelNotAllowed)
+ }
+ }
desc := NewDesc(
BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
opts.Help,
@@ -404,47 +424,116 @@ func NewSummaryVec(opts SummaryOpts, labelNames []string) *SummaryVec {
opts.ConstLabels,
)
return &SummaryVec{
- MetricVec: newMetricVec(desc, func(lvs ...string) Metric {
+ metricVec: newMetricVec(desc, func(lvs ...string) Metric {
return newSummary(desc, opts, lvs...)
}),
}
}
-// GetMetricWithLabelValues replaces the method of the same name in
-// MetricVec. The difference is that this method returns a Summary and not a
-// Metric so that no type conversion is required.
-func (m *SummaryVec) GetMetricWithLabelValues(lvs ...string) (Summary, error) {
- metric, err := m.MetricVec.GetMetricWithLabelValues(lvs...)
+// GetMetricWithLabelValues returns the Summary for the given slice of label
+// values (same order as the VariableLabels in Desc). If that combination of
+// label values is accessed for the first time, a new Summary is created.
+//
+// It is possible to call this method without using the returned Summary to only
+// create the new Summary but leave it at its starting value, a Summary without
+// any observations.
+//
+// Keeping the Summary for later use is possible (and should be considered if
+// performance is critical), but keep in mind that Reset, DeleteLabelValues and
+// Delete can be used to delete the Summary from the SummaryVec. In that case,
+// the Summary will still exist, but it will not be exported anymore, even if a
+// Summary with the same label values is created later. See also the CounterVec
+// example.
+//
+// An error is returned if the number of label values is not the same as the
+// number of VariableLabels in Desc (minus any curried labels).
+//
+// Note that for more than one label value, this method is prone to mistakes
+// caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as
+// an alternative to avoid that type of mistake. For higher label numbers, the
+// latter has a much more readable (albeit more verbose) syntax, but it comes
+// with a performance overhead (for creating and processing the Labels map).
+// See also the GaugeVec example.
+func (v *SummaryVec) GetMetricWithLabelValues(lvs ...string) (Observer, error) {
+ metric, err := v.metricVec.getMetricWithLabelValues(lvs...)
if metric != nil {
- return metric.(Summary), err
+ return metric.(Observer), err
}
return nil, err
}
-// GetMetricWith replaces the method of the same name in MetricVec. The
-// difference is that this method returns a Summary and not a Metric so that no
-// type conversion is required.
-func (m *SummaryVec) GetMetricWith(labels Labels) (Summary, error) {
- metric, err := m.MetricVec.GetMetricWith(labels)
+// GetMetricWith returns the Summary for the given Labels map (the label names
+// must match those of the VariableLabels in Desc). If that label map is
+// accessed for the first time, a new Summary is created. Implications of
+// creating a Summary without using it and keeping the Summary for later use are
+// the same as for GetMetricWithLabelValues.
+//
+// An error is returned if the number and names of the Labels are inconsistent
+// with those of the VariableLabels in Desc (minus any curried labels).
+//
+// This method is used for the same purpose as
+// GetMetricWithLabelValues(...string). See there for pros and cons of the two
+// methods.
+func (v *SummaryVec) GetMetricWith(labels Labels) (Observer, error) {
+ metric, err := v.metricVec.getMetricWith(labels)
if metric != nil {
- return metric.(Summary), err
+ return metric.(Observer), err
}
return nil, err
}
// WithLabelValues works as GetMetricWithLabelValues, but panics where
-// GetMetricWithLabelValues would have returned an error. By not returning an
-// error, WithLabelValues allows shortcuts like
+// GetMetricWithLabelValues would have returned an error. Not returning an
+// error allows shortcuts like
// myVec.WithLabelValues("404", "GET").Observe(42.21)
-func (m *SummaryVec) WithLabelValues(lvs ...string) Summary {
- return m.MetricVec.WithLabelValues(lvs...).(Summary)
+func (v *SummaryVec) WithLabelValues(lvs ...string) Observer {
+ s, err := v.GetMetricWithLabelValues(lvs...)
+ if err != nil {
+ panic(err)
+ }
+ return s
}
// With works as GetMetricWith, but panics where GetMetricWithLabels would have
-// returned an error. By not returning an error, With allows shortcuts like
-// myVec.With(Labels{"code": "404", "method": "GET"}).Observe(42.21)
-func (m *SummaryVec) With(labels Labels) Summary {
- return m.MetricVec.With(labels).(Summary)
+// returned an error. Not returning an error allows shortcuts like
+// myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Observe(42.21)
+func (v *SummaryVec) With(labels Labels) Observer {
+ s, err := v.GetMetricWith(labels)
+ if err != nil {
+ panic(err)
+ }
+ return s
+}
+
+// CurryWith returns a vector curried with the provided labels, i.e. the
+// returned vector has those labels pre-set for all labeled operations performed
+// on it. The cardinality of the curried vector is reduced accordingly. The
+// order of the remaining labels stays the same (just with the curried labels
+// taken out of the sequence – which is relevant for the
+// (GetMetric)WithLabelValues methods). It is possible to curry a curried
+// vector, but only with labels not yet used for currying before.
+//
+// The metrics contained in the SummaryVec are shared between the curried and
+// uncurried vectors. They are just accessed differently. Curried and uncurried
+// vectors behave identically in terms of collection. Only one must be
+// registered with a given registry (usually the uncurried version). The Reset
+// method deletes all metrics, even if called on a curried vector.
+func (v *SummaryVec) CurryWith(labels Labels) (ObserverVec, error) {
+ vec, err := v.curryWith(labels)
+ if vec != nil {
+ return &SummaryVec{vec}, err
+ }
+ return nil, err
+}
+
+// MustCurryWith works as CurryWith but panics where CurryWith would have
+// returned an error.
+func (v *SummaryVec) MustCurryWith(labels Labels) ObserverVec {
+ vec, err := v.CurryWith(labels)
+ if err != nil {
+ panic(err)
+ }
+ return vec
}
type constSummary struct {
@@ -505,8 +594,8 @@ func NewConstSummary(
quantiles map[float64]float64,
labelValues ...string,
) (Metric, error) {
- if len(desc.variableLabels) != len(labelValues) {
- return nil, errInconsistentCardinality
+ if err := validateLabelValues(labelValues, len(desc.variableLabels)); err != nil {
+ return nil, err
}
return &constSummary{
desc: desc,
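
Two of the summary.go changes above are worth showing together: Objectives set explicitly (relying on DefObjectives is deprecated), and the new CurryWith/MustCurryWith on SummaryVec. A sketch with illustrative metric and label names:

package metricsdemo

import "github.com/prometheus/client_golang/prometheus"

var requestDurations = prometheus.NewSummaryVec(
	prometheus.SummaryOpts{
		Name: "request_duration_seconds",
		Help: "Request duration by handler and method.",
		// Set explicitly; the DefObjectives default is deprecated.
		Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001},
	},
	[]string{"handler", "method"},
)

func init() { prometheus.MustRegister(requestDurations) }

// loginDurations has the "handler" label pre-set; only "method" remains.
var loginDurations = requestDurations.MustCurryWith(prometheus.Labels{"handler": "login"})

func observeLogin(method string, seconds float64) {
	loginDurations.WithLabelValues(method).Observe(seconds)
}

Only the uncurried vector is registered; as the CurryWith comment above explains, curried and uncurried vectors share the same metrics.
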
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/timer.go b/vendor/github.com/prometheus/client_golang/prometheus/timer.go
new file mode 100644
index 00000000..b8fc5f18
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/timer.go
@@ -0,0 +1,51 @@
+// Copyright 2016 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import "time"
+
+// Timer is a helper type to time functions. Use NewTimer to create new
+// instances.
+type Timer struct {
+ begin time.Time
+ observer Observer
+}
+
+// NewTimer creates a new Timer. The provided Observer is used to observe a
+// duration in seconds. Timer is usually used to time a function call in the
+// following way:
+// func TimeMe() {
+// timer := NewTimer(myHistogram)
+// defer timer.ObserveDuration()
+// // Do actual work.
+// }
+func NewTimer(o Observer) *Timer {
+ return &Timer{
+ begin: time.Now(),
+ observer: o,
+ }
+}
+
+// ObserveDuration records the duration passed since the Timer was created with
+// NewTimer. It calls the Observe method of the Observer provided during
+// construction with the duration in seconds as an argument. ObserveDuration is
+// usually called with a defer statement.
+//
+// Note that this method is only guaranteed to never observe negative durations
+// if used with Go 1.9+.
+func (t *Timer) ObserveDuration() {
+ if t.observer != nil {
+ t.observer.Observe(time.Since(t.begin).Seconds())
+ }
+}
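
The Timer added above pairs well with ObserverFunc from observer.go when the target observer is only known once the work has finished. A sketch; apiDurations and handleRequest are illustrative names, not library API:

package metricsdemo

import "github.com/prometheus/client_golang/prometheus"

var apiDurations = prometheus.NewHistogramVec(
	prometheus.HistogramOpts{
		Name:    "api_request_duration_seconds",
		Help:    "API request duration by outcome.",
		Buckets: prometheus.DefBuckets,
	},
	[]string{"outcome"},
)

func init() { prometheus.MustRegister(apiDurations) }

func handleRequest(do func() error) error {
	outcome := "success"
	// The closure runs when ObserveDuration fires via defer, so it sees the
	// final value of outcome.
	timer := prometheus.NewTimer(prometheus.ObserverFunc(func(v float64) {
		apiDurations.WithLabelValues(outcome).Observe(v)
	}))
	defer timer.ObserveDuration()

	err := do()
	if err != nil {
		outcome = "error"
	}
	return err
}
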
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/untyped.go b/vendor/github.com/prometheus/client_golang/prometheus/untyped.go
index 5faf7e6e..0f9ce63f 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/untyped.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/untyped.go
@@ -13,108 +13,12 @@
package prometheus
-// Untyped is a Metric that represents a single numerical value that can
-// arbitrarily go up and down.
-//
-// An Untyped metric works the same as a Gauge. The only difference is that to
-// no type information is implied.
-//
-// To create Untyped instances, use NewUntyped.
-type Untyped interface {
- Metric
- Collector
-
- // Set sets the Untyped metric to an arbitrary value.
- Set(float64)
- // Inc increments the Untyped metric by 1.
- Inc()
- // Dec decrements the Untyped metric by 1.
- Dec()
- // Add adds the given value to the Untyped metric. (The value can be
- // negative, resulting in a decrease.)
- Add(float64)
- // Sub subtracts the given value from the Untyped metric. (The value can
- // be negative, resulting in an increase.)
- Sub(float64)
-}
-
// UntypedOpts is an alias for Opts. See there for doc comments.
type UntypedOpts Opts
-// NewUntyped creates a new Untyped metric from the provided UntypedOpts.
-func NewUntyped(opts UntypedOpts) Untyped {
- return newValue(NewDesc(
- BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
- opts.Help,
- nil,
- opts.ConstLabels,
- ), UntypedValue, 0)
-}
-
-// UntypedVec is a Collector that bundles a set of Untyped metrics that all
-// share the same Desc, but have different values for their variable
-// labels. This is used if you want to count the same thing partitioned by
-// various dimensions. Create instances with NewUntypedVec.
-type UntypedVec struct {
- *MetricVec
-}
-
-// NewUntypedVec creates a new UntypedVec based on the provided UntypedOpts and
-// partitioned by the given label names. At least one label name must be
-// provided.
-func NewUntypedVec(opts UntypedOpts, labelNames []string) *UntypedVec {
- desc := NewDesc(
- BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
- opts.Help,
- labelNames,
- opts.ConstLabels,
- )
- return &UntypedVec{
- MetricVec: newMetricVec(desc, func(lvs ...string) Metric {
- return newValue(desc, UntypedValue, 0, lvs...)
- }),
- }
-}
-
-// GetMetricWithLabelValues replaces the method of the same name in
-// MetricVec. The difference is that this method returns an Untyped and not a
-// Metric so that no type conversion is required.
-func (m *UntypedVec) GetMetricWithLabelValues(lvs ...string) (Untyped, error) {
- metric, err := m.MetricVec.GetMetricWithLabelValues(lvs...)
- if metric != nil {
- return metric.(Untyped), err
- }
- return nil, err
-}
-
-// GetMetricWith replaces the method of the same name in MetricVec. The
-// difference is that this method returns an Untyped and not a Metric so that no
-// type conversion is required.
-func (m *UntypedVec) GetMetricWith(labels Labels) (Untyped, error) {
- metric, err := m.MetricVec.GetMetricWith(labels)
- if metric != nil {
- return metric.(Untyped), err
- }
- return nil, err
-}
-
-// WithLabelValues works as GetMetricWithLabelValues, but panics where
-// GetMetricWithLabelValues would have returned an error. By not returning an
-// error, WithLabelValues allows shortcuts like
-// myVec.WithLabelValues("404", "GET").Add(42)
-func (m *UntypedVec) WithLabelValues(lvs ...string) Untyped {
- return m.MetricVec.WithLabelValues(lvs...).(Untyped)
-}
-
-// With works as GetMetricWith, but panics where GetMetricWithLabels would have
-// returned an error. By not returning an error, With allows shortcuts like
-// myVec.With(Labels{"code": "404", "method": "GET"}).Add(42)
-func (m *UntypedVec) With(labels Labels) Untyped {
- return m.MetricVec.With(labels).(Untyped)
-}
-
-// UntypedFunc is an Untyped whose value is determined at collect time by
-// calling a provided function.
+// UntypedFunc works like GaugeFunc but the collected metric is of type
+// "Untyped". UntypedFunc is useful to mirror an external metric of unknown
+// type.
//
// To create UntypedFunc instances, use NewUntypedFunc.
type UntypedFunc interface {
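
NewUntypedFunc is kept by the change above, and the rewritten comment describes its remaining purpose: mirroring an external value of unknown type. A minimal sketch, with readExternalValue as a hypothetical placeholder:

package metricsdemo

import "github.com/prometheus/client_golang/prometheus"

// readExternalValue stands in for whatever supplies the mirrored number.
func readExternalValue() float64 { return 42 }

var mirrored = prometheus.NewUntypedFunc(
	prometheus.UntypedOpts{
		Name: "external_mirrored_value",
		Help: "Value mirrored from a system whose metric type is unknown.",
	},
	readExternalValue,
)

func init() { prometheus.MustRegister(mirrored) }
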
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/value.go b/vendor/github.com/prometheus/client_golang/prometheus/value.go
index a944c377..9fb7eab0 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/value.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/value.go
@@ -14,11 +14,8 @@
package prometheus
import (
- "errors"
"fmt"
- "math"
"sort"
- "sync/atomic"
dto "github.com/prometheus/client_model/go"
@@ -36,77 +33,6 @@ const (
UntypedValue
)
-var errInconsistentCardinality = errors.New("inconsistent label cardinality")
-
-// value is a generic metric for simple values. It implements Metric, Collector,
-// Counter, Gauge, and Untyped. Its effective type is determined by
-// ValueType. This is a low-level building block used by the library to back the
-// implementations of Counter, Gauge, and Untyped.
-type value struct {
- // valBits containst the bits of the represented float64 value. It has
- // to go first in the struct to guarantee alignment for atomic
- // operations. http://golang.org/pkg/sync/atomic/#pkg-note-BUG
- valBits uint64
-
- selfCollector
-
- desc *Desc
- valType ValueType
- labelPairs []*dto.LabelPair
-}
-
-// newValue returns a newly allocated value with the given Desc, ValueType,
-// sample value and label values. It panics if the number of label
-// values is different from the number of variable labels in Desc.
-func newValue(desc *Desc, valueType ValueType, val float64, labelValues ...string) *value {
- if len(labelValues) != len(desc.variableLabels) {
- panic(errInconsistentCardinality)
- }
- result := &value{
- desc: desc,
- valType: valueType,
- valBits: math.Float64bits(val),
- labelPairs: makeLabelPairs(desc, labelValues),
- }
- result.init(result)
- return result
-}
-
-func (v *value) Desc() *Desc {
- return v.desc
-}
-
-func (v *value) Set(val float64) {
- atomic.StoreUint64(&v.valBits, math.Float64bits(val))
-}
-
-func (v *value) Inc() {
- v.Add(1)
-}
-
-func (v *value) Dec() {
- v.Add(-1)
-}
-
-func (v *value) Add(val float64) {
- for {
- oldBits := atomic.LoadUint64(&v.valBits)
- newBits := math.Float64bits(math.Float64frombits(oldBits) + val)
- if atomic.CompareAndSwapUint64(&v.valBits, oldBits, newBits) {
- return
- }
- }
-}
-
-func (v *value) Sub(val float64) {
- v.Add(val * -1)
-}
-
-func (v *value) Write(out *dto.Metric) error {
- val := math.Float64frombits(atomic.LoadUint64(&v.valBits))
- return populateMetric(v.valType, val, v.labelPairs, out)
-}
-
// valueFunc is a generic metric for simple values retrieved on collect time
// from a function. It implements Metric and Collector. Its effective type is
// determined by ValueType. This is a low-level building block used by the
@@ -153,8 +79,8 @@ func (v *valueFunc) Write(out *dto.Metric) error {
// the Collect method. NewConstMetric returns an error if the length of
// labelValues is not consistent with the variable labels in Desc.
func NewConstMetric(desc *Desc, valueType ValueType, value float64, labelValues ...string) (Metric, error) {
- if len(desc.variableLabels) != len(labelValues) {
- return nil, errInconsistentCardinality
+ if err := validateLabelValues(labelValues, len(desc.variableLabels)); err != nil {
+ return nil, err
}
return &constMetric{
desc: desc,
@@ -226,9 +152,7 @@ func makeLabelPairs(desc *Desc, labelValues []string) []*dto.LabelPair {
Value: proto.String(labelValues[i]),
})
}
- for _, lp := range desc.constLabelPairs {
- labelPairs = append(labelPairs, lp)
- }
+ labelPairs = append(labelPairs, desc.constLabelPairs...)
sort.Sort(LabelPairSorter(labelPairs))
return labelPairs
}
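
valueFunc, which survives the cleanup above, is the low-level building block behind the *Func metric types such as GaugeFunc and the UntypedFunc shown earlier: the value is read from a callback at collect time. A short sketch using NewGaugeFunc (the default Go collector already exports go_goroutines; this is purely to illustrate the collect-time callback):

package metricsdemo

import (
	"runtime"

	"github.com/prometheus/client_golang/prometheus"
)

var goroutines = prometheus.NewGaugeFunc(
	prometheus.GaugeOpts{
		Name: "app_goroutines",
		Help: "Number of goroutines currently running.",
	},
	func() float64 { return float64(runtime.NumGoroutine()) },
)

func init() { prometheus.MustRegister(goroutines) }
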
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/vec.go b/vendor/github.com/prometheus/client_golang/prometheus/vec.go
index 7f3eef9a..14ed9e85 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/vec.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/vec.go
@@ -20,200 +20,253 @@ import (
"github.com/prometheus/common/model"
)
-// MetricVec is a Collector to bundle metrics of the same name that
-// differ in their label values. MetricVec is usually not used directly but as a
-// building block for implementations of vectors of a given metric
-// type. GaugeVec, CounterVec, SummaryVec, and UntypedVec are examples already
-// provided in this package.
-type MetricVec struct {
- mtx sync.RWMutex // Protects the children.
- children map[uint64][]metricWithLabelValues
- desc *Desc
-
- newMetric func(labelValues ...string) Metric
- hashAdd func(h uint64, s string) uint64 // replace hash function for testing collision handling
+// metricVec is a Collector to bundle metrics of the same name that differ in
+// their label values. metricVec is not used directly (and therefore
+// unexported). It is used as a building block for implementations of vectors of
+// a given metric type, like GaugeVec, CounterVec, SummaryVec, and HistogramVec.
+// It also handles label currying. It uses metricMap internally.
+type metricVec struct {
+ *metricMap
+
+ curry []curriedLabelValue
+
+ // hashAdd and hashAddByte can be replaced for testing collision handling.
+ hashAdd func(h uint64, s string) uint64
hashAddByte func(h uint64, b byte) uint64
}
-// newMetricVec returns an initialized MetricVec. The concrete value is
-// returned for embedding into another struct.
-func newMetricVec(desc *Desc, newMetric func(lvs ...string) Metric) *MetricVec {
- return &MetricVec{
- children: map[uint64][]metricWithLabelValues{},
- desc: desc,
- newMetric: newMetric,
+// newMetricVec returns an initialized metricVec.
+func newMetricVec(desc *Desc, newMetric func(lvs ...string) Metric) *metricVec {
+ return &metricVec{
+ metricMap: &metricMap{
+ metrics: map[uint64][]metricWithLabelValues{},
+ desc: desc,
+ newMetric: newMetric,
+ },
hashAdd: hashAdd,
hashAddByte: hashAddByte,
}
}
-// metricWithLabelValues provides the metric and its label values for
-// disambiguation on hash collision.
-type metricWithLabelValues struct {
- values []string
- metric Metric
-}
+// DeleteLabelValues removes the metric where the variable labels are the same
+// as those passed in as labels (same order as the VariableLabels in Desc). It
+// returns true if a metric was deleted.
+//
+// It is not an error if the number of label values is not the same as the
+// number of VariableLabels in Desc. However, such inconsistent label count can
+// never match an actual metric, so the method will always return false in that
+// case.
+//
+// Note that for more than one label value, this method is prone to mistakes
+// caused by an incorrect order of arguments. Consider Delete(Labels) as an
+// alternative to avoid that type of mistake. For higher label numbers, the
+// latter has a much more readable (albeit more verbose) syntax, but it comes
+// with a performance overhead (for creating and processing the Labels map).
+// See also the CounterVec example.
+func (m *metricVec) DeleteLabelValues(lvs ...string) bool {
+ h, err := m.hashLabelValues(lvs)
+ if err != nil {
+ return false
+ }
-// Describe implements Collector. The length of the returned slice
-// is always one.
-func (m *MetricVec) Describe(ch chan<- *Desc) {
- ch <- m.desc
+ return m.metricMap.deleteByHashWithLabelValues(h, lvs, m.curry)
}
-// Collect implements Collector.
-func (m *MetricVec) Collect(ch chan<- Metric) {
- m.mtx.RLock()
- defer m.mtx.RUnlock()
+// Delete deletes the metric where the variable labels are the same as those
+// passed in as labels. It returns true if a metric was deleted.
+//
+// It is not an error if the number and names of the Labels are inconsistent
+// with those of the VariableLabels in Desc. However, such inconsistent Labels
+// can never match an actual metric, so the method will always return false in
+// that case.
+//
+// This method is used for the same purpose as DeleteLabelValues(...string). See
+// there for pros and cons of the two methods.
+func (m *metricVec) Delete(labels Labels) bool {
+ h, err := m.hashLabels(labels)
+ if err != nil {
+ return false
+ }
- for _, metrics := range m.children {
- for _, metric := range metrics {
- ch <- metric.metric
+ return m.metricMap.deleteByHashWithLabels(h, labels, m.curry)
+}
+
+func (m *metricVec) curryWith(labels Labels) (*metricVec, error) {
+ var (
+ newCurry []curriedLabelValue
+ oldCurry = m.curry
+ iCurry int
+ )
+ for i, label := range m.desc.variableLabels {
+ val, ok := labels[label]
+ if iCurry < len(oldCurry) && oldCurry[iCurry].index == i {
+ if ok {
+ return nil, fmt.Errorf("label name %q is already curried", label)
+ }
+ newCurry = append(newCurry, oldCurry[iCurry])
+ iCurry++
+ } else {
+ if !ok {
+ continue // Label stays uncurried.
+ }
+ newCurry = append(newCurry, curriedLabelValue{i, val})
}
}
+ if l := len(oldCurry) + len(labels) - len(newCurry); l > 0 {
+ return nil, fmt.Errorf("%d unknown label(s) found during currying", l)
+ }
+
+ return &metricVec{
+ metricMap: m.metricMap,
+ curry: newCurry,
+ hashAdd: m.hashAdd,
+ hashAddByte: m.hashAddByte,
+ }, nil
}
-// GetMetricWithLabelValues returns the Metric for the given slice of label
-// values (same order as the VariableLabels in Desc). If that combination of
-// label values is accessed for the first time, a new Metric is created.
-//
-// It is possible to call this method without using the returned Metric to only
-// create the new Metric but leave it at its start value (e.g. a Summary or
-// Histogram without any observations). See also the SummaryVec example.
-//
-// Keeping the Metric for later use is possible (and should be considered if
-// performance is critical), but keep in mind that Reset, DeleteLabelValues and
-// Delete can be used to delete the Metric from the MetricVec. In that case, the
-// Metric will still exist, but it will not be exported anymore, even if a
-// Metric with the same label values is created later. See also the CounterVec
-// example.
-//
-// An error is returned if the number of label values is not the same as the
-// number of VariableLabels in Desc.
-//
-// Note that for more than one label value, this method is prone to mistakes
-// caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as
-// an alternative to avoid that type of mistake. For higher label numbers, the
-// latter has a much more readable (albeit more verbose) syntax, but it comes
-// with a performance overhead (for creating and processing the Labels map).
-// See also the GaugeVec example.
-func (m *MetricVec) GetMetricWithLabelValues(lvs ...string) (Metric, error) {
+func (m *metricVec) getMetricWithLabelValues(lvs ...string) (Metric, error) {
h, err := m.hashLabelValues(lvs)
if err != nil {
return nil, err
}
- return m.getOrCreateMetricWithLabelValues(h, lvs), nil
+ return m.metricMap.getOrCreateMetricWithLabelValues(h, lvs, m.curry), nil
}
-// GetMetricWith returns the Metric for the given Labels map (the label names
-// must match those of the VariableLabels in Desc). If that label map is
-// accessed for the first time, a new Metric is created. Implications of
-// creating a Metric without using it and keeping the Metric for later use are
-// the same as for GetMetricWithLabelValues.
-//
-// An error is returned if the number and names of the Labels are inconsistent
-// with those of the VariableLabels in Desc.
-//
-// This method is used for the same purpose as
-// GetMetricWithLabelValues(...string). See there for pros and cons of the two
-// methods.
-func (m *MetricVec) GetMetricWith(labels Labels) (Metric, error) {
+func (m *metricVec) getMetricWith(labels Labels) (Metric, error) {
h, err := m.hashLabels(labels)
if err != nil {
return nil, err
}
- return m.getOrCreateMetricWithLabels(h, labels), nil
+ return m.metricMap.getOrCreateMetricWithLabels(h, labels, m.curry), nil
}
-// WithLabelValues works as GetMetricWithLabelValues, but panics if an error
-// occurs. The method allows neat syntax like:
-// httpReqs.WithLabelValues("404", "POST").Inc()
-func (m *MetricVec) WithLabelValues(lvs ...string) Metric {
- metric, err := m.GetMetricWithLabelValues(lvs...)
- if err != nil {
- panic(err)
+func (m *metricVec) hashLabelValues(vals []string) (uint64, error) {
+ if err := validateLabelValues(vals, len(m.desc.variableLabels)-len(m.curry)); err != nil {
+ return 0, err
+ }
+
+ var (
+ h = hashNew()
+ curry = m.curry
+ iVals, iCurry int
+ )
+ for i := 0; i < len(m.desc.variableLabels); i++ {
+ if iCurry < len(curry) && curry[iCurry].index == i {
+ h = m.hashAdd(h, curry[iCurry].value)
+ iCurry++
+ } else {
+ h = m.hashAdd(h, vals[iVals])
+ iVals++
+ }
+ h = m.hashAddByte(h, model.SeparatorByte)
}
- return metric
+ return h, nil
}
-// With works as GetMetricWith, but panics if an error occurs. The method allows
-// neat syntax like:
-// httpReqs.With(Labels{"status":"404", "method":"POST"}).Inc()
-func (m *MetricVec) With(labels Labels) Metric {
- metric, err := m.GetMetricWith(labels)
- if err != nil {
- panic(err)
+func (m *metricVec) hashLabels(labels Labels) (uint64, error) {
+ if err := validateValuesInLabels(labels, len(m.desc.variableLabels)-len(m.curry)); err != nil {
+ return 0, err
}
- return metric
+
+ var (
+ h = hashNew()
+ curry = m.curry
+ iCurry int
+ )
+ for i, label := range m.desc.variableLabels {
+ val, ok := labels[label]
+ if iCurry < len(curry) && curry[iCurry].index == i {
+ if ok {
+ return 0, fmt.Errorf("label name %q is already curried", label)
+ }
+ h = m.hashAdd(h, curry[iCurry].value)
+ iCurry++
+ } else {
+ if !ok {
+ return 0, fmt.Errorf("label name %q missing in label map", label)
+ }
+ h = m.hashAdd(h, val)
+ }
+ h = m.hashAddByte(h, model.SeparatorByte)
+ }
+ return h, nil
}
-// DeleteLabelValues removes the metric where the variable labels are the same
-// as those passed in as labels (same order as the VariableLabels in Desc). It
-// returns true if a metric was deleted.
-//
-// It is not an error if the number of label values is not the same as the
-// number of VariableLabels in Desc. However, such inconsistent label count can
-// never match an actual Metric, so the method will always return false in that
-// case.
-//
-// Note that for more than one label value, this method is prone to mistakes
-// caused by an incorrect order of arguments. Consider Delete(Labels) as an
-// alternative to avoid that type of mistake. For higher label numbers, the
-// latter has a much more readable (albeit more verbose) syntax, but it comes
-// with a performance overhead (for creating and processing the Labels map).
-// See also the CounterVec example.
-func (m *MetricVec) DeleteLabelValues(lvs ...string) bool {
- m.mtx.Lock()
- defer m.mtx.Unlock()
+// metricWithLabelValues provides the metric and its label values for
+// disambiguation on hash collision.
+type metricWithLabelValues struct {
+ values []string
+ metric Metric
+}
- h, err := m.hashLabelValues(lvs)
- if err != nil {
- return false
+// curriedLabelValue sets the curried value for a label at the given index.
+type curriedLabelValue struct {
+ index int
+ value string
+}
+
+// metricMap is a helper for metricVec and shared between differently curried
+// metricVecs.
+type metricMap struct {
+ mtx sync.RWMutex // Protects metrics.
+ metrics map[uint64][]metricWithLabelValues
+ desc *Desc
+ newMetric func(labelValues ...string) Metric
+}
+
+// Describe implements Collector. It will send exactly one Desc to the provided
+// channel.
+func (m *metricMap) Describe(ch chan<- *Desc) {
+ ch <- m.desc
+}
+
+// Collect implements Collector.
+func (m *metricMap) Collect(ch chan<- Metric) {
+ m.mtx.RLock()
+ defer m.mtx.RUnlock()
+
+ for _, metrics := range m.metrics {
+ for _, metric := range metrics {
+ ch <- metric.metric
+ }
}
- return m.deleteByHashWithLabelValues(h, lvs)
}
-// Delete deletes the metric where the variable labels are the same as those
-// passed in as labels. It returns true if a metric was deleted.
-//
-// It is not an error if the number and names of the Labels are inconsistent
-// with those of the VariableLabels in the Desc of the MetricVec. However, such
-// inconsistent Labels can never match an actual Metric, so the method will
-// always return false in that case.
-//
-// This method is used for the same purpose as DeleteLabelValues(...string). See
-// there for pros and cons of the two methods.
-func (m *MetricVec) Delete(labels Labels) bool {
+// Reset deletes all metrics in this vector.
+func (m *metricMap) Reset() {
m.mtx.Lock()
defer m.mtx.Unlock()
- h, err := m.hashLabels(labels)
- if err != nil {
- return false
+ for h := range m.metrics {
+ delete(m.metrics, h)
}
-
- return m.deleteByHashWithLabels(h, labels)
}
// deleteByHashWithLabelValues removes the metric from the hash bucket h. If
// there are multiple matches in the bucket, use lvs to select a metric and
// remove only that metric.
-func (m *MetricVec) deleteByHashWithLabelValues(h uint64, lvs []string) bool {
- metrics, ok := m.children[h]
+func (m *metricMap) deleteByHashWithLabelValues(
+ h uint64, lvs []string, curry []curriedLabelValue,
+) bool {
+ m.mtx.Lock()
+ defer m.mtx.Unlock()
+
+ metrics, ok := m.metrics[h]
if !ok {
return false
}
- i := m.findMetricWithLabelValues(metrics, lvs)
+ i := findMetricWithLabelValues(metrics, lvs, curry)
if i >= len(metrics) {
return false
}
if len(metrics) > 1 {
- m.children[h] = append(metrics[:i], metrics[i+1:]...)
+ m.metrics[h] = append(metrics[:i], metrics[i+1:]...)
} else {
- delete(m.children, h)
+ delete(m.metrics, h)
}
return true
}
@@ -221,69 +274,38 @@ func (m *MetricVec) deleteByHashWithLabelValues(h uint64, lvs []string) bool {
// deleteByHashWithLabels removes the metric from the hash bucket h. If there
// are multiple matches in the bucket, use lvs to select a metric and remove
// only that metric.
-func (m *MetricVec) deleteByHashWithLabels(h uint64, labels Labels) bool {
- metrics, ok := m.children[h]
+func (m *metricMap) deleteByHashWithLabels(
+ h uint64, labels Labels, curry []curriedLabelValue,
+) bool {
+ m.mtx.Lock()
+ defer m.mtx.Unlock()
+
+ metrics, ok := m.metrics[h]
if !ok {
return false
}
- i := m.findMetricWithLabels(metrics, labels)
+ i := findMetricWithLabels(m.desc, metrics, labels, curry)
if i >= len(metrics) {
return false
}
if len(metrics) > 1 {
- m.children[h] = append(metrics[:i], metrics[i+1:]...)
+ m.metrics[h] = append(metrics[:i], metrics[i+1:]...)
} else {
- delete(m.children, h)
+ delete(m.metrics, h)
}
return true
}
-// Reset deletes all metrics in this vector.
-func (m *MetricVec) Reset() {
- m.mtx.Lock()
- defer m.mtx.Unlock()
-
- for h := range m.children {
- delete(m.children, h)
- }
-}
-
-func (m *MetricVec) hashLabelValues(vals []string) (uint64, error) {
- if len(vals) != len(m.desc.variableLabels) {
- return 0, errInconsistentCardinality
- }
- h := hashNew()
- for _, val := range vals {
- h = m.hashAdd(h, val)
- h = m.hashAddByte(h, model.SeparatorByte)
- }
- return h, nil
-}
-
-func (m *MetricVec) hashLabels(labels Labels) (uint64, error) {
- if len(labels) != len(m.desc.variableLabels) {
- return 0, errInconsistentCardinality
- }
- h := hashNew()
- for _, label := range m.desc.variableLabels {
- val, ok := labels[label]
- if !ok {
- return 0, fmt.Errorf("label name %q missing in label map", label)
- }
- h = m.hashAdd(h, val)
- h = m.hashAddByte(h, model.SeparatorByte)
- }
- return h, nil
-}
-
// getOrCreateMetricWithLabelValues retrieves the metric by hash and label value
// or creates it and returns the new one.
//
// This function holds the mutex.
-func (m *MetricVec) getOrCreateMetricWithLabelValues(hash uint64, lvs []string) Metric {
+func (m *metricMap) getOrCreateMetricWithLabelValues(
+ hash uint64, lvs []string, curry []curriedLabelValue,
+) Metric {
m.mtx.RLock()
- metric, ok := m.getMetricWithLabelValues(hash, lvs)
+ metric, ok := m.getMetricWithHashAndLabelValues(hash, lvs, curry)
m.mtx.RUnlock()
if ok {
return metric
@@ -291,13 +313,11 @@ func (m *MetricVec) getOrCreateMetricWithLabelValues(hash uint64, lvs []string)
m.mtx.Lock()
defer m.mtx.Unlock()
- metric, ok = m.getMetricWithLabelValues(hash, lvs)
+ metric, ok = m.getMetricWithHashAndLabelValues(hash, lvs, curry)
if !ok {
- // Copy to avoid allocation in case wo don't go down this code path.
- copiedLVs := make([]string, len(lvs))
- copy(copiedLVs, lvs)
- metric = m.newMetric(copiedLVs...)
- m.children[hash] = append(m.children[hash], metricWithLabelValues{values: copiedLVs, metric: metric})
+ inlinedLVs := inlineLabelValues(lvs, curry)
+ metric = m.newMetric(inlinedLVs...)
+ m.metrics[hash] = append(m.metrics[hash], metricWithLabelValues{values: inlinedLVs, metric: metric})
}
return metric
}
@@ -306,9 +326,11 @@ func (m *MetricVec) getOrCreateMetricWithLabelValues(hash uint64, lvs []string)
// or creates it and returns the new one.
//
// This function holds the mutex.
-func (m *MetricVec) getOrCreateMetricWithLabels(hash uint64, labels Labels) Metric {
+func (m *metricMap) getOrCreateMetricWithLabels(
+ hash uint64, labels Labels, curry []curriedLabelValue,
+) Metric {
m.mtx.RLock()
- metric, ok := m.getMetricWithLabels(hash, labels)
+ metric, ok := m.getMetricWithHashAndLabels(hash, labels, curry)
m.mtx.RUnlock()
if ok {
return metric
@@ -316,33 +338,37 @@ func (m *MetricVec) getOrCreateMetricWithLabels(hash uint64, labels Labels) Metr
m.mtx.Lock()
defer m.mtx.Unlock()
- metric, ok = m.getMetricWithLabels(hash, labels)
+ metric, ok = m.getMetricWithHashAndLabels(hash, labels, curry)
if !ok {
- lvs := m.extractLabelValues(labels)
+ lvs := extractLabelValues(m.desc, labels, curry)
metric = m.newMetric(lvs...)
- m.children[hash] = append(m.children[hash], metricWithLabelValues{values: lvs, metric: metric})
+ m.metrics[hash] = append(m.metrics[hash], metricWithLabelValues{values: lvs, metric: metric})
}
return metric
}
-// getMetricWithLabelValues gets a metric while handling possible collisions in
-// the hash space. Must be called while holding read mutex.
-func (m *MetricVec) getMetricWithLabelValues(h uint64, lvs []string) (Metric, bool) {
- metrics, ok := m.children[h]
+// getMetricWithHashAndLabelValues gets a metric while handling possible
+// collisions in the hash space. Must be called while holding the read mutex.
+func (m *metricMap) getMetricWithHashAndLabelValues(
+ h uint64, lvs []string, curry []curriedLabelValue,
+) (Metric, bool) {
+ metrics, ok := m.metrics[h]
if ok {
- if i := m.findMetricWithLabelValues(metrics, lvs); i < len(metrics) {
+ if i := findMetricWithLabelValues(metrics, lvs, curry); i < len(metrics) {
return metrics[i].metric, true
}
}
return nil, false
}
-// getMetricWithLabels gets a metric while handling possible collisions in
+// getMetricWithHashAndLabels gets a metric while handling possible collisions in
// the hash space. Must be called while holding read mutex.
-func (m *MetricVec) getMetricWithLabels(h uint64, labels Labels) (Metric, bool) {
- metrics, ok := m.children[h]
+func (m *metricMap) getMetricWithHashAndLabels(
+ h uint64, labels Labels, curry []curriedLabelValue,
+) (Metric, bool) {
+ metrics, ok := m.metrics[h]
if ok {
- if i := m.findMetricWithLabels(metrics, labels); i < len(metrics) {
+ if i := findMetricWithLabels(m.desc, metrics, labels, curry); i < len(metrics) {
return metrics[i].metric, true
}
}
@@ -351,9 +377,11 @@ func (m *MetricVec) getMetricWithLabels(h uint64, labels Labels) (Metric, bool)
// findMetricWithLabelValues returns the index of the matching metric or
// len(metrics) if not found.
-func (m *MetricVec) findMetricWithLabelValues(metrics []metricWithLabelValues, lvs []string) int {
+func findMetricWithLabelValues(
+ metrics []metricWithLabelValues, lvs []string, curry []curriedLabelValue,
+) int {
for i, metric := range metrics {
- if m.matchLabelValues(metric.values, lvs) {
+ if matchLabelValues(metric.values, lvs, curry) {
return i
}
}
@@ -362,32 +390,51 @@ func (m *MetricVec) findMetricWithLabelValues(metrics []metricWithLabelValues, l
// findMetricWithLabels returns the index of the matching metric or len(metrics)
// if not found.
-func (m *MetricVec) findMetricWithLabels(metrics []metricWithLabelValues, labels Labels) int {
+func findMetricWithLabels(
+ desc *Desc, metrics []metricWithLabelValues, labels Labels, curry []curriedLabelValue,
+) int {
for i, metric := range metrics {
- if m.matchLabels(metric.values, labels) {
+ if matchLabels(desc, metric.values, labels, curry) {
return i
}
}
return len(metrics)
}
-func (m *MetricVec) matchLabelValues(values []string, lvs []string) bool {
- if len(values) != len(lvs) {
+func matchLabelValues(values []string, lvs []string, curry []curriedLabelValue) bool {
+ if len(values) != len(lvs)+len(curry) {
return false
}
+ var iLVs, iCurry int
for i, v := range values {
- if v != lvs[i] {
+ if iCurry < len(curry) && curry[iCurry].index == i {
+ if v != curry[iCurry].value {
+ return false
+ }
+ iCurry++
+ continue
+ }
+ if v != lvs[iLVs] {
return false
}
+ iLVs++
}
return true
}
-func (m *MetricVec) matchLabels(values []string, labels Labels) bool {
- if len(labels) != len(values) {
+func matchLabels(desc *Desc, values []string, labels Labels, curry []curriedLabelValue) bool {
+ if len(values) != len(labels)+len(curry) {
return false
}
- for i, k := range m.desc.variableLabels {
+ iCurry := 0
+ for i, k := range desc.variableLabels {
+ if iCurry < len(curry) && curry[iCurry].index == i {
+ if values[i] != curry[iCurry].value {
+ return false
+ }
+ iCurry++
+ continue
+ }
if values[i] != labels[k] {
return false
}
@@ -395,10 +442,31 @@ func (m *MetricVec) matchLabels(values []string, labels Labels) bool {
return true
}
-func (m *MetricVec) extractLabelValues(labels Labels) []string {
- labelValues := make([]string, len(labels))
- for i, k := range m.desc.variableLabels {
+func extractLabelValues(desc *Desc, labels Labels, curry []curriedLabelValue) []string {
+ labelValues := make([]string, len(labels)+len(curry))
+ iCurry := 0
+ for i, k := range desc.variableLabels {
+ if iCurry < len(curry) && curry[iCurry].index == i {
+ labelValues[i] = curry[iCurry].value
+ iCurry++
+ continue
+ }
labelValues[i] = labels[k]
}
return labelValues
}
+
+func inlineLabelValues(lvs []string, curry []curriedLabelValue) []string {
+ labelValues := make([]string, len(lvs)+len(curry))
+ var iCurry, iLVs int
+ for i := range labelValues {
+ if iCurry < len(curry) && curry[iCurry].index == i {
+ labelValues[i] = curry[iCurry].value
+ iCurry++
+ continue
+ }
+ labelValues[i] = lvs[iLVs]
+ iLVs++
+ }
+ return labelValues
+}
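A minimal, standalone sketch (not part of the vendored package) of how curried label values interleave with caller-supplied values, mirroring the inlineLabelValues logic above. The label names and values are made up for illustration.

package main

import "fmt"

// curriedLabelValue pins a label value at a fixed index of the final slice.
type curriedLabelValue struct {
	index int
	value string
}

// inlineLabelValues merges curried values (by index) with supplied values
// (in order), as the vendored helper above does.
func inlineLabelValues(lvs []string, curry []curriedLabelValue) []string {
	labelValues := make([]string, len(lvs)+len(curry))
	var iCurry, iLVs int
	for i := range labelValues {
		if iCurry < len(curry) && curry[iCurry].index == i {
			labelValues[i] = curry[iCurry].value
			iCurry++
			continue
		}
		labelValues[i] = lvs[iLVs]
		iLVs++
	}
	return labelValues
}

func main() {
	// Suppose a vector has labels ("method", "handler", "status") and
	// "handler" (index 1) has been curried to "/api".
	curry := []curriedLabelValue{{index: 1, value: "/api"}}
	fmt.Println(inlineLabelValues([]string{"GET", "200"}, curry))
	// Prints: [GET /api 200]
}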
diff --git a/vendor/github.com/prometheus/client_model/go/metrics.pb.go b/vendor/github.com/prometheus/client_model/go/metrics.pb.go
index b065f868..9805432c 100644
--- a/vendor/github.com/prometheus/client_model/go/metrics.pb.go
+++ b/vendor/github.com/prometheus/client_model/go/metrics.pb.go
@@ -1,34 +1,23 @@
-// Code generated by protoc-gen-go.
+// Code generated by protoc-gen-go. DO NOT EDIT.
// source: metrics.proto
-// DO NOT EDIT!
-
-/*
-Package io_prometheus_client is a generated protocol buffer package.
-
-It is generated from these files:
- metrics.proto
-
-It has these top-level messages:
- LabelPair
- Gauge
- Counter
- Quantile
- Summary
- Untyped
- Histogram
- Bucket
- Metric
- MetricFamily
-*/
-package io_prometheus_client
+
+package io_prometheus_client // import "github.com/prometheus/client_model/go"
import proto "github.com/golang/protobuf/proto"
+import fmt "fmt"
import math "math"
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
+var _ = fmt.Errorf
var _ = math.Inf
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
+
type MetricType int32
const (
@@ -70,16 +59,41 @@ func (x *MetricType) UnmarshalJSON(data []byte) error {
*x = MetricType(value)
return nil
}
+func (MetricType) EnumDescriptor() ([]byte, []int) {
+ return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{0}
+}
type LabelPair struct {
- Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
- Value *string `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"`
- XXX_unrecognized []byte `json:"-"`
+ Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+ Value *string `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
}
func (m *LabelPair) Reset() { *m = LabelPair{} }
func (m *LabelPair) String() string { return proto.CompactTextString(m) }
func (*LabelPair) ProtoMessage() {}
+func (*LabelPair) Descriptor() ([]byte, []int) {
+ return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{0}
+}
+func (m *LabelPair) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_LabelPair.Unmarshal(m, b)
+}
+func (m *LabelPair) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_LabelPair.Marshal(b, m, deterministic)
+}
+func (dst *LabelPair) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_LabelPair.Merge(dst, src)
+}
+func (m *LabelPair) XXX_Size() int {
+ return xxx_messageInfo_LabelPair.Size(m)
+}
+func (m *LabelPair) XXX_DiscardUnknown() {
+ xxx_messageInfo_LabelPair.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_LabelPair proto.InternalMessageInfo
func (m *LabelPair) GetName() string {
if m != nil && m.Name != nil {
@@ -96,13 +110,35 @@ func (m *LabelPair) GetValue() string {
}
type Gauge struct {
- Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"`
- XXX_unrecognized []byte `json:"-"`
+ Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
}
func (m *Gauge) Reset() { *m = Gauge{} }
func (m *Gauge) String() string { return proto.CompactTextString(m) }
func (*Gauge) ProtoMessage() {}
+func (*Gauge) Descriptor() ([]byte, []int) {
+ return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{1}
+}
+func (m *Gauge) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_Gauge.Unmarshal(m, b)
+}
+func (m *Gauge) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_Gauge.Marshal(b, m, deterministic)
+}
+func (dst *Gauge) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Gauge.Merge(dst, src)
+}
+func (m *Gauge) XXX_Size() int {
+ return xxx_messageInfo_Gauge.Size(m)
+}
+func (m *Gauge) XXX_DiscardUnknown() {
+ xxx_messageInfo_Gauge.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Gauge proto.InternalMessageInfo
func (m *Gauge) GetValue() float64 {
if m != nil && m.Value != nil {
@@ -112,13 +148,35 @@ func (m *Gauge) GetValue() float64 {
}
type Counter struct {
- Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"`
- XXX_unrecognized []byte `json:"-"`
+ Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
}
func (m *Counter) Reset() { *m = Counter{} }
func (m *Counter) String() string { return proto.CompactTextString(m) }
func (*Counter) ProtoMessage() {}
+func (*Counter) Descriptor() ([]byte, []int) {
+ return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{2}
+}
+func (m *Counter) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_Counter.Unmarshal(m, b)
+}
+func (m *Counter) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_Counter.Marshal(b, m, deterministic)
+}
+func (dst *Counter) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Counter.Merge(dst, src)
+}
+func (m *Counter) XXX_Size() int {
+ return xxx_messageInfo_Counter.Size(m)
+}
+func (m *Counter) XXX_DiscardUnknown() {
+ xxx_messageInfo_Counter.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Counter proto.InternalMessageInfo
func (m *Counter) GetValue() float64 {
if m != nil && m.Value != nil {
@@ -128,14 +186,36 @@ func (m *Counter) GetValue() float64 {
}
type Quantile struct {
- Quantile *float64 `protobuf:"fixed64,1,opt,name=quantile" json:"quantile,omitempty"`
- Value *float64 `protobuf:"fixed64,2,opt,name=value" json:"value,omitempty"`
- XXX_unrecognized []byte `json:"-"`
+ Quantile *float64 `protobuf:"fixed64,1,opt,name=quantile" json:"quantile,omitempty"`
+ Value *float64 `protobuf:"fixed64,2,opt,name=value" json:"value,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
}
func (m *Quantile) Reset() { *m = Quantile{} }
func (m *Quantile) String() string { return proto.CompactTextString(m) }
func (*Quantile) ProtoMessage() {}
+func (*Quantile) Descriptor() ([]byte, []int) {
+ return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{3}
+}
+func (m *Quantile) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_Quantile.Unmarshal(m, b)
+}
+func (m *Quantile) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_Quantile.Marshal(b, m, deterministic)
+}
+func (dst *Quantile) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Quantile.Merge(dst, src)
+}
+func (m *Quantile) XXX_Size() int {
+ return xxx_messageInfo_Quantile.Size(m)
+}
+func (m *Quantile) XXX_DiscardUnknown() {
+ xxx_messageInfo_Quantile.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Quantile proto.InternalMessageInfo
func (m *Quantile) GetQuantile() float64 {
if m != nil && m.Quantile != nil {
@@ -152,15 +232,37 @@ func (m *Quantile) GetValue() float64 {
}
type Summary struct {
- SampleCount *uint64 `protobuf:"varint,1,opt,name=sample_count" json:"sample_count,omitempty"`
- SampleSum *float64 `protobuf:"fixed64,2,opt,name=sample_sum" json:"sample_sum,omitempty"`
- Quantile []*Quantile `protobuf:"bytes,3,rep,name=quantile" json:"quantile,omitempty"`
- XXX_unrecognized []byte `json:"-"`
+ SampleCount *uint64 `protobuf:"varint,1,opt,name=sample_count,json=sampleCount" json:"sample_count,omitempty"`
+ SampleSum *float64 `protobuf:"fixed64,2,opt,name=sample_sum,json=sampleSum" json:"sample_sum,omitempty"`
+ Quantile []*Quantile `protobuf:"bytes,3,rep,name=quantile" json:"quantile,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
}
func (m *Summary) Reset() { *m = Summary{} }
func (m *Summary) String() string { return proto.CompactTextString(m) }
func (*Summary) ProtoMessage() {}
+func (*Summary) Descriptor() ([]byte, []int) {
+ return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{4}
+}
+func (m *Summary) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_Summary.Unmarshal(m, b)
+}
+func (m *Summary) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_Summary.Marshal(b, m, deterministic)
+}
+func (dst *Summary) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Summary.Merge(dst, src)
+}
+func (m *Summary) XXX_Size() int {
+ return xxx_messageInfo_Summary.Size(m)
+}
+func (m *Summary) XXX_DiscardUnknown() {
+ xxx_messageInfo_Summary.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Summary proto.InternalMessageInfo
func (m *Summary) GetSampleCount() uint64 {
if m != nil && m.SampleCount != nil {
@@ -184,13 +286,35 @@ func (m *Summary) GetQuantile() []*Quantile {
}
type Untyped struct {
- Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"`
- XXX_unrecognized []byte `json:"-"`
+ Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
}
func (m *Untyped) Reset() { *m = Untyped{} }
func (m *Untyped) String() string { return proto.CompactTextString(m) }
func (*Untyped) ProtoMessage() {}
+func (*Untyped) Descriptor() ([]byte, []int) {
+ return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{5}
+}
+func (m *Untyped) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_Untyped.Unmarshal(m, b)
+}
+func (m *Untyped) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_Untyped.Marshal(b, m, deterministic)
+}
+func (dst *Untyped) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Untyped.Merge(dst, src)
+}
+func (m *Untyped) XXX_Size() int {
+ return xxx_messageInfo_Untyped.Size(m)
+}
+func (m *Untyped) XXX_DiscardUnknown() {
+ xxx_messageInfo_Untyped.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Untyped proto.InternalMessageInfo
func (m *Untyped) GetValue() float64 {
if m != nil && m.Value != nil {
@@ -200,15 +324,37 @@ func (m *Untyped) GetValue() float64 {
}
type Histogram struct {
- SampleCount *uint64 `protobuf:"varint,1,opt,name=sample_count" json:"sample_count,omitempty"`
- SampleSum *float64 `protobuf:"fixed64,2,opt,name=sample_sum" json:"sample_sum,omitempty"`
- Bucket []*Bucket `protobuf:"bytes,3,rep,name=bucket" json:"bucket,omitempty"`
- XXX_unrecognized []byte `json:"-"`
+ SampleCount *uint64 `protobuf:"varint,1,opt,name=sample_count,json=sampleCount" json:"sample_count,omitempty"`
+ SampleSum *float64 `protobuf:"fixed64,2,opt,name=sample_sum,json=sampleSum" json:"sample_sum,omitempty"`
+ Bucket []*Bucket `protobuf:"bytes,3,rep,name=bucket" json:"bucket,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
}
func (m *Histogram) Reset() { *m = Histogram{} }
func (m *Histogram) String() string { return proto.CompactTextString(m) }
func (*Histogram) ProtoMessage() {}
+func (*Histogram) Descriptor() ([]byte, []int) {
+ return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{6}
+}
+func (m *Histogram) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_Histogram.Unmarshal(m, b)
+}
+func (m *Histogram) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_Histogram.Marshal(b, m, deterministic)
+}
+func (dst *Histogram) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Histogram.Merge(dst, src)
+}
+func (m *Histogram) XXX_Size() int {
+ return xxx_messageInfo_Histogram.Size(m)
+}
+func (m *Histogram) XXX_DiscardUnknown() {
+ xxx_messageInfo_Histogram.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Histogram proto.InternalMessageInfo
func (m *Histogram) GetSampleCount() uint64 {
if m != nil && m.SampleCount != nil {
@@ -232,14 +378,36 @@ func (m *Histogram) GetBucket() []*Bucket {
}
type Bucket struct {
- CumulativeCount *uint64 `protobuf:"varint,1,opt,name=cumulative_count" json:"cumulative_count,omitempty"`
- UpperBound *float64 `protobuf:"fixed64,2,opt,name=upper_bound" json:"upper_bound,omitempty"`
- XXX_unrecognized []byte `json:"-"`
+ CumulativeCount *uint64 `protobuf:"varint,1,opt,name=cumulative_count,json=cumulativeCount" json:"cumulative_count,omitempty"`
+ UpperBound *float64 `protobuf:"fixed64,2,opt,name=upper_bound,json=upperBound" json:"upper_bound,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
}
func (m *Bucket) Reset() { *m = Bucket{} }
func (m *Bucket) String() string { return proto.CompactTextString(m) }
func (*Bucket) ProtoMessage() {}
+func (*Bucket) Descriptor() ([]byte, []int) {
+ return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{7}
+}
+func (m *Bucket) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_Bucket.Unmarshal(m, b)
+}
+func (m *Bucket) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_Bucket.Marshal(b, m, deterministic)
+}
+func (dst *Bucket) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Bucket.Merge(dst, src)
+}
+func (m *Bucket) XXX_Size() int {
+ return xxx_messageInfo_Bucket.Size(m)
+}
+func (m *Bucket) XXX_DiscardUnknown() {
+ xxx_messageInfo_Bucket.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Bucket proto.InternalMessageInfo
func (m *Bucket) GetCumulativeCount() uint64 {
if m != nil && m.CumulativeCount != nil {
@@ -256,19 +424,41 @@ func (m *Bucket) GetUpperBound() float64 {
}
type Metric struct {
- Label []*LabelPair `protobuf:"bytes,1,rep,name=label" json:"label,omitempty"`
- Gauge *Gauge `protobuf:"bytes,2,opt,name=gauge" json:"gauge,omitempty"`
- Counter *Counter `protobuf:"bytes,3,opt,name=counter" json:"counter,omitempty"`
- Summary *Summary `protobuf:"bytes,4,opt,name=summary" json:"summary,omitempty"`
- Untyped *Untyped `protobuf:"bytes,5,opt,name=untyped" json:"untyped,omitempty"`
- Histogram *Histogram `protobuf:"bytes,7,opt,name=histogram" json:"histogram,omitempty"`
- TimestampMs *int64 `protobuf:"varint,6,opt,name=timestamp_ms" json:"timestamp_ms,omitempty"`
- XXX_unrecognized []byte `json:"-"`
+ Label []*LabelPair `protobuf:"bytes,1,rep,name=label" json:"label,omitempty"`
+ Gauge *Gauge `protobuf:"bytes,2,opt,name=gauge" json:"gauge,omitempty"`
+ Counter *Counter `protobuf:"bytes,3,opt,name=counter" json:"counter,omitempty"`
+ Summary *Summary `protobuf:"bytes,4,opt,name=summary" json:"summary,omitempty"`
+ Untyped *Untyped `protobuf:"bytes,5,opt,name=untyped" json:"untyped,omitempty"`
+ Histogram *Histogram `protobuf:"bytes,7,opt,name=histogram" json:"histogram,omitempty"`
+ TimestampMs *int64 `protobuf:"varint,6,opt,name=timestamp_ms,json=timestampMs" json:"timestamp_ms,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
}
func (m *Metric) Reset() { *m = Metric{} }
func (m *Metric) String() string { return proto.CompactTextString(m) }
func (*Metric) ProtoMessage() {}
+func (*Metric) Descriptor() ([]byte, []int) {
+ return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{8}
+}
+func (m *Metric) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_Metric.Unmarshal(m, b)
+}
+func (m *Metric) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_Metric.Marshal(b, m, deterministic)
+}
+func (dst *Metric) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Metric.Merge(dst, src)
+}
+func (m *Metric) XXX_Size() int {
+ return xxx_messageInfo_Metric.Size(m)
+}
+func (m *Metric) XXX_DiscardUnknown() {
+ xxx_messageInfo_Metric.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Metric proto.InternalMessageInfo
func (m *Metric) GetLabel() []*LabelPair {
if m != nil {
@@ -320,16 +510,38 @@ func (m *Metric) GetTimestampMs() int64 {
}
type MetricFamily struct {
- Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
- Help *string `protobuf:"bytes,2,opt,name=help" json:"help,omitempty"`
- Type *MetricType `protobuf:"varint,3,opt,name=type,enum=io.prometheus.client.MetricType" json:"type,omitempty"`
- Metric []*Metric `protobuf:"bytes,4,rep,name=metric" json:"metric,omitempty"`
- XXX_unrecognized []byte `json:"-"`
+ Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+ Help *string `protobuf:"bytes,2,opt,name=help" json:"help,omitempty"`
+ Type *MetricType `protobuf:"varint,3,opt,name=type,enum=io.prometheus.client.MetricType" json:"type,omitempty"`
+ Metric []*Metric `protobuf:"bytes,4,rep,name=metric" json:"metric,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
}
func (m *MetricFamily) Reset() { *m = MetricFamily{} }
func (m *MetricFamily) String() string { return proto.CompactTextString(m) }
func (*MetricFamily) ProtoMessage() {}
+func (*MetricFamily) Descriptor() ([]byte, []int) {
+ return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{9}
+}
+func (m *MetricFamily) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_MetricFamily.Unmarshal(m, b)
+}
+func (m *MetricFamily) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_MetricFamily.Marshal(b, m, deterministic)
+}
+func (dst *MetricFamily) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_MetricFamily.Merge(dst, src)
+}
+func (m *MetricFamily) XXX_Size() int {
+ return xxx_messageInfo_MetricFamily.Size(m)
+}
+func (m *MetricFamily) XXX_DiscardUnknown() {
+ xxx_messageInfo_MetricFamily.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_MetricFamily proto.InternalMessageInfo
func (m *MetricFamily) GetName() string {
if m != nil && m.Name != nil {
@@ -360,5 +572,58 @@ func (m *MetricFamily) GetMetric() []*Metric {
}
func init() {
+ proto.RegisterType((*LabelPair)(nil), "io.prometheus.client.LabelPair")
+ proto.RegisterType((*Gauge)(nil), "io.prometheus.client.Gauge")
+ proto.RegisterType((*Counter)(nil), "io.prometheus.client.Counter")
+ proto.RegisterType((*Quantile)(nil), "io.prometheus.client.Quantile")
+ proto.RegisterType((*Summary)(nil), "io.prometheus.client.Summary")
+ proto.RegisterType((*Untyped)(nil), "io.prometheus.client.Untyped")
+ proto.RegisterType((*Histogram)(nil), "io.prometheus.client.Histogram")
+ proto.RegisterType((*Bucket)(nil), "io.prometheus.client.Bucket")
+ proto.RegisterType((*Metric)(nil), "io.prometheus.client.Metric")
+ proto.RegisterType((*MetricFamily)(nil), "io.prometheus.client.MetricFamily")
proto.RegisterEnum("io.prometheus.client.MetricType", MetricType_name, MetricType_value)
}
+
+func init() { proto.RegisterFile("metrics.proto", fileDescriptor_metrics_c97c9a2b9560cb8f) }
+
+var fileDescriptor_metrics_c97c9a2b9560cb8f = []byte{
+ // 591 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x54, 0x4f, 0x4f, 0xdb, 0x4e,
+ 0x14, 0xfc, 0x99, 0xd8, 0x09, 0x7e, 0x86, 0x5f, 0xad, 0x15, 0x07, 0xab, 0x2d, 0x25, 0xcd, 0x89,
+ 0xf6, 0x10, 0x54, 0x04, 0xaa, 0x44, 0xdb, 0x03, 0x50, 0x1a, 0x2a, 0xd5, 0x40, 0x37, 0xc9, 0x81,
+ 0x5e, 0xac, 0x8d, 0x59, 0x25, 0x56, 0xbd, 0xb6, 0x6b, 0xef, 0x22, 0xe5, 0xdc, 0x43, 0xbf, 0x47,
+ 0xbf, 0x68, 0xab, 0xfd, 0xe3, 0x18, 0x24, 0xc3, 0xa9, 0xb7, 0xb7, 0xf3, 0x66, 0xde, 0x8e, 0x77,
+ 0xc7, 0x0b, 0x9b, 0x8c, 0xf2, 0x32, 0x89, 0xab, 0x61, 0x51, 0xe6, 0x3c, 0x47, 0x5b, 0x49, 0x2e,
+ 0x2b, 0x46, 0xf9, 0x82, 0x8a, 0x6a, 0x18, 0xa7, 0x09, 0xcd, 0xf8, 0xe0, 0x10, 0xdc, 0x2f, 0x64,
+ 0x46, 0xd3, 0x2b, 0x92, 0x94, 0x08, 0x81, 0x9d, 0x11, 0x46, 0x03, 0xab, 0x6f, 0xed, 0xba, 0x58,
+ 0xd5, 0x68, 0x0b, 0x9c, 0x5b, 0x92, 0x0a, 0x1a, 0xac, 0x29, 0x50, 0x2f, 0x06, 0xdb, 0xe0, 0x8c,
+ 0x88, 0x98, 0xdf, 0x69, 0x4b, 0x8d, 0x55, 0xb7, 0x77, 0xa0, 0x77, 0x9a, 0x8b, 0x8c, 0xd3, 0xf2,
+ 0x01, 0xc2, 0x7b, 0x58, 0xff, 0x2a, 0x48, 0xc6, 0x93, 0x94, 0xa2, 0xa7, 0xb0, 0xfe, 0xc3, 0xd4,
+ 0x86, 0xb4, 0x5a, 0xdf, 0xdf, 0x7d, 0xa5, 0xfe, 0x65, 0x41, 0x6f, 0x2c, 0x18, 0x23, 0xe5, 0x12,
+ 0xbd, 0x84, 0x8d, 0x8a, 0xb0, 0x22, 0xa5, 0x51, 0x2c, 0x77, 0x54, 0x13, 0x6c, 0xec, 0x69, 0x4c,
+ 0x99, 0x40, 0xdb, 0x00, 0x86, 0x52, 0x09, 0x66, 0x26, 0xb9, 0x1a, 0x19, 0x0b, 0x86, 0x8e, 0xee,
+ 0xec, 0xdf, 0xe9, 0x77, 0x76, 0xbd, 0xfd, 0x17, 0xc3, 0xb6, 0xb3, 0x1a, 0xd6, 0x8e, 0x1b, 0x7f,
+ 0xf2, 0x43, 0xa7, 0x19, 0x5f, 0x16, 0xf4, 0xe6, 0x81, 0x0f, 0xfd, 0x69, 0x81, 0x7b, 0x9e, 0x54,
+ 0x3c, 0x9f, 0x97, 0x84, 0xfd, 0x03, 0xb3, 0x07, 0xd0, 0x9d, 0x89, 0xf8, 0x3b, 0xe5, 0xc6, 0xea,
+ 0xf3, 0x76, 0xab, 0x27, 0x8a, 0x83, 0x0d, 0x77, 0x30, 0x81, 0xae, 0x46, 0xd0, 0x2b, 0xf0, 0x63,
+ 0xc1, 0x44, 0x4a, 0x78, 0x72, 0x7b, 0xdf, 0xc5, 0x93, 0x06, 0xd7, 0x4e, 0x76, 0xc0, 0x13, 0x45,
+ 0x41, 0xcb, 0x68, 0x96, 0x8b, 0xec, 0xc6, 0x58, 0x01, 0x05, 0x9d, 0x48, 0x64, 0xf0, 0x67, 0x0d,
+ 0xba, 0xa1, 0xca, 0x18, 0x3a, 0x04, 0x27, 0x95, 0x31, 0x0a, 0x2c, 0xe5, 0x6a, 0xa7, 0xdd, 0xd5,
+ 0x2a, 0x69, 0x58, 0xb3, 0xd1, 0x1b, 0x70, 0xe6, 0x32, 0x46, 0x6a, 0xb8, 0xb7, 0xff, 0xac, 0x5d,
+ 0xa6, 0x92, 0x86, 0x35, 0x13, 0xbd, 0x85, 0x5e, 0xac, 0xa3, 0x15, 0x74, 0x94, 0x68, 0xbb, 0x5d,
+ 0x64, 0xf2, 0x87, 0x6b, 0xb6, 0x14, 0x56, 0x3a, 0x33, 0x81, 0xfd, 0x98, 0xd0, 0x04, 0x0b, 0xd7,
+ 0x6c, 0x29, 0x14, 0xfa, 0x8e, 0x03, 0xe7, 0x31, 0xa1, 0x09, 0x02, 0xae, 0xd9, 0xe8, 0x03, 0xb8,
+ 0x8b, 0xfa, 0xea, 0x83, 0x9e, 0x92, 0x3e, 0x70, 0x30, 0xab, 0x84, 0xe0, 0x46, 0x21, 0xc3, 0xc2,
+ 0x13, 0x46, 0x2b, 0x4e, 0x58, 0x11, 0xb1, 0x2a, 0xe8, 0xf6, 0xad, 0xdd, 0x0e, 0xf6, 0x56, 0x58,
+ 0x58, 0x0d, 0x7e, 0x5b, 0xb0, 0xa1, 0x6f, 0xe0, 0x13, 0x61, 0x49, 0xba, 0x6c, 0xfd, 0x83, 0x11,
+ 0xd8, 0x0b, 0x9a, 0x16, 0xe6, 0x07, 0x56, 0x35, 0x3a, 0x00, 0x5b, 0x7a, 0x54, 0x47, 0xf8, 0xff,
+ 0x7e, 0xbf, 0xdd, 0x95, 0x9e, 0x3c, 0x59, 0x16, 0x14, 0x2b, 0xb6, 0x0c, 0x9f, 0x7e, 0x53, 0x02,
+ 0xfb, 0xb1, 0xf0, 0x69, 0x1d, 0x36, 0xdc, 0xd7, 0x21, 0x40, 0x33, 0x09, 0x79, 0xd0, 0x3b, 0xbd,
+ 0x9c, 0x5e, 0x4c, 0xce, 0xb0, 0xff, 0x1f, 0x72, 0xc1, 0x19, 0x1d, 0x4f, 0x47, 0x67, 0xbe, 0x25,
+ 0xf1, 0xf1, 0x34, 0x0c, 0x8f, 0xf1, 0xb5, 0xbf, 0x26, 0x17, 0xd3, 0x8b, 0xc9, 0xf5, 0xd5, 0xd9,
+ 0x47, 0xbf, 0x83, 0x36, 0xc1, 0x3d, 0xff, 0x3c, 0x9e, 0x5c, 0x8e, 0xf0, 0x71, 0xe8, 0xdb, 0x27,
+ 0x18, 0x5a, 0x5f, 0xb2, 0x6f, 0x47, 0xf3, 0x84, 0x2f, 0xc4, 0x6c, 0x18, 0xe7, 0x6c, 0xaf, 0xe9,
+ 0xee, 0xe9, 0x6e, 0xc4, 0xf2, 0x1b, 0x9a, 0xee, 0xcd, 0xf3, 0x77, 0x49, 0x1e, 0x35, 0xdd, 0x48,
+ 0x77, 0xff, 0x06, 0x00, 0x00, 0xff, 0xff, 0x45, 0x21, 0x7f, 0x64, 0x2b, 0x05, 0x00, 0x00,
+}
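A minimal sketch (not part of the vendored code) of how the generated getters above are typically used: they are nil-safe, so optional proto2 fields can be read without explicit nil checks. The metric name and value here are invented for illustration.

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	dto "github.com/prometheus/client_model/go"
)

func main() {
	mf := &dto.MetricFamily{
		Name: proto.String("http_requests_total"),
		Type: dto.MetricType_COUNTER.Enum(),
		Metric: []*dto.Metric{
			{Counter: &dto.Counter{Value: proto.Float64(42)}},
		},
	}
	// GetHelp returns "" because Help was never set; no nil check needed.
	fmt.Println(mf.GetName(), mf.GetType(), mf.GetHelp())
	fmt.Println(mf.GetMetric()[0].GetCounter().GetValue())
}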
diff --git a/vendor/github.com/prometheus/common/expfmt/decode.go b/vendor/github.com/prometheus/common/expfmt/decode.go
index 487fdc6c..c092723e 100644
--- a/vendor/github.com/prometheus/common/expfmt/decode.go
+++ b/vendor/github.com/prometheus/common/expfmt/decode.go
@@ -31,6 +31,7 @@ type Decoder interface {
Decode(*dto.MetricFamily) error
}
+// DecodeOptions contains options used by the Decoder and in sample extraction.
type DecodeOptions struct {
// Timestamp is added to each value from the stream that has no explicit timestamp set.
Timestamp model.Time
@@ -142,6 +143,8 @@ func (d *textDecoder) Decode(v *dto.MetricFamily) error {
return nil
}
+// SampleDecoder wraps a Decoder to extract samples from the metric families
+// decoded by the wrapped Decoder.
type SampleDecoder struct {
Dec Decoder
Opts *DecodeOptions
@@ -149,37 +152,51 @@ type SampleDecoder struct {
f dto.MetricFamily
}
+// Decode calls the Decode method of the wrapped Decoder and then extracts the
+// samples from the decoded MetricFamily into the provided model.Vector.
func (sd *SampleDecoder) Decode(s *model.Vector) error {
- if err := sd.Dec.Decode(&sd.f); err != nil {
+ err := sd.Dec.Decode(&sd.f)
+ if err != nil {
return err
}
- *s = extractSamples(&sd.f, sd.Opts)
- return nil
+ *s, err = extractSamples(&sd.f, sd.Opts)
+ return err
}
-// Extract samples builds a slice of samples from the provided metric families.
-func ExtractSamples(o *DecodeOptions, fams ...*dto.MetricFamily) model.Vector {
- var all model.Vector
+// ExtractSamples builds a slice of samples from the provided metric
+// families. If an error occurs during sample extraction, it continues to
+// extract from the remaining metric families. The returned error is the last
+// error that has occurred.
+func ExtractSamples(o *DecodeOptions, fams ...*dto.MetricFamily) (model.Vector, error) {
+ var (
+ all model.Vector
+ lastErr error
+ )
for _, f := range fams {
- all = append(all, extractSamples(f, o)...)
+ some, err := extractSamples(f, o)
+ if err != nil {
+ lastErr = err
+ continue
+ }
+ all = append(all, some...)
}
- return all
+ return all, lastErr
}
-func extractSamples(f *dto.MetricFamily, o *DecodeOptions) model.Vector {
+func extractSamples(f *dto.MetricFamily, o *DecodeOptions) (model.Vector, error) {
switch f.GetType() {
case dto.MetricType_COUNTER:
- return extractCounter(o, f)
+ return extractCounter(o, f), nil
case dto.MetricType_GAUGE:
- return extractGauge(o, f)
+ return extractGauge(o, f), nil
case dto.MetricType_SUMMARY:
- return extractSummary(o, f)
+ return extractSummary(o, f), nil
case dto.MetricType_UNTYPED:
- return extractUntyped(o, f)
+ return extractUntyped(o, f), nil
case dto.MetricType_HISTOGRAM:
- return extractHistogram(o, f)
+ return extractHistogram(o, f), nil
}
- panic("expfmt.extractSamples: unknown metric family type")
+ return nil, fmt.Errorf("expfmt.extractSamples: unknown metric family type %v", f.GetType())
}
func extractCounter(o *DecodeOptions, f *dto.MetricFamily) model.Vector {
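A minimal sketch (not part of the vendored code) of the new ExtractSamples signature: it now returns the last error encountered instead of panicking on an unknown metric family type, while still returning whatever samples it could extract. The metric family below is invented for illustration.

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	dto "github.com/prometheus/client_model/go"
	"github.com/prometheus/common/expfmt"
	"github.com/prometheus/common/model"
)

func main() {
	mf := &dto.MetricFamily{
		Name: proto.String("temperature_celsius"),
		Type: dto.MetricType_GAUGE.Enum(),
		Metric: []*dto.Metric{
			{Gauge: &dto.Gauge{Value: proto.Float64(21.5)}},
		},
	}
	opts := &expfmt.DecodeOptions{Timestamp: model.Now()}
	samples, err := expfmt.ExtractSamples(opts, mf)
	if err != nil {
		// Extraction is best-effort; err is the last error seen.
		fmt.Println("partial extraction:", err)
	}
	fmt.Println(samples)
}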
diff --git a/vendor/github.com/prometheus/common/expfmt/expfmt.go b/vendor/github.com/prometheus/common/expfmt/expfmt.go
index 366fbde9..c71bcb98 100644
--- a/vendor/github.com/prometheus/common/expfmt/expfmt.go
+++ b/vendor/github.com/prometheus/common/expfmt/expfmt.go
@@ -11,27 +11,25 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-// A package for reading and writing Prometheus metrics.
+// Package expfmt contains tools for reading and writing Prometheus metrics.
package expfmt
+// Format specifies the HTTP content type of the different wire protocols.
type Format string
+// Constants to assemble the Content-Type values for the different wire protocols.
const (
- TextVersion = "0.0.4"
-
+ TextVersion = "0.0.4"
ProtoType = `application/vnd.google.protobuf`
ProtoProtocol = `io.prometheus.client.MetricFamily`
ProtoFmt = ProtoType + "; proto=" + ProtoProtocol + ";"
// The Content-Type values for the different wire protocols.
FmtUnknown Format = `<unknown>`
- FmtText Format = `text/plain; version=` + TextVersion
+ FmtText Format = `text/plain; version=` + TextVersion + `; charset=utf-8`
FmtProtoDelim Format = ProtoFmt + ` encoding=delimited`
FmtProtoText Format = ProtoFmt + ` encoding=text`
FmtProtoCompact Format = ProtoFmt + ` encoding=compact-text`
-
- // fmtJSON2 is hidden as it is deprecated.
- fmtJSON2 Format = `application/json; version=0.0.2`
)
const (
diff --git a/vendor/github.com/prometheus/common/expfmt/text_create.go b/vendor/github.com/prometheus/common/expfmt/text_create.go
index 1e060879..f11321cd 100644
--- a/vendor/github.com/prometheus/common/expfmt/text_create.go
+++ b/vendor/github.com/prometheus/common/expfmt/text_create.go
@@ -25,9 +25,12 @@ import (
// MetricFamilyToText converts a MetricFamily proto message into text format and
// writes the resulting lines to 'out'. It returns the number of bytes written
-// and any error encountered. This function does not perform checks on the
-// content of the metric and label names, i.e. invalid metric or label names
+// and any error encountered. The output will have the same order as the input,
+// no further sorting is performed. Furthermore, this function assumes the input
+// is already sanitized and does not perform any sanity checks. If the input
+// contains duplicate metrics or invalid metric or label names, the conversion
// will result in invalid text format output.
+//
// This method fulfills the type 'prometheus.encoder'.
func MetricFamilyToText(out io.Writer, in *dto.MetricFamily) (int, error) {
var written int
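A minimal sketch (not part of the vendored code) of MetricFamilyToText: the caller is responsible for passing well-formed, de-duplicated input, since the function performs no sanitization of its own. The metric family below is invented for illustration.

package main

import (
	"os"

	"github.com/golang/protobuf/proto"
	dto "github.com/prometheus/client_model/go"
	"github.com/prometheus/common/expfmt"
)

func main() {
	mf := &dto.MetricFamily{
		Name: proto.String("process_open_fds"),
		Help: proto.String("Number of open file descriptors."),
		Type: dto.MetricType_GAUGE.Enum(),
		Metric: []*dto.Metric{
			{Gauge: &dto.Gauge{Value: proto.Float64(12)}},
		},
	}
	// Writes the text exposition format for this family to stdout.
	if _, err := expfmt.MetricFamilyToText(os.Stdout, mf); err != nil {
		panic(err)
	}
}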
diff --git a/vendor/github.com/prometheus/common/expfmt/text_parse.go b/vendor/github.com/prometheus/common/expfmt/text_parse.go
index bd170b16..ec3d86ba 100644
--- a/vendor/github.com/prometheus/common/expfmt/text_parse.go
+++ b/vendor/github.com/prometheus/common/expfmt/text_parse.go
@@ -47,7 +47,7 @@ func (e ParseError) Error() string {
}
// TextParser is used to parse the simple and flat text-based exchange format. Its
-// nil value is ready to use.
+// zero value is ready to use.
type TextParser struct {
metricFamiliesByName map[string]*dto.MetricFamily
buf *bufio.Reader // Where the parsed input is read through.
@@ -315,6 +315,10 @@ func (p *TextParser) startLabelValue() stateFn {
if p.readTokenAsLabelValue(); p.err != nil {
return nil
}
+ if !model.LabelValue(p.currentToken.String()).IsValid() {
+ p.parseError(fmt.Sprintf("invalid label value %q", p.currentToken.String()))
+ return nil
+ }
p.currentLabelPair.Value = proto.String(p.currentToken.String())
// Special treatment of summaries:
// - Quantile labels are special, will result in dto.Quantile later.
@@ -355,7 +359,7 @@ func (p *TextParser) startLabelValue() stateFn {
}
return p.readingValue
default:
- p.parseError(fmt.Sprintf("unexpected end of label value %q", p.currentLabelPair.Value))
+ p.parseError(fmt.Sprintf("unexpected end of label value %q", p.currentLabelPair.GetValue()))
return nil
}
}
@@ -552,8 +556,8 @@ func (p *TextParser) readTokenUntilWhitespace() {
// byte considered is the byte already read (now in p.currentByte). The first
// newline byte encountered is still copied into p.currentByte, but not into
// p.currentToken. If recognizeEscapeSequence is true, two escape sequences are
-// recognized: '\\' tranlates into '\', and '\n' into a line-feed character. All
-// other escape sequences are invalid and cause an error.
+// recognized: '\\' translates into '\', and '\n' into a line-feed character.
+// All other escape sequences are invalid and cause an error.
func (p *TextParser) readTokenUntilNewline(recognizeEscapeSequence bool) {
p.currentToken.Reset()
escaped := false
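A minimal sketch (not part of the vendored code) of the TextParser whose zero value is ready to use, now with the stricter label-value validation added above. The sample input is invented for illustration.

package main

import (
	"fmt"
	"strings"

	"github.com/prometheus/common/expfmt"
)

func main() {
	input := `# TYPE http_requests_total counter
http_requests_total{method="get"} 3
`
	var parser expfmt.TextParser // zero value is ready to use
	families, err := parser.TextToMetricFamilies(strings.NewReader(input))
	if err != nil {
		panic(err)
	}
	for name, mf := range families {
		fmt.Println(name, len(mf.GetMetric()))
	}
}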
diff --git a/vendor/github.com/prometheus/common/model/labels.go b/vendor/github.com/prometheus/common/model/labels.go
index 3b72e7ff..41051a01 100644
--- a/vendor/github.com/prometheus/common/model/labels.go
+++ b/vendor/github.com/prometheus/common/model/labels.go
@@ -80,14 +80,18 @@ const (
QuantileLabel = "quantile"
)
-// LabelNameRE is a regular expression matching valid label names.
+// LabelNameRE is a regular expression matching valid label names. Note that the
+// IsValid method of LabelName performs the same check but faster than a match
+// with this regular expression.
var LabelNameRE = regexp.MustCompile("^[a-zA-Z_][a-zA-Z0-9_]*$")
// A LabelName is a key for a LabelSet or Metric. It has a value associated
// therewith.
type LabelName string
-// IsValid is true iff the label name matches the pattern of LabelNameRE.
+// IsValid is true iff the label name matches the pattern of LabelNameRE. This
+// method, however, does not use LabelNameRE for the check but a much faster
+// hardcoded implementation.
func (ln LabelName) IsValid() bool {
if len(ln) == 0 {
return false
@@ -106,7 +110,7 @@ func (ln *LabelName) UnmarshalYAML(unmarshal func(interface{}) error) error {
if err := unmarshal(&s); err != nil {
return err
}
- if !LabelNameRE.MatchString(s) {
+ if !LabelName(s).IsValid() {
return fmt.Errorf("%q is not a valid label name", s)
}
*ln = LabelName(s)
@@ -119,7 +123,7 @@ func (ln *LabelName) UnmarshalJSON(b []byte) error {
if err := json.Unmarshal(b, &s); err != nil {
return err
}
- if !LabelNameRE.MatchString(s) {
+ if !LabelName(s).IsValid() {
return fmt.Errorf("%q is not a valid label name", s)
}
*ln = LabelName(s)
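A minimal sketch (not part of the vendored code): IsValid performs the same check as LabelNameRE but with a hardcoded loop, which is why the JSON and YAML unmarshalers above now call it instead of matching the regular expression.

package main

import (
	"fmt"

	"github.com/prometheus/common/model"
)

func main() {
	for _, name := range []string{"http_status", "0bad", "valid_name1"} {
		// Both checks agree; IsValid simply avoids the regexp engine.
		fmt.Println(name, model.LabelName(name).IsValid(), model.LabelNameRE.MatchString(name))
	}
}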
diff --git a/vendor/github.com/prometheus/common/model/labelset.go b/vendor/github.com/prometheus/common/model/labelset.go
index 5f931cdb..6eda08a7 100644
--- a/vendor/github.com/prometheus/common/model/labelset.go
+++ b/vendor/github.com/prometheus/common/model/labelset.go
@@ -160,7 +160,7 @@ func (l *LabelSet) UnmarshalJSON(b []byte) error {
// LabelName as a string and does not call its UnmarshalJSON method.
// Thus, we have to replicate the behavior here.
for ln := range m {
- if !LabelNameRE.MatchString(string(ln)) {
+ if !ln.IsValid() {
return fmt.Errorf("%q is not a valid label name", ln)
}
}
diff --git a/vendor/github.com/prometheus/common/model/metric.go b/vendor/github.com/prometheus/common/model/metric.go
index a5da59a5..f7250909 100644
--- a/vendor/github.com/prometheus/common/model/metric.go
+++ b/vendor/github.com/prometheus/common/model/metric.go
@@ -21,8 +21,11 @@ import (
)
var (
- separator = []byte{0}
- MetricNameRE = regexp.MustCompile(`^[a-zA-Z_][a-zA-Z0-9_:]*$`)
+ separator = []byte{0}
+ // MetricNameRE is a regular expression matching valid metric
+ // names. Note that the IsValidMetricName function performs the same
+ // check but faster than a match with this regular expression.
+ MetricNameRE = regexp.MustCompile(`^[a-zA-Z_:][a-zA-Z0-9_:]*$`)
)
// A Metric is similar to a LabelSet, but the key difference is that a Metric is
@@ -41,7 +44,7 @@ func (m Metric) Before(o Metric) bool {
// Clone returns a copy of the Metric.
func (m Metric) Clone() Metric {
- clone := Metric{}
+ clone := make(Metric, len(m))
for k, v := range m {
clone[k] = v
}
@@ -85,6 +88,8 @@ func (m Metric) FastFingerprint() Fingerprint {
}
// IsValidMetricName returns true iff name matches the pattern of MetricNameRE.
+// This function, however, does not use MetricNameRE for the check but a much
+// faster hardcoded implementation.
func IsValidMetricName(n LabelValue) bool {
if len(n) == 0 {
return false
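A minimal sketch (not part of the vendored code): IsValidMetricName mirrors the updated MetricNameRE, which now allows a leading colon, without the cost of a regexp match.

package main

import (
	"fmt"

	"github.com/prometheus/common/model"
)

func main() {
	fmt.Println(model.IsValidMetricName("node:cpu_usage:rate5m")) // true
	fmt.Println(model.IsValidMetricName("5xx_responses"))         // false: starts with a digit
}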
diff --git a/vendor/github.com/prometheus/common/model/silence.go b/vendor/github.com/prometheus/common/model/silence.go
index 7538e299..bb99889d 100644
--- a/vendor/github.com/prometheus/common/model/silence.go
+++ b/vendor/github.com/prometheus/common/model/silence.go
@@ -59,8 +59,8 @@ func (m *Matcher) Validate() error {
return nil
}
-// Silence defines the representation of a silence definiton
-// in the Prometheus eco-system.
+// Silence defines the representation of a silence definition in the Prometheus
+// eco-system.
type Silence struct {
ID uint64 `json:"id,omitempty"`
diff --git a/vendor/github.com/prometheus/common/model/time.go b/vendor/github.com/prometheus/common/model/time.go
index 548968ae..74ed5a9f 100644
--- a/vendor/github.com/prometheus/common/model/time.go
+++ b/vendor/github.com/prometheus/common/model/time.go
@@ -163,9 +163,21 @@ func (t *Time) UnmarshalJSON(b []byte) error {
// This type should not propagate beyond the scope of input/output processing.
type Duration time.Duration
+// Set implements pflag/flag.Value
+func (d *Duration) Set(s string) error {
+ var err error
+ *d, err = ParseDuration(s)
+ return err
+}
+
+// Type implements pflag.Value
+func (d *Duration) Type() string {
+ return "duration"
+}
+
var durationRE = regexp.MustCompile("^([0-9]+)(y|w|d|h|m|s|ms)$")
-// StringToDuration parses a string into a time.Duration, assuming that a year
+// ParseDuration parses a string into a time.Duration, assuming that a year
// always has 365d, a week always has 7d, and a day always has 24h.
func ParseDuration(durationStr string) (Duration, error) {
matches := durationRE.FindStringSubmatch(durationStr)
@@ -202,6 +214,9 @@ func (d Duration) String() string {
ms = int64(time.Duration(d) / time.Millisecond)
unit = "ms"
)
+ if ms == 0 {
+ return "0s"
+ }
factors := map[string]int64{
"y": 1000 * 60 * 60 * 24 * 365,
"w": 1000 * 60 * 60 * 24 * 7,
diff --git a/vendor/github.com/prometheus/common/model/value.go b/vendor/github.com/prometheus/common/model/value.go
index dbf5d10e..c9d8fb1a 100644
--- a/vendor/github.com/prometheus/common/model/value.go
+++ b/vendor/github.com/prometheus/common/model/value.go
@@ -22,6 +22,22 @@ import (
"strings"
)
+var (
+ // ZeroSamplePair is the pseudo zero-value of SamplePair used to signal a
+ // non-existing sample pair. It is a SamplePair with timestamp Earliest and
+ // value 0.0. Note that the natural zero value of SamplePair has a timestamp
+ // of 0, which is possible to appear in a real SamplePair and thus not
+ // suitable to signal a non-existing SamplePair.
+ ZeroSamplePair = SamplePair{Timestamp: Earliest}
+
+ // ZeroSample is the pseudo zero-value of Sample used to signal a
+ // non-existing sample. It is a Sample with timestamp Earliest, value 0.0,
+ // and metric nil. Note that the natural zero value of Sample has a timestamp
+ // of 0, which is possible to appear in a real Sample and thus not suitable
+ // to signal a non-existing Sample.
+ ZeroSample = Sample{Timestamp: Earliest}
+)
+
// A SampleValue is a representation of a value for a given sample at a given
// time.
type SampleValue float64
@@ -84,7 +100,7 @@ func (s *SamplePair) UnmarshalJSON(b []byte) error {
}
// Equal returns true if this SamplePair and o have equal Values and equal
-// Timestamps. The sematics of Value equality is defined by SampleValue.Equal.
+// Timestamps. The semantics of Value equality is defined by SampleValue.Equal.
func (s *SamplePair) Equal(o *SamplePair) bool {
return s == o || (s.Value.Equal(o.Value) && s.Timestamp.Equal(o.Timestamp))
}
@@ -101,7 +117,7 @@ type Sample struct {
}
// Equal compares first the metrics, then the timestamp, then the value. The
-// sematics of value equality is defined by SampleValue.Equal.
+// semantics of value equality is defined by SampleValue.Equal.
func (s *Sample) Equal(o *Sample) bool {
if s == o {
return true
@@ -113,11 +129,8 @@ func (s *Sample) Equal(o *Sample) bool {
if !s.Timestamp.Equal(o.Timestamp) {
return false
}
- if s.Value.Equal(o.Value) {
- return false
- }
- return true
+ return s.Value.Equal(o.Value)
}
func (s Sample) String() string {
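A minimal sketch (not part of the vendored code): ZeroSamplePair acts as a sentinel for "no sample here", which the natural zero value of SamplePair cannot do because a timestamp of 0 is a legal sample time. lastBefore is a hypothetical helper, not part of the package.

package main

import (
	"fmt"

	"github.com/prometheus/common/model"
)

// lastBefore returns the last pair with a timestamp before t, or
// ZeroSamplePair if there is none.
func lastBefore(pairs []model.SamplePair, t model.Time) model.SamplePair {
	result := model.ZeroSamplePair
	for _, p := range pairs {
		if p.Timestamp.Before(t) {
			result = p
		}
	}
	return result
}

func main() {
	pairs := []model.SamplePair{
		{Timestamp: 0, Value: 1}, // timestamp 0 is a valid sample time
		{Timestamp: 1000, Value: 2},
	}
	if p := lastBefore(pairs, 500); p != model.ZeroSamplePair {
		fmt.Println("found:", p)
	}
}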
diff --git a/vendor/github.com/prometheus/procfs/AUTHORS.md b/vendor/github.com/prometheus/procfs/AUTHORS.md
deleted file mode 100644
index 0c802dd8..00000000
--- a/vendor/github.com/prometheus/procfs/AUTHORS.md
+++ /dev/null
@@ -1,20 +0,0 @@
-The Prometheus project was started by Matt T. Proud (emeritus) and
-Julius Volz in 2012.
-
-Maintainers of this repository:
-
-* Tobias Schmidt <ts@soundcloud.com>
-
-The following individuals have contributed code to this repository
-(listed in alphabetical order):
-
-* Armen Baghumian <abaghumian@noggin.com.au>
-* Bjoern Rabenstein <beorn@soundcloud.com>
-* David Cournapeau <cournape@gmail.com>
-* Ji-Hoon, Seol <jihoon.seol@gmail.com>
-* Jonas Große Sundrup <cherti@letopolis.de>
-* Julius Volz <julius.volz@gmail.com>
-* Matthias Rampke <mr@soundcloud.com>
-* Nicky Gerritsen <nicky@streamone.nl>
-* Rémi Audebert <contact@halfr.net>
-* Tobias Schmidt <tobidt@gmail.com>
diff --git a/vendor/github.com/prometheus/procfs/CONTRIBUTING.md b/vendor/github.com/prometheus/procfs/CONTRIBUTING.md
index 5705f0fb..40503edb 100644
--- a/vendor/github.com/prometheus/procfs/CONTRIBUTING.md
+++ b/vendor/github.com/prometheus/procfs/CONTRIBUTING.md
@@ -2,9 +2,9 @@
Prometheus uses GitHub to manage reviews of pull requests.
-* If you have a trivial fix or improvement, go ahead and create a pull
- request, addressing (with `@...`) one or more of the maintainers
- (see [AUTHORS.md](AUTHORS.md)) in the description of the pull request.
+* If you have a trivial fix or improvement, go ahead and create a pull request,
+ addressing (with `@...`) the maintainer of this repository (see
+ [MAINTAINERS.md](MAINTAINERS.md)) in the description of the pull request.
* If you plan to do something more involved, first discuss your ideas
on our [mailing list](https://groups.google.com/forum/?fromgroups#!forum/prometheus-developers).
diff --git a/vendor/github.com/prometheus/procfs/MAINTAINERS.md b/vendor/github.com/prometheus/procfs/MAINTAINERS.md
new file mode 100644
index 00000000..35993c41
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/MAINTAINERS.md
@@ -0,0 +1 @@
+* Tobias Schmidt <tobidt@gmail.com>
diff --git a/vendor/github.com/prometheus/procfs/Makefile b/vendor/github.com/prometheus/procfs/Makefile
index c264a49d..4d109839 100644
--- a/vendor/github.com/prometheus/procfs/Makefile
+++ b/vendor/github.com/prometheus/procfs/Makefile
@@ -1,6 +1,77 @@
-ci:
- ! gofmt -l *.go | read nothing
- go vet
- go test -v ./...
- go get github.com/golang/lint/golint
- golint *.go
+# Copyright 2018 The Prometheus Authors
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Ensure GOBIN is not set during build so that promu is installed to the correct path
+unexport GOBIN
+
+GO ?= go
+GOFMT ?= $(GO)fmt
+FIRST_GOPATH := $(firstword $(subst :, ,$(shell $(GO) env GOPATH)))
+STATICCHECK := $(FIRST_GOPATH)/bin/staticcheck
+pkgs = $(shell $(GO) list ./... | grep -v /vendor/)
+
+PREFIX ?= $(shell pwd)
+BIN_DIR ?= $(shell pwd)
+
+ifdef DEBUG
+ bindata_flags = -debug
+endif
+
+STATICCHECK_IGNORE =
+
+all: format staticcheck build test
+
+style:
+ @echo ">> checking code style"
+ @! $(GOFMT) -d $(shell find . -path ./vendor -prune -o -name '*.go' -print) | grep '^'
+
+check_license:
+ @echo ">> checking license header"
+ @./scripts/check_license.sh
+
+test: fixtures/.unpacked sysfs/fixtures/.unpacked
+ @echo ">> running all tests"
+ @$(GO) test -race $(shell $(GO) list ./... | grep -v /vendor/ | grep -v examples)
+
+format:
+ @echo ">> formatting code"
+ @$(GO) fmt $(pkgs)
+
+vet:
+ @echo ">> vetting code"
+ @$(GO) vet $(pkgs)
+
+staticcheck: $(STATICCHECK)
+ @echo ">> running staticcheck"
+ @$(STATICCHECK) -ignore "$(STATICCHECK_IGNORE)" $(pkgs)
+
+%/.unpacked: %.ttar
+ ./ttar -C $(dir $*) -x -f $*.ttar
+ touch $@
+
+update_fixtures: fixtures.ttar sysfs/fixtures.ttar
+
+%fixtures.ttar: %/fixtures
+ rm -v $(dir $*)fixtures/.unpacked
+ ./ttar -C $(dir $*) -c -f $*fixtures.ttar fixtures/
+
+$(FIRST_GOPATH)/bin/staticcheck:
+ @GOOS= GOARCH= $(GO) get -u honnef.co/go/tools/cmd/staticcheck
+
+.PHONY: all style check_license format test vet staticcheck
+
+# Declaring the binaries at their default locations as PHONY targets is a hack
+# to ensure the latest version is downloaded on every make execution.
+# If this is not desired, copy/symlink these binaries to a different path and
+# set the respective environment variables.
+.PHONY: $(GOPATH)/bin/staticcheck
diff --git a/vendor/github.com/prometheus/procfs/README.md b/vendor/github.com/prometheus/procfs/README.md
index 6e7ee6b8..20954947 100644
--- a/vendor/github.com/prometheus/procfs/README.md
+++ b/vendor/github.com/prometheus/procfs/README.md
@@ -8,3 +8,4 @@ backwards-incompatible ways without warnings. Use it at your own risk.
[![GoDoc](https://godoc.org/github.com/prometheus/procfs?status.png)](https://godoc.org/github.com/prometheus/procfs)
[![Build Status](https://travis-ci.org/prometheus/procfs.svg?branch=master)](https://travis-ci.org/prometheus/procfs)
+[![Go Report Card](https://goreportcard.com/badge/github.com/prometheus/procfs)](https://goreportcard.com/report/github.com/prometheus/procfs)
diff --git a/vendor/github.com/prometheus/procfs/buddyinfo.go b/vendor/github.com/prometheus/procfs/buddyinfo.go
new file mode 100644
index 00000000..d3a82680
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/buddyinfo.go
@@ -0,0 +1,95 @@
+// Copyright 2017 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+ "bufio"
+ "fmt"
+ "io"
+ "os"
+ "strconv"
+ "strings"
+)
+
+// A BuddyInfo holds the details parsed from /proc/buddyinfo.
+// The data consists of an array of free fragment counts for each size.
+// The sizes are 2^n*PAGE_SIZE, where n is the array index.
+type BuddyInfo struct {
+ Node string
+ Zone string
+ Sizes []float64
+}
+
+// NewBuddyInfo reads the buddyinfo statistics.
+func NewBuddyInfo() ([]BuddyInfo, error) {
+ fs, err := NewFS(DefaultMountPoint)
+ if err != nil {
+ return nil, err
+ }
+
+ return fs.NewBuddyInfo()
+}
+
+// NewBuddyInfo reads the buddyinfo statistics from the specified `proc` filesystem.
+func (fs FS) NewBuddyInfo() ([]BuddyInfo, error) {
+ file, err := os.Open(fs.Path("buddyinfo"))
+ if err != nil {
+ return nil, err
+ }
+ defer file.Close()
+
+ return parseBuddyInfo(file)
+}
+
+func parseBuddyInfo(r io.Reader) ([]BuddyInfo, error) {
+ var (
+ buddyInfo = []BuddyInfo{}
+ scanner = bufio.NewScanner(r)
+ bucketCount = -1
+ )
+
+ for scanner.Scan() {
+ var err error
+ line := scanner.Text()
+ parts := strings.Fields(line)
+
+ if len(parts) < 4 {
+ return nil, fmt.Errorf("invalid number of fields when parsing buddyinfo")
+ }
+
+ node := strings.TrimRight(parts[1], ",")
+ zone := strings.TrimRight(parts[3], ",")
+ arraySize := len(parts[4:])
+
+ if bucketCount == -1 {
+ bucketCount = arraySize
+ } else {
+ if bucketCount != arraySize {
+ return nil, fmt.Errorf("mismatch in number of buddyinfo buckets, previous count %d, new count %d", bucketCount, arraySize)
+ }
+ }
+
+ sizes := make([]float64, arraySize)
+ for i := 0; i < arraySize; i++ {
+ sizes[i], err = strconv.ParseFloat(parts[i+4], 64)
+ if err != nil {
+ return nil, fmt.Errorf("invalid value in buddyinfo: %s", err)
+ }
+ }
+
+ buddyInfo = append(buddyInfo, BuddyInfo{node, zone, sizes})
+ }
+
+ return buddyInfo, scanner.Err()
+}
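The buddyinfo parser above is exposed through the package-level NewBuddyInfo helper. A minimal consumption sketch, assuming the package is vendored at github.com/prometheus/procfs:

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	// Reads /proc/buddyinfo via the default proc mount point.
	infos, err := procfs.NewBuddyInfo()
	if err != nil {
		log.Fatal(err)
	}
	for _, bi := range infos {
		// Sizes[n] counts free fragments of size 2^n * PAGE_SIZE.
		fmt.Printf("node %s zone %s: %v\n", bi.Node, bi.Zone, bi.Sizes)
	}
}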
diff --git a/vendor/github.com/prometheus/procfs/fixtures.ttar b/vendor/github.com/prometheus/procfs/fixtures.ttar
new file mode 100644
index 00000000..3ee8291e
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/fixtures.ttar
@@ -0,0 +1,446 @@
+# Archive created by ttar -c -f fixtures.ttar fixtures/
+Directory: fixtures
+Mode: 755
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Directory: fixtures/26231
+Mode: 755
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/26231/cmdline
+Lines: 1
+vimNULLBYTEtest.goNULLBYTE+10NULLBYTEEOF
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/26231/comm
+Lines: 1
+vim
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/26231/exe
+SymlinkTo: /usr/bin/vim
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Directory: fixtures/26231/fd
+Mode: 755
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/26231/fd/0
+SymlinkTo: ../../symlinktargets/abc
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/26231/fd/1
+SymlinkTo: ../../symlinktargets/def
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/26231/fd/10
+SymlinkTo: ../../symlinktargets/xyz
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/26231/fd/2
+SymlinkTo: ../../symlinktargets/ghi
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/26231/fd/3
+SymlinkTo: ../../symlinktargets/uvw
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/26231/io
+Lines: 7
+rchar: 750339
+wchar: 818609
+syscr: 7405
+syscw: 5245
+read_bytes: 1024
+write_bytes: 2048
+cancelled_write_bytes: -1024
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/26231/limits
+Lines: 17
+Limit Soft Limit Hard Limit Units
+Max cpu time unlimited unlimited seconds
+Max file size unlimited unlimited bytes
+Max data size unlimited unlimited bytes
+Max stack size 8388608 unlimited bytes
+Max core file size 0 unlimited bytes
+Max resident set unlimited unlimited bytes
+Max processes 62898 62898 processes
+Max open files 2048 4096 files
+Max locked memory 65536 65536 bytes
+Max address space 8589934592 unlimited bytes
+Max file locks unlimited unlimited locks
+Max pending signals 62898 62898 signals
+Max msgqueue size 819200 819200 bytes
+Max nice priority 0 0
+Max realtime priority 0 0
+Max realtime timeout unlimited unlimited us
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/26231/mountstats
+Lines: 19
+device rootfs mounted on / with fstype rootfs
+device sysfs mounted on /sys with fstype sysfs
+device proc mounted on /proc with fstype proc
+device /dev/sda1 mounted on / with fstype ext4
+device 192.168.1.1:/srv/test mounted on /mnt/nfs/test with fstype nfs4 statvers=1.1
+ opts: rw,vers=4.0,rsize=1048576,wsize=1048576,namlen=255,acregmin=3,acregmax=60,acdirmin=30,acdirmax=60,hard,proto=tcp,port=0,timeo=600,retrans=2,sec=sys,clientaddr=192.168.1.5,local_lock=none
+ age: 13968
+ caps: caps=0xfff7,wtmult=512,dtsize=32768,bsize=0,namlen=255
+ nfsv4: bm0=0xfdffafff,bm1=0xf9be3e,bm2=0x0,acl=0x0,pnfs=not configured
+ sec: flavor=1,pseudoflavor=1
+ events: 52 226 0 0 1 13 398 0 0 331 0 47 0 0 77 0 0 77 0 0 0 0 0 0 0 0 0
+ bytes: 1207640230 0 0 0 1210214218 0 295483 0
+ RPC iostats version: 1.0 p/v: 100003/4 (nfs)
+ xprt: tcp 832 0 1 0 11 6428 6428 0 12154 0 24 26 5726
+ per-op statistics
+ NULL: 0 0 0 0 0 0 0 0
+ READ: 1298 1298 0 207680 1210292152 6 79386 79407
+ WRITE: 0 0 0 0 0 0 0 0
+
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Directory: fixtures/26231/net
+Mode: 755
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/26231/net/dev
+Lines: 4
+Inter-| Receive | Transmit
+ face |bytes packets errs drop fifo frame compressed multicast|bytes packets errs drop fifo colls carrier compressed
+ lo: 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+ eth0: 438 5 0 0 0 0 0 0 648 8 0 0 0 0 0 0
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Directory: fixtures/26231/ns
+Mode: 755
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/26231/ns/mnt
+SymlinkTo: mnt:[4026531840]
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/26231/ns/net
+SymlinkTo: net:[4026531993]
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/26231/stat
+Lines: 1
+26231 (vim) R 5392 7446 5392 34835 7446 4218880 32533 309516 26 82 1677 44 158 99 20 0 1 0 82375 56274944 1981 18446744073709551615 4194304 6294284 140736914091744 140736914087944 139965136429984 0 0 12288 1870679807 0 0 0 17 0 0 0 31 0 0 8391624 8481048 16420864 140736914093252 140736914093279 140736914093279 140736914096107 0
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Directory: fixtures/26232
+Mode: 755
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/26232/cmdline
+Lines: 0
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/26232/comm
+Lines: 1
+ata_sff
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Directory: fixtures/26232/fd
+Mode: 755
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/26232/fd/0
+SymlinkTo: ../../symlinktargets/abc
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/26232/fd/1
+SymlinkTo: ../../symlinktargets/def
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/26232/fd/2
+SymlinkTo: ../../symlinktargets/ghi
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/26232/fd/3
+SymlinkTo: ../../symlinktargets/uvw
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/26232/fd/4
+SymlinkTo: ../../symlinktargets/xyz
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/26232/limits
+Lines: 17
+Limit Soft Limit Hard Limit Units
+Max cpu time unlimited unlimited seconds
+Max file size unlimited unlimited bytes
+Max data size unlimited unlimited bytes
+Max stack size 8388608 unlimited bytes
+Max core file size 0 unlimited bytes
+Max resident set unlimited unlimited bytes
+Max processes 29436 29436 processes
+Max open files 1024 4096 files
+Max locked memory 65536 65536 bytes
+Max address space unlimited unlimited bytes
+Max file locks unlimited unlimited locks
+Max pending signals 29436 29436 signals
+Max msgqueue size 819200 819200 bytes
+Max nice priority 0 0
+Max realtime priority 0 0
+Max realtime timeout unlimited unlimited us
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/26232/stat
+Lines: 1
+33 (ata_sff) S 2 0 0 0 -1 69238880 0 0 0 0 0 0 0 0 0 -20 1 0 5 0 0 18446744073709551615 0 0 0 0 0 0 0 2147483647 0 18446744073709551615 0 0 17 1 0 0 0 0 0 0 0 0 0 0 0 0 0
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Directory: fixtures/26233
+Mode: 755
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/26233/cmdline
+Lines: 1
+com.github.uiautomatorNULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTEEOF
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Directory: fixtures/584
+Mode: 755
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/584/stat
+Lines: 2
+1020 ((a b ) ( c d) ) R 28378 1020 28378 34842 1020 4218880 286 0 0 0 0 0 0 0 20 0 1 0 10839175 10395648 155 18446744073709551615 4194304 4238788 140736466511168 140736466511168 140609271124624 0 0 0 0 0 0 0 17 5 0 0 0 0 0 6336016 6337300 25579520 140736466515030 140736466515061 140736466515061 140736466518002 0
+#!/bin/cat /proc/self/stat
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Directory: fixtures/buddyinfo
+Mode: 755
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Directory: fixtures/buddyinfo/short
+Mode: 755
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/buddyinfo/short/buddyinfo
+Lines: 3
+Node 0, zone
+Node 0, zone
+Node 0, zone
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Directory: fixtures/buddyinfo/sizemismatch
+Mode: 755
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/buddyinfo/sizemismatch/buddyinfo
+Lines: 3
+Node 0, zone DMA 1 0 1 0 2 1 1 0 1 1 3
+Node 0, zone DMA32 759 572 791 475 194 45 12 0 0 0 0 0
+Node 0, zone Normal 4381 1093 185 1530 567 102 4 0 0 0
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Directory: fixtures/buddyinfo/valid
+Mode: 755
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/buddyinfo/valid/buddyinfo
+Lines: 3
+Node 0, zone DMA 1 0 1 0 2 1 1 0 1 1 3
+Node 0, zone DMA32 759 572 791 475 194 45 12 0 0 0 0
+Node 0, zone Normal 4381 1093 185 1530 567 102 4 0 0 0 0
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Directory: fixtures/fs
+Mode: 755
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Directory: fixtures/fs/xfs
+Mode: 755
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/fs/xfs/stat
+Lines: 23
+extent_alloc 92447 97589 92448 93751
+abt 0 0 0 0
+blk_map 1767055 188820 184891 92447 92448 2140766 0
+bmbt 0 0 0 0
+dir 185039 92447 92444 136422
+trans 706 944304 0
+ig 185045 58807 0 126238 0 33637 22
+log 2883 113448 9 17360 739
+push_ail 945014 0 134260 15483 0 3940 464 159985 0 40
+xstrat 92447 0
+rw 107739 94045
+attr 4 0 0 0
+icluster 8677 7849 135802
+vnodes 92601 0 0 0 92444 92444 92444 0
+buf 2666287 7122 2659202 3599 2 7085 0 10297 7085
+abtb2 184941 1277345 13257 13278 0 0 0 0 0 0 0 0 0 0 2746147
+abtc2 345295 2416764 172637 172658 0 0 0 0 0 0 0 0 0 0 21406023
+bmbt2 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+ibt2 343004 1358467 0 0 0 0 0 0 0 0 0 0 0 0 0
+fibt2 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+qm 0 0 0 0 0 0 0 0
+xpc 399724544 92823103 86219234
+debug 0
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/mdstat
+Lines: 26
+Personalities : [linear] [multipath] [raid0] [raid1] [raid6] [raid5] [raid4] [raid10]
+md3 : active raid6 sda1[8] sdh1[7] sdg1[6] sdf1[5] sde1[11] sdd1[3] sdc1[10] sdb1[9]
+ 5853468288 blocks super 1.2 level 6, 64k chunk, algorithm 2 [8/8] [UUUUUUUU]
+
+md127 : active raid1 sdi2[0] sdj2[1]
+ 312319552 blocks [2/2] [UU]
+
+md0 : active raid1 sdk[2](S) sdi1[0] sdj1[1]
+ 248896 blocks [2/2] [UU]
+
+md4 : inactive raid1 sda3[0] sdb3[1]
+ 4883648 blocks [2/2] [UU]
+
+md6 : active raid1 sdb2[2] sda2[0]
+ 195310144 blocks [2/1] [U_]
+ [=>...................] recovery = 8.5% (16775552/195310144) finish=17.0min speed=259783K/sec
+
+md8 : active raid1 sdb1[1] sda1[0]
+ 195310144 blocks [2/2] [UU]
+ [=>...................] resync = 8.5% (16775552/195310144) finish=17.0min speed=259783K/sec
+
+md7 : active raid6 sdb1[0] sde1[3] sdd1[2] sdc1[1]
+ 7813735424 blocks super 1.2 level 6, 512k chunk, algorithm 2 [4/3] [U_UU]
+ bitmap: 0/30 pages [0KB], 65536KB chunk
+
+unused devices: <none>
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Directory: fixtures/net
+Mode: 755
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/net/dev
+Lines: 6
+Inter-| Receive | Transmit
+ face |bytes packets errs drop fifo frame compressed multicast|bytes packets errs drop fifo colls carrier compressed
+vethf345468: 648 8 0 0 0 0 0 0 438 5 0 0 0 0 0 0
+ lo: 1664039048 1566805 0 0 0 0 0 0 1664039048 1566805 0 0 0 0 0 0
+docker0: 2568 38 0 0 0 0 0 0 438 5 0 0 0 0 0 0
+ eth0: 874354587 1036395 0 0 0 0 0 0 563352563 732147 0 0 0 0 0 0
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/net/ip_vs
+Lines: 21
+IP Virtual Server version 1.2.1 (size=4096)
+Prot LocalAddress:Port Scheduler Flags
+ -> RemoteAddress:Port Forward Weight ActiveConn InActConn
+TCP C0A80016:0CEA wlc
+ -> C0A85216:0CEA Tunnel 100 248 2
+ -> C0A85318:0CEA Tunnel 100 248 2
+ -> C0A85315:0CEA Tunnel 100 248 1
+TCP C0A80039:0CEA wlc
+ -> C0A85416:0CEA Tunnel 0 0 0
+ -> C0A85215:0CEA Tunnel 100 1499 0
+ -> C0A83215:0CEA Tunnel 100 1498 0
+TCP C0A80037:0CEA wlc
+ -> C0A8321A:0CEA Tunnel 0 0 0
+ -> C0A83120:0CEA Tunnel 100 0 0
+TCP [2620:0000:0000:0000:0000:0000:0000:0001]:0050 sh
+ -> [2620:0000:0000:0000:0000:0000:0000:0002]:0050 Route 1 0 0
+ -> [2620:0000:0000:0000:0000:0000:0000:0003]:0050 Route 1 0 0
+ -> [2620:0000:0000:0000:0000:0000:0000:0004]:0050 Route 1 1 1
+FWM 10001000 wlc
+ -> C0A8321A:0CEA Route 0 0 1
+ -> C0A83215:0CEA Route 0 0 2
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/net/ip_vs_stats
+Lines: 6
+ Total Incoming Outgoing Incoming Outgoing
+ Conns Packets Packets Bytes Bytes
+ 16AA370 E33656E5 0 51D8C8883AB3 0
+
+ Conns/s Pkts/s Pkts/s Bytes/s Bytes/s
+ 4 1FB3C 0 1282A8F 0
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Directory: fixtures/net/rpc
+Mode: 755
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/net/rpc/nfs
+Lines: 5
+net 18628 0 18628 6
+rpc 4329785 0 4338291
+proc2 18 2 69 0 0 4410 0 0 0 0 0 0 0 0 0 0 0 99 2
+proc3 22 1 4084749 29200 94754 32580 186 47747 7981 8639 0 6356 0 6962 0 7958 0 0 241 4 4 2 39
+proc4 61 1 0 0 0 0 0 0 0 0 0 0 0 1 1 0 0 0 0 0 0 0 2 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/net/rpc/nfsd
+Lines: 11
+rc 0 6 18622
+fh 0 0 0 0 0
+io 157286400 0
+th 8 0 0.000 0.000 0.000 0.000 0.000 0.000 0.000 0.000 0.000 0.000
+ra 32 0 0 0 0 0 0 0 0 0 0 0
+net 18628 0 18628 6
+rpc 18628 0 0 0 0
+proc2 18 2 69 0 0 4410 0 0 0 0 0 0 0 0 0 0 0 99 2
+proc3 22 2 112 0 2719 111 0 0 0 0 0 0 0 0 0 0 0 27 216 0 2 1 0
+proc4 2 2 10853
+proc4ops 72 0 0 0 1098 2 0 0 0 0 8179 5896 0 0 0 0 5900 0 0 2 0 2 0 9609 0 2 150 1272 0 0 0 1236 0 0 0 0 3 3 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/net/xfrm_stat
+Lines: 28
+XfrmInError 1
+XfrmInBufferError 2
+XfrmInHdrError 4
+XfrmInNoStates 3
+XfrmInStateProtoError 40
+XfrmInStateModeError 100
+XfrmInStateSeqError 6000
+XfrmInStateExpired 4
+XfrmInStateMismatch 23451
+XfrmInStateInvalid 55555
+XfrmInTmplMismatch 51
+XfrmInNoPols 65432
+XfrmInPolBlock 100
+XfrmInPolError 10000
+XfrmOutError 1000000
+XfrmOutBundleGenError 43321
+XfrmOutBundleCheckError 555
+XfrmOutNoStates 869
+XfrmOutStateProtoError 4542
+XfrmOutStateModeError 4
+XfrmOutStateSeqError 543
+XfrmOutStateExpired 565
+XfrmOutPolBlock 43456
+XfrmOutPolDead 7656
+XfrmOutPolError 1454
+XfrmFwdHdrError 6654
+XfrmOutStateInvalid 28765
+XfrmAcquireError 24532
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/self
+SymlinkTo: 26231
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/stat
+Lines: 16
+cpu 301854 612 111922 8979004 3552 2 3944 0 0 0
+cpu0 44490 19 21045 1087069 220 1 3410 0 0 0
+cpu1 47869 23 16474 1110787 591 0 46 0 0 0
+cpu2 46504 36 15916 1112321 441 0 326 0 0 0
+cpu3 47054 102 15683 1113230 533 0 60 0 0 0
+cpu4 28413 25 10776 1140321 217 0 8 0 0 0
+cpu5 29271 101 11586 1136270 672 0 30 0 0 0
+cpu6 29152 36 10276 1139721 319 0 29 0 0 0
+cpu7 29098 268 10164 1139282 555 0 31 0 0 0
+intr 8885917 17 0 0 0 0 0 0 0 1 79281 0 0 0 0 0 0 0 231237 0 0 0 0 250586 103 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 223424 190745 13 906 1283803 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+ctxt 38014093
+btime 1418183276
+processes 26442
+procs_running 2
+procs_blocked 1
+softirq 5057579 250191 1481983 1647 211099 186066 0 1783454 622196 12499 508444
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Directory: fixtures/symlinktargets
+Mode: 755
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/symlinktargets/README
+Lines: 2
+This directory contains some empty files that are the symlinks the files in the "fd" directory point to.
+They are otherwise ignored by the tests
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/symlinktargets/abc
+Lines: 0
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/symlinktargets/def
+Lines: 0
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/symlinktargets/ghi
+Lines: 0
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/symlinktargets/uvw
+Lines: 0
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+Path: fixtures/symlinktargets/xyz
+Lines: 0
+Mode: 644
+# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
diff --git a/vendor/github.com/prometheus/procfs/fs.go b/vendor/github.com/prometheus/procfs/fs.go
index 49aaab05..b6c6b2ce 100644
--- a/vendor/github.com/prometheus/procfs/fs.go
+++ b/vendor/github.com/prometheus/procfs/fs.go
@@ -1,9 +1,25 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
package procfs
import (
"fmt"
"os"
"path"
+
+ "github.com/prometheus/procfs/nfs"
+ "github.com/prometheus/procfs/xfs"
)
// FS represents the pseudo-filesystem proc, which provides an interface to
@@ -31,3 +47,36 @@ func NewFS(mountPoint string) (FS, error) {
func (fs FS) Path(p ...string) string {
return path.Join(append([]string{string(fs)}, p...)...)
}
+
+// XFSStats retrieves XFS filesystem runtime statistics.
+func (fs FS) XFSStats() (*xfs.Stats, error) {
+ f, err := os.Open(fs.Path("fs/xfs/stat"))
+ if err != nil {
+ return nil, err
+ }
+ defer f.Close()
+
+ return xfs.ParseStats(f)
+}
+
+// NFSClientRPCStats retrieves NFS client RPC statistics.
+func (fs FS) NFSClientRPCStats() (*nfs.ClientRPCStats, error) {
+ f, err := os.Open(fs.Path("net/rpc/nfs"))
+ if err != nil {
+ return nil, err
+ }
+ defer f.Close()
+
+ return nfs.ParseClientRPCStats(f)
+}
+
+// NFSdServerRPCStats retrieves NFS daemon RPC statistics.
+func (fs FS) NFSdServerRPCStats() (*nfs.ServerRPCStats, error) {
+ f, err := os.Open(fs.Path("net/rpc/nfsd"))
+ if err != nil {
+ return nil, err
+ }
+ defer f.Close()
+
+ return nfs.ParseServerRPCStats(f)
+}
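The new FS accessors above simply open the relevant file under the proc mount and delegate parsing to the xfs and nfs sub-packages. A rough call-pattern sketch; the concrete shapes of xfs.Stats and nfs.ClientRPCStats are defined in hunks outside this excerpt:

fs, err := procfs.NewFS(procfs.DefaultMountPoint)
if err != nil {
	log.Fatal(err)
}

// Parses <mountpoint>/fs/xfs/stat.
xfsStats, err := fs.XFSStats()
if err != nil {
	log.Fatal(err)
}

// Parses <mountpoint>/net/rpc/nfs.
nfsStats, err := fs.NFSClientRPCStats()
if err != nil {
	log.Fatal(err)
}

_, _ = xfsStats, nfsStats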
diff --git a/vendor/github.com/prometheus/procfs/internal/util/parse.go b/vendor/github.com/prometheus/procfs/internal/util/parse.go
new file mode 100644
index 00000000..1ad21c91
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/internal/util/parse.go
@@ -0,0 +1,46 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package util
+
+import "strconv"
+
+// ParseUint32s parses a slice of strings into a slice of uint32s.
+func ParseUint32s(ss []string) ([]uint32, error) {
+ us := make([]uint32, 0, len(ss))
+ for _, s := range ss {
+ u, err := strconv.ParseUint(s, 10, 32)
+ if err != nil {
+ return nil, err
+ }
+
+ us = append(us, uint32(u))
+ }
+
+ return us, nil
+}
+
+// ParseUint64s parses a slice of strings into a slice of uint64s.
+func ParseUint64s(ss []string) ([]uint64, error) {
+ us := make([]uint64, 0, len(ss))
+ for _, s := range ss {
+ u, err := strconv.ParseUint(s, 10, 64)
+ if err != nil {
+ return nil, err
+ }
+
+ us = append(us, u)
+ }
+
+ return us, nil
+}
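ParseUint32s and ParseUint64s are plain field-to-integer helpers; because they live in an internal package they are only callable from within procfs itself. A sketch of the intended call pattern:

fields := strings.Fields("759 572 791")
counts, err := util.ParseUint64s(fields)
if err != nil {
	return err
}
// counts == []uint64{759, 572, 791}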
diff --git a/vendor/github.com/prometheus/procfs/ipvs.go b/vendor/github.com/prometheus/procfs/ipvs.go
index e7012f73..e36d4a3b 100644
--- a/vendor/github.com/prometheus/procfs/ipvs.go
+++ b/vendor/github.com/prometheus/procfs/ipvs.go
@@ -1,3 +1,16 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
package procfs
import (
@@ -31,14 +44,16 @@ type IPVSStats struct {
type IPVSBackendStatus struct {
// The local (virtual) IP address.
LocalAddress net.IP
- // The local (virtual) port.
- LocalPort uint16
- // The transport protocol (TCP, UDP).
- Proto string
// The remote (real) IP address.
RemoteAddress net.IP
+ // The local (virtual) port.
+ LocalPort uint16
// The remote (real) port.
RemotePort uint16
+ // The local firewall mark
+ LocalMark string
+ // The transport protocol (TCP, UDP).
+ Proto string
// The current number of active connections for this virtual/real address pair.
ActiveConn uint64
// The current number of inactive connections for this virtual/real address pair.
@@ -142,13 +157,14 @@ func parseIPVSBackendStatus(file io.Reader) ([]IPVSBackendStatus, error) {
status []IPVSBackendStatus
scanner = bufio.NewScanner(file)
proto string
+ localMark string
localAddress net.IP
localPort uint16
err error
)
for scanner.Scan() {
- fields := strings.Fields(string(scanner.Text()))
+ fields := strings.Fields(scanner.Text())
if len(fields) == 0 {
continue
}
@@ -160,10 +176,19 @@ func parseIPVSBackendStatus(file io.Reader) ([]IPVSBackendStatus, error) {
continue
}
proto = fields[0]
+ localMark = ""
localAddress, localPort, err = parseIPPort(fields[1])
if err != nil {
return nil, err
}
+ case fields[0] == "FWM":
+ if len(fields) < 2 {
+ continue
+ }
+ proto = fields[0]
+ localMark = fields[1]
+ localAddress = nil
+ localPort = 0
case fields[0] == "->":
if len(fields) < 6 {
continue
@@ -187,6 +212,7 @@ func parseIPVSBackendStatus(file io.Reader) ([]IPVSBackendStatus, error) {
status = append(status, IPVSBackendStatus{
LocalAddress: localAddress,
LocalPort: localPort,
+ LocalMark: localMark,
RemoteAddress: remoteAddress,
RemotePort: remotePort,
Proto: proto,
@@ -200,22 +226,31 @@ func parseIPVSBackendStatus(file io.Reader) ([]IPVSBackendStatus, error) {
}
func parseIPPort(s string) (net.IP, uint16, error) {
- tmp := strings.SplitN(s, ":", 2)
-
- if len(tmp) != 2 {
- return nil, 0, fmt.Errorf("invalid IP:Port: %s", s)
- }
+ var (
+ ip net.IP
+ err error
+ )
- if len(tmp[0]) != 8 && len(tmp[0]) != 32 {
- return nil, 0, fmt.Errorf("invalid IP: %s", tmp[0])
+ switch len(s) {
+ case 13:
+ ip, err = hex.DecodeString(s[0:8])
+ if err != nil {
+ return nil, 0, err
+ }
+ case 46:
+ ip = net.ParseIP(s[1:40])
+ if ip == nil {
+ return nil, 0, fmt.Errorf("invalid IPv6 address: %s", s[1:40])
+ }
+ default:
+ return nil, 0, fmt.Errorf("unexpected IP:Port: %s", s)
}
- ip, err := hex.DecodeString(tmp[0])
- if err != nil {
- return nil, 0, err
+ portString := s[len(s)-4:]
+ if len(portString) != 4 {
+ return nil, 0, fmt.Errorf("unexpected port string format: %s", portString)
}
-
- port, err := strconv.ParseUint(tmp[1], 16, 16)
+ port, err := strconv.ParseUint(portString, 16, 16)
if err != nil {
return nil, 0, err
}
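The rewritten parseIPPort keys off the total string length instead of splitting on ':'. A sketch of the two encodings it now accepts, as it might appear in an in-package test (values taken from the ip_vs fixture above):

// IPv4: 8 hex digits, ':', 4 hex port digits (13 characters total).
ip, port, err := parseIPPort("C0A80016:0CEA")
// ip = 192.168.0.22, port = 0x0CEA = 3306, err = nil

// IPv6: bracketed, fully expanded address, ':', 4 hex port digits (46 characters total).
ip6, port6, err6 := parseIPPort("[2620:0000:0000:0000:0000:0000:0000:0001]:0050")
// ip6 = 2620::1, port6 = 0x0050 = 80, err6 = nil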
diff --git a/vendor/github.com/prometheus/procfs/mdstat.go b/vendor/github.com/prometheus/procfs/mdstat.go
index d7a248c0..9dc19583 100644
--- a/vendor/github.com/prometheus/procfs/mdstat.go
+++ b/vendor/github.com/prometheus/procfs/mdstat.go
@@ -1,3 +1,16 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
package procfs
import (
diff --git a/vendor/github.com/prometheus/procfs/mountstats.go b/vendor/github.com/prometheus/procfs/mountstats.go
new file mode 100644
index 00000000..7a8a1e09
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/mountstats.go
@@ -0,0 +1,606 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+// While implementing parsing of /proc/[pid]/mountstats, this blog was used
+// heavily as a reference:
+// https://utcc.utoronto.ca/~cks/space/blog/linux/NFSMountstatsIndex
+//
+// Special thanks to Chris Siebenmann for all of his posts explaining the
+// various statistics available for NFS.
+
+import (
+ "bufio"
+ "fmt"
+ "io"
+ "strconv"
+ "strings"
+ "time"
+)
+
+// Constants shared between multiple functions.
+const (
+ deviceEntryLen = 8
+
+ fieldBytesLen = 8
+ fieldEventsLen = 27
+
+ statVersion10 = "1.0"
+ statVersion11 = "1.1"
+
+ fieldTransport10TCPLen = 10
+ fieldTransport10UDPLen = 7
+
+ fieldTransport11TCPLen = 13
+ fieldTransport11UDPLen = 10
+)
+
+// A Mount is a device mount parsed from /proc/[pid]/mountstats.
+type Mount struct {
+ // Name of the device.
+ Device string
+ // The mount point of the device.
+ Mount string
+ // The filesystem type used by the device.
+ Type string
+	// If available, additional statistics related to this Mount.
+ // Use a type assertion to determine if additional statistics are available.
+ Stats MountStats
+}
+
+// A MountStats is a type which contains detailed statistics for a specific
+// type of Mount.
+type MountStats interface {
+ mountStats()
+}
+
+// A MountStatsNFS is a MountStats implementation for NFSv3 and v4 mounts.
+type MountStatsNFS struct {
+ // The version of statistics provided.
+ StatVersion string
+ // The age of the NFS mount.
+ Age time.Duration
+ // Statistics related to byte counters for various operations.
+ Bytes NFSBytesStats
+ // Statistics related to various NFS event occurrences.
+ Events NFSEventsStats
+ // Statistics broken down by filesystem operation.
+ Operations []NFSOperationStats
+ // Statistics about the NFS RPC transport.
+ Transport NFSTransportStats
+}
+
+// mountStats implements MountStats.
+func (m MountStatsNFS) mountStats() {}
+
+// A NFSBytesStats contains statistics about the number of bytes read and written
+// by an NFS client to and from an NFS server.
+type NFSBytesStats struct {
+ // Number of bytes read using the read() syscall.
+ Read uint64
+ // Number of bytes written using the write() syscall.
+ Write uint64
+ // Number of bytes read using the read() syscall in O_DIRECT mode.
+ DirectRead uint64
+ // Number of bytes written using the write() syscall in O_DIRECT mode.
+ DirectWrite uint64
+ // Number of bytes read from the NFS server, in total.
+ ReadTotal uint64
+ // Number of bytes written to the NFS server, in total.
+ WriteTotal uint64
+ // Number of pages read directly via mmap()'d files.
+ ReadPages uint64
+ // Number of pages written directly via mmap()'d files.
+ WritePages uint64
+}
+
+// A NFSEventsStats contains statistics about NFS event occurrences.
+type NFSEventsStats struct {
+ // Number of times cached inode attributes are re-validated from the server.
+ InodeRevalidate uint64
+ // Number of times cached dentry nodes are re-validated from the server.
+ DnodeRevalidate uint64
+ // Number of times an inode cache is cleared.
+ DataInvalidate uint64
+ // Number of times cached inode attributes are invalidated.
+ AttributeInvalidate uint64
+ // Number of times files or directories have been open()'d.
+ VFSOpen uint64
+ // Number of times a directory lookup has occurred.
+ VFSLookup uint64
+ // Number of times permissions have been checked.
+ VFSAccess uint64
+ // Number of updates (and potential writes) to pages.
+ VFSUpdatePage uint64
+ // Number of pages read directly via mmap()'d files.
+ VFSReadPage uint64
+ // Number of times a group of pages have been read.
+ VFSReadPages uint64
+ // Number of pages written directly via mmap()'d files.
+ VFSWritePage uint64
+ // Number of times a group of pages have been written.
+ VFSWritePages uint64
+ // Number of times directory entries have been read with getdents().
+ VFSGetdents uint64
+ // Number of times attributes have been set on inodes.
+ VFSSetattr uint64
+ // Number of pending writes that have been forcefully flushed to the server.
+ VFSFlush uint64
+ // Number of times fsync() has been called on directories and files.
+ VFSFsync uint64
+ // Number of times locking has been attempted on a file.
+ VFSLock uint64
+ // Number of times files have been closed and released.
+ VFSFileRelease uint64
+ // Unknown. Possibly unused.
+ CongestionWait uint64
+ // Number of times files have been truncated.
+ Truncation uint64
+ // Number of times a file has been grown due to writes beyond its existing end.
+ WriteExtension uint64
+ // Number of times a file was removed while still open by another process.
+ SillyRename uint64
+ // Number of times the NFS server gave less data than expected while reading.
+ ShortRead uint64
+ // Number of times the NFS server wrote less data than expected while writing.
+ ShortWrite uint64
+ // Number of times the NFS server indicated EJUKEBOX; retrieving data from
+ // offline storage.
+ JukeboxDelay uint64
+ // Number of NFS v4.1+ pNFS reads.
+ PNFSRead uint64
+ // Number of NFS v4.1+ pNFS writes.
+ PNFSWrite uint64
+}
+
+// A NFSOperationStats contains statistics for a single operation.
+type NFSOperationStats struct {
+ // The name of the operation.
+ Operation string
+ // Number of requests performed for this operation.
+ Requests uint64
+ // Number of times an actual RPC request has been transmitted for this operation.
+ Transmissions uint64
+ // Number of times a request has had a major timeout.
+ MajorTimeouts uint64
+ // Number of bytes sent for this operation, including RPC headers and payload.
+ BytesSent uint64
+ // Number of bytes received for this operation, including RPC headers and payload.
+ BytesReceived uint64
+ // Duration all requests spent queued for transmission before they were sent.
+ CumulativeQueueTime time.Duration
+ // Duration it took to get a reply back after the request was transmitted.
+ CumulativeTotalResponseTime time.Duration
+ // Duration from when a request was enqueued to when it was completely handled.
+ CumulativeTotalRequestTime time.Duration
+}
+
+// A NFSTransportStats contains statistics for the NFS mount RPC requests and
+// responses.
+type NFSTransportStats struct {
+ // The transport protocol used for the NFS mount.
+ Protocol string
+ // The local port used for the NFS mount.
+ Port uint64
+ // Number of times the client has had to establish a connection from scratch
+ // to the NFS server.
+ Bind uint64
+ // Number of times the client has made a TCP connection to the NFS server.
+ Connect uint64
+ // Duration (in jiffies, a kernel internal unit of time) the NFS mount has
+ // spent waiting for connections to the server to be established.
+ ConnectIdleTime uint64
+ // Duration since the NFS mount last saw any RPC traffic.
+ IdleTime time.Duration
+ // Number of RPC requests for this mount sent to the NFS server.
+ Sends uint64
+ // Number of RPC responses for this mount received from the NFS server.
+ Receives uint64
+ // Number of times the NFS server sent a response with a transaction ID
+ // unknown to this client.
+ BadTransactionIDs uint64
+ // A running counter, incremented on each request as the current difference
+	// between sends and receives.
+ CumulativeActiveRequests uint64
+ // A running counter, incremented on each request by the current backlog
+ // queue size.
+ CumulativeBacklog uint64
+
+ // Stats below only available with stat version 1.1.
+
+ // Maximum number of simultaneously active RPC requests ever used.
+ MaximumRPCSlotsUsed uint64
+ // A running counter, incremented on each request as the current size of the
+ // sending queue.
+ CumulativeSendingQueue uint64
+ // A running counter, incremented on each request as the current size of the
+ // pending queue.
+ CumulativePendingQueue uint64
+}
+
+// parseMountStats parses a /proc/[pid]/mountstats file and returns a slice
+// of Mount structures containing detailed information about each mount.
+// If available, statistics for each mount are parsed as well.
+func parseMountStats(r io.Reader) ([]*Mount, error) {
+ const (
+ device = "device"
+ statVersionPrefix = "statvers="
+
+ nfs3Type = "nfs"
+ nfs4Type = "nfs4"
+ )
+
+ var mounts []*Mount
+
+ s := bufio.NewScanner(r)
+ for s.Scan() {
+ // Only look for device entries in this function
+ ss := strings.Fields(string(s.Bytes()))
+ if len(ss) == 0 || ss[0] != device {
+ continue
+ }
+
+ m, err := parseMount(ss)
+ if err != nil {
+ return nil, err
+ }
+
+ // Does this mount also possess statistics information?
+ if len(ss) > deviceEntryLen {
+ // Only NFSv3 and v4 are supported for parsing statistics
+ if m.Type != nfs3Type && m.Type != nfs4Type {
+ return nil, fmt.Errorf("cannot parse MountStats for fstype %q", m.Type)
+ }
+
+ statVersion := strings.TrimPrefix(ss[8], statVersionPrefix)
+
+ stats, err := parseMountStatsNFS(s, statVersion)
+ if err != nil {
+ return nil, err
+ }
+
+ m.Stats = stats
+ }
+
+ mounts = append(mounts, m)
+ }
+
+ return mounts, s.Err()
+}
+
+// parseMount parses an entry in /proc/[pid]/mountstats in the format:
+// device [device] mounted on [mount] with fstype [type]
+func parseMount(ss []string) (*Mount, error) {
+ if len(ss) < deviceEntryLen {
+ return nil, fmt.Errorf("invalid device entry: %v", ss)
+ }
+
+ // Check for specific words appearing at specific indices to ensure
+ // the format is consistent with what we expect
+ format := []struct {
+ i int
+ s string
+ }{
+ {i: 0, s: "device"},
+ {i: 2, s: "mounted"},
+ {i: 3, s: "on"},
+ {i: 5, s: "with"},
+ {i: 6, s: "fstype"},
+ }
+
+ for _, f := range format {
+ if ss[f.i] != f.s {
+ return nil, fmt.Errorf("invalid device entry: %v", ss)
+ }
+ }
+
+ return &Mount{
+ Device: ss[1],
+ Mount: ss[4],
+ Type: ss[7],
+ }, nil
+}
+
+// parseMountStatsNFS parses a MountStatsNFS by scanning additional information
+// related to NFS statistics.
+func parseMountStatsNFS(s *bufio.Scanner, statVersion string) (*MountStatsNFS, error) {
+ // Field indicators for parsing specific types of data
+ const (
+ fieldAge = "age:"
+ fieldBytes = "bytes:"
+ fieldEvents = "events:"
+ fieldPerOpStats = "per-op"
+ fieldTransport = "xprt:"
+ )
+
+ stats := &MountStatsNFS{
+ StatVersion: statVersion,
+ }
+
+ for s.Scan() {
+ ss := strings.Fields(string(s.Bytes()))
+ if len(ss) == 0 {
+ break
+ }
+ if len(ss) < 2 {
+ return nil, fmt.Errorf("not enough information for NFS stats: %v", ss)
+ }
+
+ switch ss[0] {
+ case fieldAge:
+ // Age integer is in seconds
+ d, err := time.ParseDuration(ss[1] + "s")
+ if err != nil {
+ return nil, err
+ }
+
+ stats.Age = d
+ case fieldBytes:
+ bstats, err := parseNFSBytesStats(ss[1:])
+ if err != nil {
+ return nil, err
+ }
+
+ stats.Bytes = *bstats
+ case fieldEvents:
+ estats, err := parseNFSEventsStats(ss[1:])
+ if err != nil {
+ return nil, err
+ }
+
+ stats.Events = *estats
+ case fieldTransport:
+ if len(ss) < 3 {
+ return nil, fmt.Errorf("not enough information for NFS transport stats: %v", ss)
+ }
+
+ tstats, err := parseNFSTransportStats(ss[1:], statVersion)
+ if err != nil {
+ return nil, err
+ }
+
+ stats.Transport = *tstats
+ }
+
+		// When encountering "per-operation statistics", we must break this
+		// loop and parse them separately to ensure we can terminate parsing
+		// before reaching another device entry; this is why the check below is
+		// an 'if' statement rather than another switch case.
+ if ss[0] == fieldPerOpStats {
+ break
+ }
+ }
+
+ if err := s.Err(); err != nil {
+ return nil, err
+ }
+
+ // NFS per-operation stats appear last before the next device entry
+ perOpStats, err := parseNFSOperationStats(s)
+ if err != nil {
+ return nil, err
+ }
+
+ stats.Operations = perOpStats
+
+ return stats, nil
+}
+
+// parseNFSBytesStats parses a NFSBytesStats line using an input set of
+// integer fields.
+func parseNFSBytesStats(ss []string) (*NFSBytesStats, error) {
+ if len(ss) != fieldBytesLen {
+ return nil, fmt.Errorf("invalid NFS bytes stats: %v", ss)
+ }
+
+ ns := make([]uint64, 0, fieldBytesLen)
+ for _, s := range ss {
+ n, err := strconv.ParseUint(s, 10, 64)
+ if err != nil {
+ return nil, err
+ }
+
+ ns = append(ns, n)
+ }
+
+ return &NFSBytesStats{
+ Read: ns[0],
+ Write: ns[1],
+ DirectRead: ns[2],
+ DirectWrite: ns[3],
+ ReadTotal: ns[4],
+ WriteTotal: ns[5],
+ ReadPages: ns[6],
+ WritePages: ns[7],
+ }, nil
+}
+
+// parseNFSEventsStats parses a NFSEventsStats line using an input set of
+// integer fields.
+func parseNFSEventsStats(ss []string) (*NFSEventsStats, error) {
+ if len(ss) != fieldEventsLen {
+ return nil, fmt.Errorf("invalid NFS events stats: %v", ss)
+ }
+
+ ns := make([]uint64, 0, fieldEventsLen)
+ for _, s := range ss {
+ n, err := strconv.ParseUint(s, 10, 64)
+ if err != nil {
+ return nil, err
+ }
+
+ ns = append(ns, n)
+ }
+
+ return &NFSEventsStats{
+ InodeRevalidate: ns[0],
+ DnodeRevalidate: ns[1],
+ DataInvalidate: ns[2],
+ AttributeInvalidate: ns[3],
+ VFSOpen: ns[4],
+ VFSLookup: ns[5],
+ VFSAccess: ns[6],
+ VFSUpdatePage: ns[7],
+ VFSReadPage: ns[8],
+ VFSReadPages: ns[9],
+ VFSWritePage: ns[10],
+ VFSWritePages: ns[11],
+ VFSGetdents: ns[12],
+ VFSSetattr: ns[13],
+ VFSFlush: ns[14],
+ VFSFsync: ns[15],
+ VFSLock: ns[16],
+ VFSFileRelease: ns[17],
+ CongestionWait: ns[18],
+ Truncation: ns[19],
+ WriteExtension: ns[20],
+ SillyRename: ns[21],
+ ShortRead: ns[22],
+ ShortWrite: ns[23],
+ JukeboxDelay: ns[24],
+ PNFSRead: ns[25],
+ PNFSWrite: ns[26],
+ }, nil
+}
+
+// parseNFSOperationStats parses a slice of NFSOperationStats by scanning
+// additional information about per-operation statistics until an empty
+// line is reached.
+func parseNFSOperationStats(s *bufio.Scanner) ([]NFSOperationStats, error) {
+ const (
+ // Number of expected fields in each per-operation statistics set
+ numFields = 9
+ )
+
+ var ops []NFSOperationStats
+
+ for s.Scan() {
+ ss := strings.Fields(string(s.Bytes()))
+ if len(ss) == 0 {
+ // Must break when reading a blank line after per-operation stats to
+ // enable top-level function to parse the next device entry
+ break
+ }
+
+ if len(ss) != numFields {
+ return nil, fmt.Errorf("invalid NFS per-operations stats: %v", ss)
+ }
+
+ // Skip string operation name for integers
+ ns := make([]uint64, 0, numFields-1)
+ for _, st := range ss[1:] {
+ n, err := strconv.ParseUint(st, 10, 64)
+ if err != nil {
+ return nil, err
+ }
+
+ ns = append(ns, n)
+ }
+
+ ops = append(ops, NFSOperationStats{
+ Operation: strings.TrimSuffix(ss[0], ":"),
+ Requests: ns[0],
+ Transmissions: ns[1],
+ MajorTimeouts: ns[2],
+ BytesSent: ns[3],
+ BytesReceived: ns[4],
+ CumulativeQueueTime: time.Duration(ns[5]) * time.Millisecond,
+ CumulativeTotalResponseTime: time.Duration(ns[6]) * time.Millisecond,
+ CumulativeTotalRequestTime: time.Duration(ns[7]) * time.Millisecond,
+ })
+ }
+
+ return ops, s.Err()
+}
+
+// parseNFSTransportStats parses a NFSTransportStats line using an input set of
+// integer fields matched to a specific stats version.
+func parseNFSTransportStats(ss []string, statVersion string) (*NFSTransportStats, error) {
+ // Extract the protocol field. It is the only string value in the line
+ protocol := ss[0]
+ ss = ss[1:]
+
+ switch statVersion {
+ case statVersion10:
+ var expectedLength int
+ if protocol == "tcp" {
+ expectedLength = fieldTransport10TCPLen
+ } else if protocol == "udp" {
+ expectedLength = fieldTransport10UDPLen
+ } else {
+ return nil, fmt.Errorf("invalid NFS protocol \"%s\" in stats 1.0 statement: %v", protocol, ss)
+ }
+ if len(ss) != expectedLength {
+ return nil, fmt.Errorf("invalid NFS transport stats 1.0 statement: %v", ss)
+ }
+ case statVersion11:
+ var expectedLength int
+ if protocol == "tcp" {
+ expectedLength = fieldTransport11TCPLen
+ } else if protocol == "udp" {
+ expectedLength = fieldTransport11UDPLen
+ } else {
+ return nil, fmt.Errorf("invalid NFS protocol \"%s\" in stats 1.1 statement: %v", protocol, ss)
+ }
+ if len(ss) != expectedLength {
+ return nil, fmt.Errorf("invalid NFS transport stats 1.1 statement: %v", ss)
+ }
+ default:
+ return nil, fmt.Errorf("unrecognized NFS transport stats version: %q", statVersion)
+ }
+
+ // Allocate enough for v1.1 stats since zero value for v1.1 stats will be okay
+ // in a v1.0 response. Since the stat length is bigger for TCP stats, we use
+ // the TCP length here.
+ //
+ // Note: slice length must be set to length of v1.1 stats to avoid a panic when
+ // only v1.0 stats are present.
+ // See: https://github.com/prometheus/node_exporter/issues/571.
+ ns := make([]uint64, fieldTransport11TCPLen)
+ for i, s := range ss {
+ n, err := strconv.ParseUint(s, 10, 64)
+ if err != nil {
+ return nil, err
+ }
+
+ ns[i] = n
+ }
+
+ // The fields differ depending on the transport protocol (TCP or UDP)
+ // From https://utcc.utoronto.ca/%7Ecks/space/blog/linux/NFSMountstatsXprt
+ //
+ // For the udp RPC transport there is no connection count, connect idle time,
+ // or idle time (fields #3, #4, and #5); all other fields are the same. So
+ // we set them to 0 here.
+ if protocol == "udp" {
+ ns = append(ns[:2], append(make([]uint64, 3), ns[2:]...)...)
+ }
+
+ return &NFSTransportStats{
+ Protocol: protocol,
+ Port: ns[0],
+ Bind: ns[1],
+ Connect: ns[2],
+ ConnectIdleTime: ns[3],
+ IdleTime: time.Duration(ns[4]) * time.Second,
+ Sends: ns[5],
+ Receives: ns[6],
+ BadTransactionIDs: ns[7],
+ CumulativeActiveRequests: ns[8],
+ CumulativeBacklog: ns[9],
+ MaximumRPCSlotsUsed: ns[10],
+ CumulativeSendingQueue: ns[11],
+ CumulativePendingQueue: ns[12],
+ }, nil
+}
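parseMountStats is the unexported entry point for /proc/[pid]/mountstats; a short in-package sketch feeding it a single non-NFS device entry:

r := strings.NewReader("device proc mounted on /proc with fstype proc\n")
mounts, err := parseMountStats(r)
if err != nil {
	log.Fatal(err)
}
for _, m := range mounts {
	// NFS mounts additionally carry a *MountStatsNFS in m.Stats.
	fmt.Println(m.Device, m.Mount, m.Type)
}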
diff --git a/vendor/github.com/prometheus/procfs/net_dev.go b/vendor/github.com/prometheus/procfs/net_dev.go
new file mode 100644
index 00000000..3f252337
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/net_dev.go
@@ -0,0 +1,216 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+ "bufio"
+ "errors"
+ "os"
+ "sort"
+ "strconv"
+ "strings"
+)
+
+// NetDevLine is a single line parsed from /proc/net/dev or /proc/[pid]/net/dev.
+type NetDevLine struct {
+ Name string `json:"name"` // The name of the interface.
+ RxBytes uint64 `json:"rx_bytes"` // Cumulative count of bytes received.
+ RxPackets uint64 `json:"rx_packets"` // Cumulative count of packets received.
+ RxErrors uint64 `json:"rx_errors"` // Cumulative count of receive errors encountered.
+ RxDropped uint64 `json:"rx_dropped"` // Cumulative count of packets dropped while receiving.
+ RxFIFO uint64 `json:"rx_fifo"` // Cumulative count of FIFO buffer errors.
+ RxFrame uint64 `json:"rx_frame"` // Cumulative count of packet framing errors.
+ RxCompressed uint64 `json:"rx_compressed"` // Cumulative count of compressed packets received by the device driver.
+ RxMulticast uint64 `json:"rx_multicast"` // Cumulative count of multicast frames received by the device driver.
+ TxBytes uint64 `json:"tx_bytes"` // Cumulative count of bytes transmitted.
+ TxPackets uint64 `json:"tx_packets"` // Cumulative count of packets transmitted.
+ TxErrors uint64 `json:"tx_errors"` // Cumulative count of transmit errors encountered.
+ TxDropped uint64 `json:"tx_dropped"` // Cumulative count of packets dropped while transmitting.
+ TxFIFO uint64 `json:"tx_fifo"` // Cumulative count of FIFO buffer errors.
+ TxCollisions uint64 `json:"tx_collisions"` // Cumulative count of collisions detected on the interface.
+ TxCarrier uint64 `json:"tx_carrier"` // Cumulative count of carrier losses detected by the device driver.
+ TxCompressed uint64 `json:"tx_compressed"` // Cumulative count of compressed packets transmitted by the device driver.
+}
+
+// NetDev is parsed from /proc/net/dev or /proc/[pid]/net/dev. The map keys
+// are interface names.
+type NetDev map[string]NetDevLine
+
+// NewNetDev returns kernel/system statistics read from /proc/net/dev.
+func NewNetDev() (NetDev, error) {
+ fs, err := NewFS(DefaultMountPoint)
+ if err != nil {
+ return nil, err
+ }
+
+ return fs.NewNetDev()
+}
+
+// NewNetDev returns kernel/system statistics read from /proc/net/dev.
+func (fs FS) NewNetDev() (NetDev, error) {
+ return newNetDev(fs.Path("net/dev"))
+}
+
+// NewNetDev returns kernel/system statistics read from /proc/[pid]/net/dev.
+func (p Proc) NewNetDev() (NetDev, error) {
+ return newNetDev(p.path("net/dev"))
+}
+
+// newNetDev creates a new NetDev from the contents of the given file.
+func newNetDev(file string) (NetDev, error) {
+ f, err := os.Open(file)
+ if err != nil {
+ return NetDev{}, err
+ }
+ defer f.Close()
+
+ nd := NetDev{}
+ s := bufio.NewScanner(f)
+ for n := 0; s.Scan(); n++ {
+ // Skip the 2 header lines.
+ if n < 2 {
+ continue
+ }
+
+ line, err := nd.parseLine(s.Text())
+ if err != nil {
+ return nd, err
+ }
+
+ nd[line.Name] = *line
+ }
+
+ return nd, s.Err()
+}
+
+// parseLine parses a single line from the /proc/net/dev file. Header lines
+// must be filtered prior to calling this method.
+func (nd NetDev) parseLine(rawLine string) (*NetDevLine, error) {
+ parts := strings.SplitN(rawLine, ":", 2)
+ if len(parts) != 2 {
+ return nil, errors.New("invalid net/dev line, missing colon")
+ }
+ fields := strings.Fields(strings.TrimSpace(parts[1]))
+
+ var err error
+ line := &NetDevLine{}
+
+ // Interface Name
+ line.Name = strings.TrimSpace(parts[0])
+ if line.Name == "" {
+ return nil, errors.New("invalid net/dev line, empty interface name")
+ }
+
+ // RX
+ line.RxBytes, err = strconv.ParseUint(fields[0], 10, 64)
+ if err != nil {
+ return nil, err
+ }
+ line.RxPackets, err = strconv.ParseUint(fields[1], 10, 64)
+ if err != nil {
+ return nil, err
+ }
+ line.RxErrors, err = strconv.ParseUint(fields[2], 10, 64)
+ if err != nil {
+ return nil, err
+ }
+ line.RxDropped, err = strconv.ParseUint(fields[3], 10, 64)
+ if err != nil {
+ return nil, err
+ }
+ line.RxFIFO, err = strconv.ParseUint(fields[4], 10, 64)
+ if err != nil {
+ return nil, err
+ }
+ line.RxFrame, err = strconv.ParseUint(fields[5], 10, 64)
+ if err != nil {
+ return nil, err
+ }
+ line.RxCompressed, err = strconv.ParseUint(fields[6], 10, 64)
+ if err != nil {
+ return nil, err
+ }
+ line.RxMulticast, err = strconv.ParseUint(fields[7], 10, 64)
+ if err != nil {
+ return nil, err
+ }
+
+ // TX
+ line.TxBytes, err = strconv.ParseUint(fields[8], 10, 64)
+ if err != nil {
+ return nil, err
+ }
+ line.TxPackets, err = strconv.ParseUint(fields[9], 10, 64)
+ if err != nil {
+ return nil, err
+ }
+ line.TxErrors, err = strconv.ParseUint(fields[10], 10, 64)
+ if err != nil {
+ return nil, err
+ }
+ line.TxDropped, err = strconv.ParseUint(fields[11], 10, 64)
+ if err != nil {
+ return nil, err
+ }
+ line.TxFIFO, err = strconv.ParseUint(fields[12], 10, 64)
+ if err != nil {
+ return nil, err
+ }
+ line.TxCollisions, err = strconv.ParseUint(fields[13], 10, 64)
+ if err != nil {
+ return nil, err
+ }
+ line.TxCarrier, err = strconv.ParseUint(fields[14], 10, 64)
+ if err != nil {
+ return nil, err
+ }
+ line.TxCompressed, err = strconv.ParseUint(fields[15], 10, 64)
+ if err != nil {
+ return nil, err
+ }
+
+ return line, nil
+}
+
+// Total aggregates the values across interfaces and returns a new NetDevLine.
+// The Name field will be a sorted comma separated list of interface names.
+func (nd NetDev) Total() NetDevLine {
+ total := NetDevLine{}
+
+ names := make([]string, 0, len(nd))
+ for _, ifc := range nd {
+ names = append(names, ifc.Name)
+ total.RxBytes += ifc.RxBytes
+		total.RxPackets += ifc.RxPackets
+ total.RxErrors += ifc.RxErrors
+ total.RxDropped += ifc.RxDropped
+ total.RxFIFO += ifc.RxFIFO
+ total.RxFrame += ifc.RxFrame
+ total.RxCompressed += ifc.RxCompressed
+ total.RxMulticast += ifc.RxMulticast
+ total.TxBytes += ifc.TxBytes
+ total.TxPackets += ifc.TxPackets
+ total.TxErrors += ifc.TxErrors
+ total.TxDropped += ifc.TxDropped
+ total.TxFIFO += ifc.TxFIFO
+ total.TxCollisions += ifc.TxCollisions
+ total.TxCarrier += ifc.TxCarrier
+ total.TxCompressed += ifc.TxCompressed
+ }
+ sort.Strings(names)
+ total.Name = strings.Join(names, ", ")
+
+ return total
+}
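A minimal sketch of the exported /proc/net/dev API added above, again assuming the vendored import path github.com/prometheus/procfs:

nd, err := procfs.NewNetDev() // parses /proc/net/dev
if err != nil {
	log.Fatal(err)
}
for name, line := range nd {
	fmt.Printf("%s: rx=%d bytes tx=%d bytes\n", name, line.RxBytes, line.TxBytes)
}

// Total sums the counters across all interfaces; Name becomes a sorted,
// comma-separated list of interface names.
total := nd.Total()
fmt.Println(total.Name, total.RxBytes, total.TxBytes)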
diff --git a/vendor/github.com/prometheus/procfs/nfs/nfs.go b/vendor/github.com/prometheus/procfs/nfs/nfs.go
new file mode 100644
index 00000000..651bf681
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/nfs/nfs.go
@@ -0,0 +1,263 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package nfs implements parsing of /proc/net/rpc/nfs and /proc/net/rpc/nfsd.
+// Fields are documented in https://www.svennd.be/nfsd-stats-explained-procnetrpcnfsd/
+package nfs
+
+// ReplyCache models the "rc" line.
+type ReplyCache struct {
+ Hits uint64
+ Misses uint64
+ NoCache uint64
+}
+
+// FileHandles models the "fh" line.
+type FileHandles struct {
+ Stale uint64
+ TotalLookups uint64
+ AnonLookups uint64
+ DirNoCache uint64
+ NoDirNoCache uint64
+}
+
+// InputOutput models the "io" line.
+type InputOutput struct {
+ Read uint64
+ Write uint64
+}
+
+// Threads models the "th" line.
+type Threads struct {
+ Threads uint64
+ FullCnt uint64
+}
+
+// ReadAheadCache models the "ra" line.
+type ReadAheadCache struct {
+ CacheSize uint64
+ CacheHistogram []uint64
+ NotFound uint64
+}
+
+// Network models the "net" line.
+type Network struct {
+ NetCount uint64
+ UDPCount uint64
+ TCPCount uint64
+ TCPConnect uint64
+}
+
+// ClientRPC models the nfs "rpc" line.
+type ClientRPC struct {
+ RPCCount uint64
+ Retransmissions uint64
+ AuthRefreshes uint64
+}
+
+// ServerRPC models the nfsd "rpc" line.
+type ServerRPC struct {
+ RPCCount uint64
+ BadCnt uint64
+ BadFmt uint64
+ BadAuth uint64
+ BadcInt uint64
+}
+
+// V2Stats models the "proc2" line.
+type V2Stats struct {
+ Null uint64
+ GetAttr uint64
+ SetAttr uint64
+ Root uint64
+ Lookup uint64
+ ReadLink uint64
+ Read uint64
+ WrCache uint64
+ Write uint64
+ Create uint64
+ Remove uint64
+ Rename uint64
+ Link uint64
+ SymLink uint64
+ MkDir uint64
+ RmDir uint64
+ ReadDir uint64
+ FsStat uint64
+}
+
+// V3Stats models the "proc3" line.
+type V3Stats struct {
+ Null uint64
+ GetAttr uint64
+ SetAttr uint64
+ Lookup uint64
+ Access uint64
+ ReadLink uint64
+ Read uint64
+ Write uint64
+ Create uint64
+ MkDir uint64
+ SymLink uint64
+ MkNod uint64
+ Remove uint64
+ RmDir uint64
+ Rename uint64
+ Link uint64
+ ReadDir uint64
+ ReadDirPlus uint64
+ FsStat uint64
+ FsInfo uint64
+ PathConf uint64
+ Commit uint64
+}
+
+// ClientV4Stats models the nfs "proc4" line.
+type ClientV4Stats struct {
+ Null uint64
+ Read uint64
+ Write uint64
+ Commit uint64
+ Open uint64
+ OpenConfirm uint64
+ OpenNoattr uint64
+ OpenDowngrade uint64
+ Close uint64
+ Setattr uint64
+ FsInfo uint64
+ Renew uint64
+ SetClientID uint64
+ SetClientIDConfirm uint64
+ Lock uint64
+ Lockt uint64
+ Locku uint64
+ Access uint64
+ Getattr uint64
+ Lookup uint64
+ LookupRoot uint64
+ Remove uint64
+ Rename uint64
+ Link uint64
+ Symlink uint64
+ Create uint64
+ Pathconf uint64
+ StatFs uint64
+ ReadLink uint64
+ ReadDir uint64
+ ServerCaps uint64
+ DelegReturn uint64
+ GetACL uint64
+ SetACL uint64
+ FsLocations uint64
+ ReleaseLockowner uint64
+ Secinfo uint64
+ FsidPresent uint64
+ ExchangeID uint64
+ CreateSession uint64
+ DestroySession uint64
+ Sequence uint64
+ GetLeaseTime uint64
+ ReclaimComplete uint64
+ LayoutGet uint64
+ GetDeviceInfo uint64
+ LayoutCommit uint64
+ LayoutReturn uint64
+ SecinfoNoName uint64
+ TestStateID uint64
+ FreeStateID uint64
+ GetDeviceList uint64
+ BindConnToSession uint64
+ DestroyClientID uint64
+ Seek uint64
+ Allocate uint64
+ DeAllocate uint64
+ LayoutStats uint64
+ Clone uint64
+}
+
+// ServerV4Stats models the nfsd "proc4" line.
+type ServerV4Stats struct {
+ Null uint64
+ Compound uint64
+}
+
+// V4Ops models the "proc4ops" line: NFSv4 operations
+// Variable list, see:
+// v4.0 https://tools.ietf.org/html/rfc3010 (38 operations)
+// v4.1 https://tools.ietf.org/html/rfc5661 (58 operations)
+// v4.2 https://tools.ietf.org/html/draft-ietf-nfsv4-minorversion2-41 (71 operations)
+type V4Ops struct {
+ //Values uint64 // Variable depending on v4.x sub-version. TODO: Will this always at least include the fields in this struct?
+ Op0Unused uint64
+ Op1Unused uint64
+ Op2Future uint64
+ Access uint64
+ Close uint64
+ Commit uint64
+ Create uint64
+ DelegPurge uint64
+ DelegReturn uint64
+ GetAttr uint64
+ GetFH uint64
+ Link uint64
+ Lock uint64
+ Lockt uint64
+ Locku uint64
+ Lookup uint64
+ LookupRoot uint64
+ Nverify uint64
+ Open uint64
+ OpenAttr uint64
+ OpenConfirm uint64
+ OpenDgrd uint64
+ PutFH uint64
+ PutPubFH uint64
+ PutRootFH uint64
+ Read uint64
+ ReadDir uint64
+ ReadLink uint64
+ Remove uint64
+ Rename uint64
+ Renew uint64
+ RestoreFH uint64
+ SaveFH uint64
+ SecInfo uint64
+ SetAttr uint64
+ Verify uint64
+ Write uint64
+ RelLockOwner uint64
+}
+
+// ClientRPCStats models all stats from /proc/net/rpc/nfs.
+type ClientRPCStats struct {
+ Network Network
+ ClientRPC ClientRPC
+ V2Stats V2Stats
+ V3Stats V3Stats
+ ClientV4Stats ClientV4Stats
+}
+
+// ServerRPCStats models all stats from /proc/net/rpc/nfsd.
+type ServerRPCStats struct {
+ ReplyCache ReplyCache
+ FileHandles FileHandles
+ InputOutput InputOutput
+ Threads Threads
+ ReadAheadCache ReadAheadCache
+ Network Network
+ ServerRPC ServerRPC
+ V2Stats V2Stats
+ V3Stats V3Stats
+ ServerV4Stats ServerV4Stats
+ V4Ops V4Ops
+}
diff --git a/vendor/github.com/prometheus/procfs/nfs/parse.go b/vendor/github.com/prometheus/procfs/nfs/parse.go
new file mode 100644
index 00000000..95a83cc5
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/nfs/parse.go
@@ -0,0 +1,317 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package nfs
+
+import (
+ "fmt"
+)
+
+func parseReplyCache(v []uint64) (ReplyCache, error) {
+ if len(v) != 3 {
+ return ReplyCache{}, fmt.Errorf("invalid ReplyCache line %q", v)
+ }
+
+ return ReplyCache{
+ Hits: v[0],
+ Misses: v[1],
+ NoCache: v[2],
+ }, nil
+}
+
+func parseFileHandles(v []uint64) (FileHandles, error) {
+ if len(v) != 5 {
+		return FileHandles{}, fmt.Errorf("invalid FileHandles line %q", v)
+ }
+
+ return FileHandles{
+ Stale: v[0],
+ TotalLookups: v[1],
+ AnonLookups: v[2],
+ DirNoCache: v[3],
+ NoDirNoCache: v[4],
+ }, nil
+}
+
+func parseInputOutput(v []uint64) (InputOutput, error) {
+ if len(v) != 2 {
+ return InputOutput{}, fmt.Errorf("invalid InputOutput line %q", v)
+ }
+
+ return InputOutput{
+ Read: v[0],
+ Write: v[1],
+ }, nil
+}
+
+func parseThreads(v []uint64) (Threads, error) {
+ if len(v) != 2 {
+ return Threads{}, fmt.Errorf("invalid Threads line %q", v)
+ }
+
+ return Threads{
+ Threads: v[0],
+ FullCnt: v[1],
+ }, nil
+}
+
+func parseReadAheadCache(v []uint64) (ReadAheadCache, error) {
+ if len(v) != 12 {
+ return ReadAheadCache{}, fmt.Errorf("invalid ReadAheadCache line %q", v)
+ }
+
+ return ReadAheadCache{
+ CacheSize: v[0],
+ CacheHistogram: v[1:11],
+ NotFound: v[11],
+ }, nil
+}
+
+func parseNetwork(v []uint64) (Network, error) {
+ if len(v) != 4 {
+ return Network{}, fmt.Errorf("invalid Network line %q", v)
+ }
+
+ return Network{
+ NetCount: v[0],
+ UDPCount: v[1],
+ TCPCount: v[2],
+ TCPConnect: v[3],
+ }, nil
+}
+
+func parseServerRPC(v []uint64) (ServerRPC, error) {
+ if len(v) != 5 {
+ return ServerRPC{}, fmt.Errorf("invalid RPC line %q", v)
+ }
+
+ return ServerRPC{
+ RPCCount: v[0],
+ BadCnt: v[1],
+ BadFmt: v[2],
+ BadAuth: v[3],
+ BadcInt: v[4],
+ }, nil
+}
+
+func parseClientRPC(v []uint64) (ClientRPC, error) {
+ if len(v) != 3 {
+ return ClientRPC{}, fmt.Errorf("invalid RPC line %q", v)
+ }
+
+ return ClientRPC{
+ RPCCount: v[0],
+ Retransmissions: v[1],
+ AuthRefreshes: v[2],
+ }, nil
+}
+
+func parseV2Stats(v []uint64) (V2Stats, error) {
+ values := int(v[0])
+ if len(v[1:]) != values || values != 18 {
+ return V2Stats{}, fmt.Errorf("invalid V2Stats line %q", v)
+ }
+
+ return V2Stats{
+ Null: v[1],
+ GetAttr: v[2],
+ SetAttr: v[3],
+ Root: v[4],
+ Lookup: v[5],
+ ReadLink: v[6],
+ Read: v[7],
+ WrCache: v[8],
+ Write: v[9],
+ Create: v[10],
+ Remove: v[11],
+ Rename: v[12],
+ Link: v[13],
+ SymLink: v[14],
+ MkDir: v[15],
+ RmDir: v[16],
+ ReadDir: v[17],
+ FsStat: v[18],
+ }, nil
+}
+
+func parseV3Stats(v []uint64) (V3Stats, error) {
+ values := int(v[0])
+ if len(v[1:]) != values || values != 22 {
+ return V3Stats{}, fmt.Errorf("invalid V3Stats line %q", v)
+ }
+
+ return V3Stats{
+ Null: v[1],
+ GetAttr: v[2],
+ SetAttr: v[3],
+ Lookup: v[4],
+ Access: v[5],
+ ReadLink: v[6],
+ Read: v[7],
+ Write: v[8],
+ Create: v[9],
+ MkDir: v[10],
+ SymLink: v[11],
+ MkNod: v[12],
+ Remove: v[13],
+ RmDir: v[14],
+ Rename: v[15],
+ Link: v[16],
+ ReadDir: v[17],
+ ReadDirPlus: v[18],
+ FsStat: v[19],
+ FsInfo: v[20],
+ PathConf: v[21],
+ Commit: v[22],
+ }, nil
+}
+
+func parseClientV4Stats(v []uint64) (ClientV4Stats, error) {
+ values := int(v[0])
+ if len(v[1:]) != values {
+ return ClientV4Stats{}, fmt.Errorf("invalid ClientV4Stats line %q", v)
+ }
+
+ // This function currently supports mapping 59 NFS v4 client stats. Older
+ // kernels may emit fewer stats, so we must detect this and pad out the
+ // values to match the expected slice size.
+ if values < 59 {
+ newValues := make([]uint64, 60)
+ copy(newValues, v)
+ v = newValues
+ }
+
+ return ClientV4Stats{
+ Null: v[1],
+ Read: v[2],
+ Write: v[3],
+ Commit: v[4],
+ Open: v[5],
+ OpenConfirm: v[6],
+ OpenNoattr: v[7],
+ OpenDowngrade: v[8],
+ Close: v[9],
+ Setattr: v[10],
+ FsInfo: v[11],
+ Renew: v[12],
+ SetClientID: v[13],
+ SetClientIDConfirm: v[14],
+ Lock: v[15],
+ Lockt: v[16],
+ Locku: v[17],
+ Access: v[18],
+ Getattr: v[19],
+ Lookup: v[20],
+ LookupRoot: v[21],
+ Remove: v[22],
+ Rename: v[23],
+ Link: v[24],
+ Symlink: v[25],
+ Create: v[26],
+ Pathconf: v[27],
+ StatFs: v[28],
+ ReadLink: v[29],
+ ReadDir: v[30],
+ ServerCaps: v[31],
+ DelegReturn: v[32],
+ GetACL: v[33],
+ SetACL: v[34],
+ FsLocations: v[35],
+ ReleaseLockowner: v[36],
+ Secinfo: v[37],
+ FsidPresent: v[38],
+ ExchangeID: v[39],
+ CreateSession: v[40],
+ DestroySession: v[41],
+ Sequence: v[42],
+ GetLeaseTime: v[43],
+ ReclaimComplete: v[44],
+ LayoutGet: v[45],
+ GetDeviceInfo: v[46],
+ LayoutCommit: v[47],
+ LayoutReturn: v[48],
+ SecinfoNoName: v[49],
+ TestStateID: v[50],
+ FreeStateID: v[51],
+ GetDeviceList: v[52],
+ BindConnToSession: v[53],
+ DestroyClientID: v[54],
+ Seek: v[55],
+ Allocate: v[56],
+ DeAllocate: v[57],
+ LayoutStats: v[58],
+ Clone: v[59],
+ }, nil
+}
+
+func parseServerV4Stats(v []uint64) (ServerV4Stats, error) {
+ values := int(v[0])
+ if len(v[1:]) != values || values != 2 {
+ return ServerV4Stats{}, fmt.Errorf("invalid V4Stats line %q", v)
+ }
+
+ return ServerV4Stats{
+ Null: v[1],
+ Compound: v[2],
+ }, nil
+}
+
+func parseV4Ops(v []uint64) (V4Ops, error) {
+ values := int(v[0])
+ if len(v[1:]) != values || values < 39 {
+ return V4Ops{}, fmt.Errorf("invalid V4Ops line %q", v)
+ }
+
+ stats := V4Ops{
+ Op0Unused: v[1],
+ Op1Unused: v[2],
+ Op2Future: v[3],
+ Access: v[4],
+ Close: v[5],
+ Commit: v[6],
+ Create: v[7],
+ DelegPurge: v[8],
+ DelegReturn: v[9],
+ GetAttr: v[10],
+ GetFH: v[11],
+ Link: v[12],
+ Lock: v[13],
+ Lockt: v[14],
+ Locku: v[15],
+ Lookup: v[16],
+ LookupRoot: v[17],
+ Nverify: v[18],
+ Open: v[19],
+ OpenAttr: v[20],
+ OpenConfirm: v[21],
+ OpenDgrd: v[22],
+ PutFH: v[23],
+ PutPubFH: v[24],
+ PutRootFH: v[25],
+ Read: v[26],
+ ReadDir: v[27],
+ ReadLink: v[28],
+ Remove: v[29],
+ Rename: v[30],
+ Renew: v[31],
+ RestoreFH: v[32],
+ SaveFH: v[33],
+ SecInfo: v[34],
+ SetAttr: v[35],
+ Verify: v[36],
+ Write: v[37],
+ RelLockOwner: v[38],
+ }
+
+ return stats, nil
+}
diff --git a/vendor/github.com/prometheus/procfs/nfs/parse_nfs.go b/vendor/github.com/prometheus/procfs/nfs/parse_nfs.go
new file mode 100644
index 00000000..c0d3a5ad
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/nfs/parse_nfs.go
@@ -0,0 +1,67 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package nfs
+
+import (
+ "bufio"
+ "fmt"
+ "io"
+ "strings"
+
+ "github.com/prometheus/procfs/internal/util"
+)
+
+// ParseClientRPCStats returns stats read from /proc/net/rpc/nfs
+func ParseClientRPCStats(r io.Reader) (*ClientRPCStats, error) {
+ stats := &ClientRPCStats{}
+
+ scanner := bufio.NewScanner(r)
+ for scanner.Scan() {
+ line := scanner.Text()
+ parts := strings.Fields(scanner.Text())
+ // require at least <key> <value>
+ if len(parts) < 2 {
+ return nil, fmt.Errorf("invalid NFS metric line %q", line)
+ }
+
+ values, err := util.ParseUint64s(parts[1:])
+ if err != nil {
+ return nil, fmt.Errorf("error parsing NFS metric line: %s", err)
+ }
+
+ switch metricLine := parts[0]; metricLine {
+ case "net":
+ stats.Network, err = parseNetwork(values)
+ case "rpc":
+ stats.ClientRPC, err = parseClientRPC(values)
+ case "proc2":
+ stats.V2Stats, err = parseV2Stats(values)
+ case "proc3":
+ stats.V3Stats, err = parseV3Stats(values)
+ case "proc4":
+ stats.ClientV4Stats, err = parseClientV4Stats(values)
+ default:
+ return nil, fmt.Errorf("unknown NFS metric line %q", metricLine)
+ }
+ if err != nil {
+ return nil, fmt.Errorf("errors parsing NFS metric line: %s", err)
+ }
+ }
+
+ if err := scanner.Err(); err != nil {
+ return nil, fmt.Errorf("error scanning NFS file: %s", err)
+ }
+
+ return stats, nil
+}
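
A short sketch of driving ParseClientRPCStats directly from the proc file it documents; the field names used below are the ones declared in nfs.go above:

package main

import (
	"fmt"
	"log"
	"os"

	"github.com/prometheus/procfs/nfs"
)

func main() {
	f, err := os.Open("/proc/net/rpc/nfs")
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	// ParseClientRPCStats scans the labelled lines ("net", "rpc", "proc2",
	// "proc3", "proc4") into a ClientRPCStats struct.
	stats, err := nfs.ParseClientRPCStats(f)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("rpc calls=%d retransmissions=%d auth refreshes=%d\n",
		stats.ClientRPC.RPCCount, stats.ClientRPC.Retransmissions,
		stats.ClientRPC.AuthRefreshes)
}
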
diff --git a/vendor/github.com/prometheus/procfs/nfs/parse_nfsd.go b/vendor/github.com/prometheus/procfs/nfs/parse_nfsd.go
new file mode 100644
index 00000000..57bb4a35
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/nfs/parse_nfsd.go
@@ -0,0 +1,89 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package nfs
+
+import (
+ "bufio"
+ "fmt"
+ "io"
+ "strings"
+
+ "github.com/prometheus/procfs/internal/util"
+)
+
+// ParseServerRPCStats returns stats read from /proc/net/rpc/nfsd
+func ParseServerRPCStats(r io.Reader) (*ServerRPCStats, error) {
+ stats := &ServerRPCStats{}
+
+ scanner := bufio.NewScanner(r)
+ for scanner.Scan() {
+ line := scanner.Text()
+ parts := strings.Fields(scanner.Text())
+ // require at least <key> <value>
+ if len(parts) < 2 {
+ return nil, fmt.Errorf("invalid NFSd metric line %q", line)
+ }
+ label := parts[0]
+
+ var values []uint64
+ var err error
+ if label == "th" {
+ if len(parts) < 3 {
+ return nil, fmt.Errorf("invalid NFSd th metric line %q", line)
+ }
+ values, err = util.ParseUint64s(parts[1:3])
+ } else {
+ values, err = util.ParseUint64s(parts[1:])
+ }
+ if err != nil {
+ return nil, fmt.Errorf("error parsing NFSd metric line: %s", err)
+ }
+
+ switch metricLine := parts[0]; metricLine {
+ case "rc":
+ stats.ReplyCache, err = parseReplyCache(values)
+ case "fh":
+ stats.FileHandles, err = parseFileHandles(values)
+ case "io":
+ stats.InputOutput, err = parseInputOutput(values)
+ case "th":
+ stats.Threads, err = parseThreads(values)
+ case "ra":
+ stats.ReadAheadCache, err = parseReadAheadCache(values)
+ case "net":
+ stats.Network, err = parseNetwork(values)
+ case "rpc":
+ stats.ServerRPC, err = parseServerRPC(values)
+ case "proc2":
+ stats.V2Stats, err = parseV2Stats(values)
+ case "proc3":
+ stats.V3Stats, err = parseV3Stats(values)
+ case "proc4":
+ stats.ServerV4Stats, err = parseServerV4Stats(values)
+ case "proc4ops":
+ stats.V4Ops, err = parseV4Ops(values)
+ default:
+ return nil, fmt.Errorf("unknown NFSd metric line %q", metricLine)
+ }
+ if err != nil {
+ return nil, fmt.Errorf("errors parsing NFSd metric line: %s", err)
+ }
+ }
+
+ if err := scanner.Err(); err != nil {
+ return nil, fmt.Errorf("error scanning NFSd file: %s", err)
+ }
+
+ return stats, nil
+}
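
The server-side parser can be exercised the same way; a sketch, assuming /proc/net/rpc/nfsd is readable on the host:

package main

import (
	"fmt"
	"log"
	"os"

	"github.com/prometheus/procfs/nfs"
)

func main() {
	f, err := os.Open("/proc/net/rpc/nfsd")
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	// ParseServerRPCStats dispatches each labelled line ("rc", "fh", "io",
	// "th", "ra", "net", "rpc", "proc2", "proc3", "proc4", "proc4ops") to
	// the matching parse helper.
	stats, err := nfs.ParseServerRPCStats(f)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("reply cache hits=%d misses=%d, threads=%d\n",
		stats.ReplyCache.Hits, stats.ReplyCache.Misses, stats.Threads.Threads)
}
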
diff --git a/vendor/github.com/prometheus/procfs/proc.go b/vendor/github.com/prometheus/procfs/proc.go
index 0d0a6a90..7cf5b8ac 100644
--- a/vendor/github.com/prometheus/procfs/proc.go
+++ b/vendor/github.com/prometheus/procfs/proc.go
@@ -1,6 +1,20 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
package procfs
import (
+ "bytes"
"fmt"
"io/ioutil"
"os"
@@ -113,7 +127,7 @@ func (p Proc) CmdLine() ([]string, error) {
return []string{}, nil
}
- return strings.Split(string(data[:len(data)-1]), string(byte(0))), nil
+ return strings.Split(string(bytes.TrimRight(data, string("\x00"))), string(byte(0))), nil
}
// Comm returns the command name of a process.
@@ -192,6 +206,18 @@ func (p Proc) FileDescriptorsLen() (int, error) {
return len(fds), nil
}
+// MountStats retrieves statistics and configuration for mount points in a
+// process's namespace.
+func (p Proc) MountStats() ([]*Mount, error) {
+ f, err := os.Open(p.path("mountstats"))
+ if err != nil {
+ return nil, err
+ }
+ defer f.Close()
+
+ return parseMountStats(f)
+}
+
func (p Proc) fileDescriptors() ([]string, error) {
d, err := os.Open(p.path("fd"))
if err != nil {
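
A hedged sketch of the new MountStats accessor; Self and the Mount type live in other procfs files that are not part of this hunk:

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	// Self is assumed to return the Proc entry for the current process.
	p, err := procfs.Self()
	if err != nil {
		log.Fatal(err)
	}

	// MountStats parses /proc/self/mountstats into a slice of *Mount.
	mounts, err := p.MountStats()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("found %d mount entries\n", len(mounts))
}
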
diff --git a/vendor/github.com/prometheus/procfs/proc_io.go b/vendor/github.com/prometheus/procfs/proc_io.go
index b4e31d7b..0251c83b 100644
--- a/vendor/github.com/prometheus/procfs/proc_io.go
+++ b/vendor/github.com/prometheus/procfs/proc_io.go
@@ -1,3 +1,16 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
package procfs
import (
@@ -47,9 +60,6 @@ func (p Proc) NewIO() (ProcIO, error) {
_, err = fmt.Sscanf(string(data), ioFormat, &pio.RChar, &pio.WChar, &pio.SyscR,
&pio.SyscW, &pio.ReadBytes, &pio.WriteBytes, &pio.CancelledWriteBytes)
- if err != nil {
- return pio, err
- }
- return pio, nil
+ return pio, err
}
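
With the early return collapsed into a single return pio, err statement, callers still check the error as usual; a minimal sketch, assuming procfs.Self is available:

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	p, err := procfs.Self()
	if err != nil {
		log.Fatal(err)
	}

	// NewIO parses /proc/self/io into a ProcIO value.
	pio, err := p.NewIO()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("read=%d bytes written=%d bytes (syscalls: %d reads, %d writes)\n",
		pio.ReadBytes, pio.WriteBytes, pio.SyscR, pio.SyscW)
}
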
diff --git a/vendor/github.com/prometheus/procfs/proc_limits.go b/vendor/github.com/prometheus/procfs/proc_limits.go
index 2df997ce..f04ba6fd 100644
--- a/vendor/github.com/prometheus/procfs/proc_limits.go
+++ b/vendor/github.com/prometheus/procfs/proc_limits.go
@@ -1,3 +1,16 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
package procfs
import (
@@ -13,46 +26,46 @@ import (
// http://man7.org/linux/man-pages/man2/getrlimit.2.html.
type ProcLimits struct {
// CPU time limit in seconds.
- CPUTime int
+ CPUTime int64
// Maximum size of files that the process may create.
- FileSize int
+ FileSize int64
// Maximum size of the process's data segment (initialized data,
// uninitialized data, and heap).
- DataSize int
+ DataSize int64
// Maximum size of the process stack in bytes.
- StackSize int
+ StackSize int64
// Maximum size of a core file.
- CoreFileSize int
+ CoreFileSize int64
// Limit of the process's resident set in pages.
- ResidentSet int
+ ResidentSet int64
// Maximum number of processes that can be created for the real user ID of
// the calling process.
- Processes int
+ Processes int64
// Value one greater than the maximum file descriptor number that can be
// opened by this process.
- OpenFiles int
+ OpenFiles int64
// Maximum number of bytes of memory that may be locked into RAM.
- LockedMemory int
+ LockedMemory int64
// Maximum size of the process's virtual memory address space in bytes.
- AddressSpace int
+ AddressSpace int64
// Limit on the combined number of flock(2) locks and fcntl(2) leases that
// this process may establish.
- FileLocks int
+ FileLocks int64
// Limit of signals that may be queued for the real user ID of the calling
// process.
- PendingSignals int
+ PendingSignals int64
// Limit on the number of bytes that can be allocated for POSIX message
// queues for the real user ID of the calling process.
- MsqqueueSize int
+ MsqqueueSize int64
// Limit of the nice priority set using setpriority(2) or nice(2).
- NicePriority int
+ NicePriority int64
// Limit of the real-time priority set using sched_setscheduler(2) or
// sched_setparam(2).
- RealtimePriority int
+ RealtimePriority int64
// Limit (in microseconds) on the amount of CPU time that a process
// scheduled under a real-time scheduling policy may consume without making
// a blocking system call.
- RealtimeTimeout int
+ RealtimeTimeout int64
}
const (
@@ -125,13 +138,13 @@ func (p Proc) NewLimits() (ProcLimits, error) {
return l, s.Err()
}
-func parseInt(s string) (int, error) {
+func parseInt(s string) (int64, error) {
if s == limitsUnlimited {
return -1, nil
}
- i, err := strconv.ParseInt(s, 10, 32)
+ i, err := strconv.ParseInt(s, 10, 64)
if err != nil {
return 0, fmt.Errorf("couldn't parse value %s: %s", s, err)
}
- return int(i), nil
+ return i, nil
}
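
The limits now come back as int64, with -1 still marking an unlimited value; a sketch of reading them for the current process, assuming procfs.Self:

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	p, err := procfs.Self()
	if err != nil {
		log.Fatal(err)
	}

	// NewLimits parses /proc/self/limits; every numeric field is now int64.
	l, err := p.NewLimits()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("open files=%d, address space=%d, cpu time=%d\n",
		l.OpenFiles, l.AddressSpace, l.CPUTime)
}
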
diff --git a/vendor/github.com/prometheus/procfs/proc_ns.go b/vendor/github.com/prometheus/procfs/proc_ns.go
new file mode 100644
index 00000000..d06c26eb
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/proc_ns.go
@@ -0,0 +1,68 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+ "fmt"
+ "os"
+ "strconv"
+ "strings"
+)
+
+// Namespace represents a single namespace of a process.
+type Namespace struct {
+ Type string // Namespace type.
+ Inode uint32 // Inode number of the namespace. If two processes are in the same namespace their inodes will match.
+}
+
+// Namespaces contains all of the namespaces that the process is contained in.
+type Namespaces map[string]Namespace
+
+// NewNamespaces reads from /proc/[pid]/ns/* to get the namespaces of which the
+// process is a member.
+func (p Proc) NewNamespaces() (Namespaces, error) {
+ d, err := os.Open(p.path("ns"))
+ if err != nil {
+ return nil, err
+ }
+ defer d.Close()
+
+ names, err := d.Readdirnames(-1)
+ if err != nil {
+ return nil, fmt.Errorf("failed to read contents of ns dir: %v", err)
+ }
+
+ ns := make(Namespaces, len(names))
+ for _, name := range names {
+ target, err := os.Readlink(p.path("ns", name))
+ if err != nil {
+ return nil, err
+ }
+
+ fields := strings.SplitN(target, ":", 2)
+ if len(fields) != 2 {
+ return nil, fmt.Errorf("failed to parse namespace type and inode from '%v'", target)
+ }
+
+ typ := fields[0]
+ inode, err := strconv.ParseUint(strings.Trim(fields[1], "[]"), 10, 32)
+ if err != nil {
+ return nil, fmt.Errorf("failed to parse inode from '%v': %v", fields[1], err)
+ }
+
+ ns[name] = Namespace{typ, uint32(inode)}
+ }
+
+ return ns, nil
+}
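
Two processes share a namespace exactly when the inode numbers reported here match; a sketch of listing the namespaces of the current process, assuming procfs.Self:

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	p, err := procfs.Self()
	if err != nil {
		log.Fatal(err)
	}

	// NewNamespaces reads /proc/self/ns/* and reports one Namespace
	// (type plus inode) per entry, keyed by the link name.
	ns, err := p.NewNamespaces()
	if err != nil {
		log.Fatal(err)
	}
	for name, n := range ns {
		fmt.Printf("%s: type=%s inode=%d\n", name, n.Type, n.Inode)
	}
}
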
diff --git a/vendor/github.com/prometheus/procfs/proc_stat.go b/vendor/github.com/prometheus/procfs/proc_stat.go
index 724e271b..3cf2a9f1 100644
--- a/vendor/github.com/prometheus/procfs/proc_stat.go
+++ b/vendor/github.com/prometheus/procfs/proc_stat.go
@@ -1,3 +1,16 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
package procfs
import (
diff --git a/vendor/github.com/prometheus/procfs/stat.go b/vendor/github.com/prometheus/procfs/stat.go
index 1ca217e8..61eb6b0e 100644
--- a/vendor/github.com/prometheus/procfs/stat.go
+++ b/vendor/github.com/prometheus/procfs/stat.go
@@ -1,17 +1,81 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
package procfs
import (
"bufio"
"fmt"
+ "io"
"os"
"strconv"
"strings"
)
+// CPUStat shows how much time the CPU spent in various stages.
+type CPUStat struct {
+ User float64
+ Nice float64
+ System float64
+ Idle float64
+ Iowait float64
+ IRQ float64
+ SoftIRQ float64
+ Steal float64
+ Guest float64
+ GuestNice float64
+}
+
+// SoftIRQStat represents the softirq statistics as exported in the procfs stat file.
+// A nice introduction can be found at https://0xax.gitbooks.io/linux-insides/content/interrupts/interrupts-9.html
+// It is possible to get per-cpu stats by reading /proc/softirqs
+type SoftIRQStat struct {
+ Hi uint64
+ Timer uint64
+ NetTx uint64
+ NetRx uint64
+ Block uint64
+ BlockIoPoll uint64
+ Tasklet uint64
+ Sched uint64
+ Hrtimer uint64
+ Rcu uint64
+}
+
// Stat represents kernel/system statistics.
type Stat struct {
// Boot time in seconds since the Epoch.
- BootTime int64
+ BootTime uint64
+ // Summed up cpu statistics.
+ CPUTotal CPUStat
+ // Per-CPU statistics.
+ CPU []CPUStat
+	// Total number of interrupts handled, including both numbered and unnumbered IRQs.
+ IRQTotal uint64
+ // Number of times a numbered IRQ was triggered.
+ IRQ []uint64
+ // Number of times a context switch happened.
+ ContextSwitches uint64
+ // Number of times a process was created.
+ ProcessCreated uint64
+ // Number of processes currently running.
+ ProcessesRunning uint64
+ // Number of processes currently blocked (waiting for IO).
+ ProcessesBlocked uint64
+ // Number of times a softirq was scheduled.
+ SoftIRQTotal uint64
+ // Detailed softirq statistics.
+ SoftIRQ SoftIRQStat
}
// NewStat returns kernel/system statistics read from /proc/stat.
@@ -24,33 +88,145 @@ func NewStat() (Stat, error) {
return fs.NewStat()
}
+// parseCPUStat parses a cpu statistics line and returns the CPUStat struct plus the cpu id (or -1 for the overall sum).
+func parseCPUStat(line string) (CPUStat, int64, error) {
+ cpuStat := CPUStat{}
+ var cpu string
+
+ count, err := fmt.Sscanf(line, "%s %f %f %f %f %f %f %f %f %f %f",
+ &cpu,
+ &cpuStat.User, &cpuStat.Nice, &cpuStat.System, &cpuStat.Idle,
+ &cpuStat.Iowait, &cpuStat.IRQ, &cpuStat.SoftIRQ, &cpuStat.Steal,
+ &cpuStat.Guest, &cpuStat.GuestNice)
+
+ if err != nil && err != io.EOF {
+ return CPUStat{}, -1, fmt.Errorf("couldn't parse %s (cpu): %s", line, err)
+ }
+ if count == 0 {
+ return CPUStat{}, -1, fmt.Errorf("couldn't parse %s (cpu): 0 elements parsed", line)
+ }
+
+ cpuStat.User /= userHZ
+ cpuStat.Nice /= userHZ
+ cpuStat.System /= userHZ
+ cpuStat.Idle /= userHZ
+ cpuStat.Iowait /= userHZ
+ cpuStat.IRQ /= userHZ
+ cpuStat.SoftIRQ /= userHZ
+ cpuStat.Steal /= userHZ
+ cpuStat.Guest /= userHZ
+ cpuStat.GuestNice /= userHZ
+
+ if cpu == "cpu" {
+ return cpuStat, -1, nil
+ }
+
+ cpuID, err := strconv.ParseInt(cpu[3:], 10, 64)
+ if err != nil {
+ return CPUStat{}, -1, fmt.Errorf("couldn't parse %s (cpu/cpuid): %s", line, err)
+ }
+
+ return cpuStat, cpuID, nil
+}
+
+// parseSoftIRQStat parses a softirq line.
+func parseSoftIRQStat(line string) (SoftIRQStat, uint64, error) {
+ softIRQStat := SoftIRQStat{}
+ var total uint64
+ var prefix string
+
+ _, err := fmt.Sscanf(line, "%s %d %d %d %d %d %d %d %d %d %d %d",
+ &prefix, &total,
+ &softIRQStat.Hi, &softIRQStat.Timer, &softIRQStat.NetTx, &softIRQStat.NetRx,
+ &softIRQStat.Block, &softIRQStat.BlockIoPoll,
+ &softIRQStat.Tasklet, &softIRQStat.Sched,
+ &softIRQStat.Hrtimer, &softIRQStat.Rcu)
+
+ if err != nil {
+ return SoftIRQStat{}, 0, fmt.Errorf("couldn't parse %s (softirq): %s", line, err)
+ }
+
+ return softIRQStat, total, nil
+}
+
+// NewStat returns information about current kernel/system statistics.
func (fs FS) NewStat() (Stat, error) {
+ // See https://www.kernel.org/doc/Documentation/filesystems/proc.txt
+
f, err := os.Open(fs.Path("stat"))
if err != nil {
return Stat{}, err
}
defer f.Close()
- s := bufio.NewScanner(f)
- for s.Scan() {
- line := s.Text()
- if !strings.HasPrefix(line, "btime") {
+ stat := Stat{}
+
+ scanner := bufio.NewScanner(f)
+ for scanner.Scan() {
+ line := scanner.Text()
+ parts := strings.Fields(scanner.Text())
+ // require at least <key> <value>
+ if len(parts) < 2 {
continue
}
- fields := strings.Fields(line)
- if len(fields) != 2 {
- return Stat{}, fmt.Errorf("couldn't parse %s line %s", f.Name(), line)
- }
- i, err := strconv.ParseInt(fields[1], 10, 32)
- if err != nil {
- return Stat{}, fmt.Errorf("couldn't parse %s: %s", fields[1], err)
+ switch {
+ case parts[0] == "btime":
+ if stat.BootTime, err = strconv.ParseUint(parts[1], 10, 64); err != nil {
+ return Stat{}, fmt.Errorf("couldn't parse %s (btime): %s", parts[1], err)
+ }
+ case parts[0] == "intr":
+ if stat.IRQTotal, err = strconv.ParseUint(parts[1], 10, 64); err != nil {
+ return Stat{}, fmt.Errorf("couldn't parse %s (intr): %s", parts[1], err)
+ }
+ numberedIRQs := parts[2:]
+ stat.IRQ = make([]uint64, len(numberedIRQs))
+ for i, count := range numberedIRQs {
+ if stat.IRQ[i], err = strconv.ParseUint(count, 10, 64); err != nil {
+ return Stat{}, fmt.Errorf("couldn't parse %s (intr%d): %s", count, i, err)
+ }
+ }
+ case parts[0] == "ctxt":
+ if stat.ContextSwitches, err = strconv.ParseUint(parts[1], 10, 64); err != nil {
+ return Stat{}, fmt.Errorf("couldn't parse %s (ctxt): %s", parts[1], err)
+ }
+ case parts[0] == "processes":
+ if stat.ProcessCreated, err = strconv.ParseUint(parts[1], 10, 64); err != nil {
+ return Stat{}, fmt.Errorf("couldn't parse %s (processes): %s", parts[1], err)
+ }
+ case parts[0] == "procs_running":
+ if stat.ProcessesRunning, err = strconv.ParseUint(parts[1], 10, 64); err != nil {
+ return Stat{}, fmt.Errorf("couldn't parse %s (procs_running): %s", parts[1], err)
+ }
+ case parts[0] == "procs_blocked":
+ if stat.ProcessesBlocked, err = strconv.ParseUint(parts[1], 10, 64); err != nil {
+ return Stat{}, fmt.Errorf("couldn't parse %s (procs_blocked): %s", parts[1], err)
+ }
+ case parts[0] == "softirq":
+ softIRQStats, total, err := parseSoftIRQStat(line)
+ if err != nil {
+ return Stat{}, err
+ }
+ stat.SoftIRQTotal = total
+ stat.SoftIRQ = softIRQStats
+ case strings.HasPrefix(parts[0], "cpu"):
+ cpuStat, cpuID, err := parseCPUStat(line)
+ if err != nil {
+ return Stat{}, err
+ }
+ if cpuID == -1 {
+ stat.CPUTotal = cpuStat
+ } else {
+ for int64(len(stat.CPU)) <= cpuID {
+ stat.CPU = append(stat.CPU, CPUStat{})
+ }
+ stat.CPU[cpuID] = cpuStat
+ }
}
- return Stat{BootTime: i}, nil
}
- if err := s.Err(); err != nil {
+
+ if err := scanner.Err(); err != nil {
return Stat{}, fmt.Errorf("couldn't parse %s: %s", f.Name(), err)
}
- return Stat{}, fmt.Errorf("couldn't parse %s, missing btime", f.Name())
+ return stat, nil
}
diff --git a/vendor/github.com/prometheus/procfs/ttar b/vendor/github.com/prometheus/procfs/ttar
new file mode 100755
index 00000000..b0171a12
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/ttar
@@ -0,0 +1,389 @@
+#!/usr/bin/env bash
+
+# Purpose: plain text tar format
+# Limitations: - only suitable for text files, directories, and symlinks
+# - stores only filename, content, and mode
+# - not designed for untrusted input
+#
+# Note: must work with bash version 3.2 (macOS)
+
+# Copyright 2017 Roger Luethi
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -o errexit -o nounset
+
+# Sanitize environment (for instance, standard sorting of glob matches)
+export LC_ALL=C
+
+path=""
+CMD=""
+ARG_STRING="$*"
+
+#------------------------------------------------------------------------------
+# Not all sed implementations can work on null bytes. In order to make ttar
+# work out of the box on macOS, use Python as a stream editor.
+
+USE_PYTHON=0
+
+PYTHON_CREATE_FILTER=$(cat << 'PCF'
+#!/usr/bin/env python
+
+import re
+import sys
+
+for line in sys.stdin:
+ line = re.sub(r'EOF', r'\EOF', line)
+ line = re.sub(r'NULLBYTE', r'\NULLBYTE', line)
+ line = re.sub('\x00', r'NULLBYTE', line)
+ sys.stdout.write(line)
+PCF
+)
+
+PYTHON_EXTRACT_FILTER=$(cat << 'PEF'
+#!/usr/bin/env python
+
+import re
+import sys
+
+for line in sys.stdin:
+ line = re.sub(r'(?<!\\)NULLBYTE', '\x00', line)
+ line = re.sub(r'\\NULLBYTE', 'NULLBYTE', line)
+ line = re.sub(r'([^\\])EOF', r'\1', line)
+ line = re.sub(r'\\EOF', 'EOF', line)
+ sys.stdout.write(line)
+PEF
+)
+
+function test_environment {
+ if [[ "$(echo "a" | sed 's/a/\x0/' | wc -c)" -ne 2 ]]; then
+ echo "WARNING sed unable to handle null bytes, using Python (slow)."
+ if ! which python >/dev/null; then
+ echo "ERROR Python not found. Aborting."
+ exit 2
+ fi
+ USE_PYTHON=1
+ fi
+}
+
+#------------------------------------------------------------------------------
+
+function usage {
+ bname=$(basename "$0")
+ cat << USAGE
+Usage: $bname [-C <DIR>] -c -f <ARCHIVE> <FILE...> (create archive)
+ $bname -t -f <ARCHIVE> (list archive contents)
+ $bname [-C <DIR>] -x -f <ARCHIVE> (extract archive)
+
+Options:
+ -C <DIR> (change directory)
+ -v (verbose)
+
+Example: Change to sysfs directory, create ttar file from fixtures directory
+ $bname -C sysfs -c -f sysfs/fixtures.ttar fixtures/
+USAGE
+exit "$1"
+}
+
+function vecho {
+ if [ "${VERBOSE:-}" == "yes" ]; then
+ echo >&7 "$@"
+ fi
+}
+
+function set_cmd {
+ if [ -n "$CMD" ]; then
+ echo "ERROR: more than one command given"
+ echo
+ usage 2
+ fi
+ CMD=$1
+}
+
+unset VERBOSE
+
+while getopts :cf:htxvC: opt; do
+ case $opt in
+ c)
+ set_cmd "create"
+ ;;
+ f)
+ ARCHIVE=$OPTARG
+ ;;
+ h)
+ usage 0
+ ;;
+ t)
+ set_cmd "list"
+ ;;
+ x)
+ set_cmd "extract"
+ ;;
+ v)
+ VERBOSE=yes
+ exec 7>&1
+ ;;
+ C)
+ CDIR=$OPTARG
+ ;;
+ *)
+ echo >&2 "ERROR: invalid option -$OPTARG"
+ echo
+ usage 1
+ ;;
+ esac
+done
+
+# Remove processed options from arguments
+shift $(( OPTIND - 1 ));
+
+if [ "${CMD:-}" == "" ]; then
+ echo >&2 "ERROR: no command given"
+ echo
+ usage 1
+elif [ "${ARCHIVE:-}" == "" ]; then
+ echo >&2 "ERROR: no archive name given"
+ echo
+ usage 1
+fi
+
+function list {
+ local path=""
+ local size=0
+ local line_no=0
+ local ttar_file=$1
+ if [ -n "${2:-}" ]; then
+ echo >&2 "ERROR: too many arguments."
+ echo
+ usage 1
+ fi
+ if [ ! -e "$ttar_file" ]; then
+ echo >&2 "ERROR: file not found ($ttar_file)"
+ echo
+ usage 1
+ fi
+ while read -r line; do
+ line_no=$(( line_no + 1 ))
+ if [ $size -gt 0 ]; then
+ size=$(( size - 1 ))
+ continue
+ fi
+ if [[ $line =~ ^Path:\ (.*)$ ]]; then
+ path=${BASH_REMATCH[1]}
+ elif [[ $line =~ ^Lines:\ (.*)$ ]]; then
+ size=${BASH_REMATCH[1]}
+ echo "$path"
+ elif [[ $line =~ ^Directory:\ (.*)$ ]]; then
+ path=${BASH_REMATCH[1]}
+ echo "$path/"
+ elif [[ $line =~ ^SymlinkTo:\ (.*)$ ]]; then
+ echo "$path -> ${BASH_REMATCH[1]}"
+ fi
+ done < "$ttar_file"
+}
+
+function extract {
+ local path=""
+ local size=0
+ local line_no=0
+ local ttar_file=$1
+ if [ -n "${2:-}" ]; then
+ echo >&2 "ERROR: too many arguments."
+ echo
+ usage 1
+ fi
+ if [ ! -e "$ttar_file" ]; then
+ echo >&2 "ERROR: file not found ($ttar_file)"
+ echo
+ usage 1
+ fi
+ while IFS= read -r line; do
+ line_no=$(( line_no + 1 ))
+ local eof_without_newline
+ if [ "$size" -gt 0 ]; then
+ if [[ "$line" =~ [^\\]EOF ]]; then
+                # An EOF not preceded by a backslash indicates that the line
+ # does not end with a newline
+ eof_without_newline=1
+ else
+ eof_without_newline=0
+ fi
+ # Replace NULLBYTE with null byte if at beginning of line
+            # Replace NULLBYTE with null byte unless preceded by backslash
+ # Remove one backslash in front of NULLBYTE (if any)
+            # Remove EOF unless preceded by backslash
+ # Remove one backslash in front of EOF
+ if [ $USE_PYTHON -eq 1 ]; then
+ echo -n "$line" | python -c "$PYTHON_EXTRACT_FILTER" >> "$path"
+ else
+ # The repeated pattern makes up for sed's lack of negative
+ # lookbehind assertions (for consecutive null bytes).
+ echo -n "$line" | \
+ sed -e 's/^NULLBYTE/\x0/g;
+ s/\([^\\]\)NULLBYTE/\1\x0/g;
+ s/\([^\\]\)NULLBYTE/\1\x0/g;
+ s/\\NULLBYTE/NULLBYTE/g;
+ s/\([^\\]\)EOF/\1/g;
+ s/\\EOF/EOF/g;
+ ' >> "$path"
+ fi
+ if [[ "$eof_without_newline" -eq 0 ]]; then
+ echo >> "$path"
+ fi
+ size=$(( size - 1 ))
+ continue
+ fi
+ if [[ $line =~ ^Path:\ (.*)$ ]]; then
+ path=${BASH_REMATCH[1]}
+ if [ -e "$path" ] || [ -L "$path" ]; then
+ rm "$path"
+ fi
+ elif [[ $line =~ ^Lines:\ (.*)$ ]]; then
+ size=${BASH_REMATCH[1]}
+ # Create file even if it is zero-length.
+ touch "$path"
+ vecho " $path"
+ elif [[ $line =~ ^Mode:\ (.*)$ ]]; then
+ mode=${BASH_REMATCH[1]}
+ chmod "$mode" "$path"
+ vecho "$mode"
+ elif [[ $line =~ ^Directory:\ (.*)$ ]]; then
+ path=${BASH_REMATCH[1]}
+ mkdir -p "$path"
+ vecho " $path/"
+ elif [[ $line =~ ^SymlinkTo:\ (.*)$ ]]; then
+ ln -s "${BASH_REMATCH[1]}" "$path"
+ vecho " $path -> ${BASH_REMATCH[1]}"
+ elif [[ $line =~ ^# ]]; then
+ # Ignore comments between files
+ continue
+ else
+ echo >&2 "ERROR: Unknown keyword on line $line_no: $line"
+ exit 1
+ fi
+ done < "$ttar_file"
+}
+
+function div {
+ echo "# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -" \
+ "- - - - - -"
+}
+
+function get_mode {
+ local mfile=$1
+ if [ -z "${STAT_OPTION:-}" ]; then
+ if stat -c '%a' "$mfile" >/dev/null 2>&1; then
+ # GNU stat
+ STAT_OPTION='-c'
+ STAT_FORMAT='%a'
+ else
+ # BSD stat
+ STAT_OPTION='-f'
+ # Octal output, user/group/other (omit file type, sticky bit)
+ STAT_FORMAT='%OLp'
+ fi
+ fi
+ stat "${STAT_OPTION}" "${STAT_FORMAT}" "$mfile"
+}
+
+function _create {
+ shopt -s nullglob
+ local mode
+ local eof_without_newline
+ while (( "$#" )); do
+ file=$1
+ if [ -L "$file" ]; then
+ echo "Path: $file"
+ symlinkTo=$(readlink "$file")
+ echo "SymlinkTo: $symlinkTo"
+ vecho " $file -> $symlinkTo"
+ div
+ elif [ -d "$file" ]; then
+ # Strip trailing slash (if there is one)
+ file=${file%/}
+ echo "Directory: $file"
+ mode=$(get_mode "$file")
+ echo "Mode: $mode"
+ vecho "$mode $file/"
+ div
+ # Find all files and dirs, including hidden/dot files
+ for x in "$file/"{*,.[^.]*}; do
+ _create "$x"
+ done
+ elif [ -f "$file" ]; then
+ echo "Path: $file"
+ lines=$(wc -l "$file"|awk '{print $1}')
+ eof_without_newline=0
+ if [[ "$(wc -c "$file"|awk '{print $1}')" -gt 0 ]] && \
+ [[ "$(tail -c 1 "$file" | wc -l)" -eq 0 ]]; then
+ eof_without_newline=1
+ lines=$((lines+1))
+ fi
+ echo "Lines: $lines"
+ # Add backslash in front of EOF
+ # Add backslash in front of NULLBYTE
+ # Replace null byte with NULLBYTE
+ if [ $USE_PYTHON -eq 1 ]; then
+ < "$file" python -c "$PYTHON_CREATE_FILTER"
+ else
+ < "$file" \
+ sed 's/EOF/\\EOF/g;
+ s/NULLBYTE/\\NULLBYTE/g;
+ s/\x0/NULLBYTE/g;
+ '
+ fi
+ if [[ "$eof_without_newline" -eq 1 ]]; then
+ # Finish line with EOF to indicate that the original line did
+ # not end with a linefeed
+ echo "EOF"
+ fi
+ mode=$(get_mode "$file")
+ echo "Mode: $mode"
+ vecho "$mode $file"
+ div
+ else
+ echo >&2 "ERROR: file not found ($file in $(pwd))"
+ exit 2
+ fi
+ shift
+ done
+}
+
+function create {
+ ttar_file=$1
+ shift
+ if [ -z "${1:-}" ]; then
+ echo >&2 "ERROR: missing arguments."
+ echo
+ usage 1
+ fi
+ if [ -e "$ttar_file" ]; then
+ rm "$ttar_file"
+ fi
+ exec > "$ttar_file"
+ echo "# Archive created by ttar $ARG_STRING"
+ _create "$@"
+}
+
+test_environment
+
+if [ -n "${CDIR:-}" ]; then
+ if [[ "$ARCHIVE" != /* ]]; then
+ # Relative path: preserve the archive's location before changing
+ # directory
+ ARCHIVE="$(pwd)/$ARCHIVE"
+ fi
+ cd "$CDIR"
+fi
+
+"$CMD" "$ARCHIVE" "$@"
diff --git a/vendor/github.com/prometheus/procfs/xfrm.go b/vendor/github.com/prometheus/procfs/xfrm.go
new file mode 100644
index 00000000..8f1508f0
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/xfrm.go
@@ -0,0 +1,187 @@
+// Copyright 2017 Prometheus Team
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+ "bufio"
+ "fmt"
+ "os"
+ "strconv"
+ "strings"
+)
+
+// XfrmStat models the contents of /proc/net/xfrm_stat.
+type XfrmStat struct {
+ // All errors which are not matched by other
+ XfrmInError int
+ // No buffer is left
+ XfrmInBufferError int
+ // Header Error
+ XfrmInHdrError int
+ // No state found
+ // i.e. either inbound SPI, address, or IPSEC protocol at SA is wrong
+ XfrmInNoStates int
+ // Transformation protocol specific error
+ // e.g. SA Key is wrong
+ XfrmInStateProtoError int
+ // Transformation mode specific error
+ XfrmInStateModeError int
+ // Sequence error
+ // e.g. sequence number is out of window
+ XfrmInStateSeqError int
+ // State is expired
+ XfrmInStateExpired int
+ // State has mismatch option
+ // e.g. UDP encapsulation type is mismatched
+ XfrmInStateMismatch int
+ // State is invalid
+ XfrmInStateInvalid int
+ // No matching template for states
+ // e.g. Inbound SAs are correct but SP rule is wrong
+ XfrmInTmplMismatch int
+ // No policy is found for states
+ // e.g. Inbound SAs are correct but no SP is found
+ XfrmInNoPols int
+ // Policy discards
+ XfrmInPolBlock int
+ // Policy error
+ XfrmInPolError int
+ // All errors which are not matched by others
+ XfrmOutError int
+ // Bundle generation error
+ XfrmOutBundleGenError int
+ // Bundle check error
+ XfrmOutBundleCheckError int
+ // No state was found
+ XfrmOutNoStates int
+ // Transformation protocol specific error
+ XfrmOutStateProtoError int
+	// Transformation mode specific error
+ XfrmOutStateModeError int
+ // Sequence error
+	// i.e. sequence number overflow
+ XfrmOutStateSeqError int
+ // State is expired
+ XfrmOutStateExpired int
+	// Policy discards
+ XfrmOutPolBlock int
+ // Policy is dead
+ XfrmOutPolDead int
+ // Policy Error
+ XfrmOutPolError int
+ XfrmFwdHdrError int
+ XfrmOutStateInvalid int
+ XfrmAcquireError int
+}
+
+// NewXfrmStat reads the xfrm_stat statistics.
+func NewXfrmStat() (XfrmStat, error) {
+ fs, err := NewFS(DefaultMountPoint)
+ if err != nil {
+ return XfrmStat{}, err
+ }
+
+ return fs.NewXfrmStat()
+}
+
+// NewXfrmStat reads the xfrm_stat statistics from the 'proc' filesystem.
+func (fs FS) NewXfrmStat() (XfrmStat, error) {
+ file, err := os.Open(fs.Path("net/xfrm_stat"))
+ if err != nil {
+ return XfrmStat{}, err
+ }
+ defer file.Close()
+
+ var (
+ x = XfrmStat{}
+ s = bufio.NewScanner(file)
+ )
+
+ for s.Scan() {
+ fields := strings.Fields(s.Text())
+
+ if len(fields) != 2 {
+ return XfrmStat{}, fmt.Errorf(
+ "couldn't parse %s line %s", file.Name(), s.Text())
+ }
+
+ name := fields[0]
+ value, err := strconv.Atoi(fields[1])
+ if err != nil {
+ return XfrmStat{}, err
+ }
+
+ switch name {
+ case "XfrmInError":
+ x.XfrmInError = value
+ case "XfrmInBufferError":
+ x.XfrmInBufferError = value
+ case "XfrmInHdrError":
+ x.XfrmInHdrError = value
+ case "XfrmInNoStates":
+ x.XfrmInNoStates = value
+ case "XfrmInStateProtoError":
+ x.XfrmInStateProtoError = value
+ case "XfrmInStateModeError":
+ x.XfrmInStateModeError = value
+ case "XfrmInStateSeqError":
+ x.XfrmInStateSeqError = value
+ case "XfrmInStateExpired":
+ x.XfrmInStateExpired = value
+ case "XfrmInStateInvalid":
+ x.XfrmInStateInvalid = value
+ case "XfrmInTmplMismatch":
+ x.XfrmInTmplMismatch = value
+ case "XfrmInNoPols":
+ x.XfrmInNoPols = value
+ case "XfrmInPolBlock":
+ x.XfrmInPolBlock = value
+ case "XfrmInPolError":
+ x.XfrmInPolError = value
+ case "XfrmOutError":
+ x.XfrmOutError = value
+ case "XfrmInStateMismatch":
+ x.XfrmInStateMismatch = value
+ case "XfrmOutBundleGenError":
+ x.XfrmOutBundleGenError = value
+ case "XfrmOutBundleCheckError":
+ x.XfrmOutBundleCheckError = value
+ case "XfrmOutNoStates":
+ x.XfrmOutNoStates = value
+ case "XfrmOutStateProtoError":
+ x.XfrmOutStateProtoError = value
+ case "XfrmOutStateModeError":
+ x.XfrmOutStateModeError = value
+ case "XfrmOutStateSeqError":
+ x.XfrmOutStateSeqError = value
+ case "XfrmOutStateExpired":
+ x.XfrmOutStateExpired = value
+ case "XfrmOutPolBlock":
+ x.XfrmOutPolBlock = value
+ case "XfrmOutPolDead":
+ x.XfrmOutPolDead = value
+ case "XfrmOutPolError":
+ x.XfrmOutPolError = value
+ case "XfrmFwdHdrError":
+ x.XfrmFwdHdrError = value
+ case "XfrmOutStateInvalid":
+ x.XfrmOutStateInvalid = value
+ case "XfrmAcquireError":
+ x.XfrmAcquireError = value
+ }
+
+ }
+
+ return x, s.Err()
+}
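
A sketch of reading these IPsec transformation counters via the package-level constructor defined above:

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	// NewXfrmStat parses /proc/net/xfrm_stat from the default mount point.
	x, err := procfs.NewXfrmStat()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("inbound errors=%d, missing inbound states=%d, outbound errors=%d\n",
		x.XfrmInError, x.XfrmInNoStates, x.XfrmOutError)
}
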
diff --git a/vendor/github.com/prometheus/procfs/xfs/parse.go b/vendor/github.com/prometheus/procfs/xfs/parse.go
new file mode 100644
index 00000000..2bc0ef34
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/xfs/parse.go
@@ -0,0 +1,330 @@
+// Copyright 2017 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package xfs
+
+import (
+ "bufio"
+ "fmt"
+ "io"
+ "strings"
+
+ "github.com/prometheus/procfs/internal/util"
+)
+
+// ParseStats parses a Stats from an input io.Reader, using the format
+// found in /proc/fs/xfs/stat.
+func ParseStats(r io.Reader) (*Stats, error) {
+ const (
+ // Fields parsed into stats structures.
+ fieldExtentAlloc = "extent_alloc"
+ fieldAbt = "abt"
+ fieldBlkMap = "blk_map"
+ fieldBmbt = "bmbt"
+ fieldDir = "dir"
+ fieldTrans = "trans"
+ fieldIg = "ig"
+ fieldLog = "log"
+ fieldRw = "rw"
+ fieldAttr = "attr"
+ fieldIcluster = "icluster"
+ fieldVnodes = "vnodes"
+ fieldBuf = "buf"
+ fieldXpc = "xpc"
+
+ // Unimplemented at this time due to lack of documentation.
+ fieldPushAil = "push_ail"
+ fieldXstrat = "xstrat"
+ fieldAbtb2 = "abtb2"
+ fieldAbtc2 = "abtc2"
+ fieldBmbt2 = "bmbt2"
+ fieldIbt2 = "ibt2"
+ fieldFibt2 = "fibt2"
+ fieldQm = "qm"
+ fieldDebug = "debug"
+ )
+
+ var xfss Stats
+
+ s := bufio.NewScanner(r)
+ for s.Scan() {
+ // Expect at least a string label and a single integer value, ex:
+ // - abt 0
+ // - rw 1 2
+ ss := strings.Fields(string(s.Bytes()))
+ if len(ss) < 2 {
+ continue
+ }
+ label := ss[0]
+
+ // Extended precision counters are uint64 values.
+ if label == fieldXpc {
+ us, err := util.ParseUint64s(ss[1:])
+ if err != nil {
+ return nil, err
+ }
+
+ xfss.ExtendedPrecision, err = extendedPrecisionStats(us)
+ if err != nil {
+ return nil, err
+ }
+
+ continue
+ }
+
+ // All other counters are uint32 values.
+ us, err := util.ParseUint32s(ss[1:])
+ if err != nil {
+ return nil, err
+ }
+
+ switch label {
+ case fieldExtentAlloc:
+ xfss.ExtentAllocation, err = extentAllocationStats(us)
+ case fieldAbt:
+ xfss.AllocationBTree, err = btreeStats(us)
+ case fieldBlkMap:
+ xfss.BlockMapping, err = blockMappingStats(us)
+ case fieldBmbt:
+ xfss.BlockMapBTree, err = btreeStats(us)
+ case fieldDir:
+ xfss.DirectoryOperation, err = directoryOperationStats(us)
+ case fieldTrans:
+ xfss.Transaction, err = transactionStats(us)
+ case fieldIg:
+ xfss.InodeOperation, err = inodeOperationStats(us)
+ case fieldLog:
+ xfss.LogOperation, err = logOperationStats(us)
+ case fieldRw:
+ xfss.ReadWrite, err = readWriteStats(us)
+ case fieldAttr:
+ xfss.AttributeOperation, err = attributeOperationStats(us)
+ case fieldIcluster:
+ xfss.InodeClustering, err = inodeClusteringStats(us)
+ case fieldVnodes:
+ xfss.Vnode, err = vnodeStats(us)
+ case fieldBuf:
+ xfss.Buffer, err = bufferStats(us)
+ }
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ return &xfss, s.Err()
+}
+
+// extentAllocationStats builds an ExtentAllocationStats from a slice of uint32s.
+func extentAllocationStats(us []uint32) (ExtentAllocationStats, error) {
+ if l := len(us); l != 4 {
+ return ExtentAllocationStats{}, fmt.Errorf("incorrect number of values for XFS extent allocation stats: %d", l)
+ }
+
+ return ExtentAllocationStats{
+ ExtentsAllocated: us[0],
+ BlocksAllocated: us[1],
+ ExtentsFreed: us[2],
+ BlocksFreed: us[3],
+ }, nil
+}
+
+// btreeStats builds a BTreeStats from a slice of uint32s.
+func btreeStats(us []uint32) (BTreeStats, error) {
+ if l := len(us); l != 4 {
+ return BTreeStats{}, fmt.Errorf("incorrect number of values for XFS btree stats: %d", l)
+ }
+
+ return BTreeStats{
+ Lookups: us[0],
+ Compares: us[1],
+ RecordsInserted: us[2],
+ RecordsDeleted: us[3],
+ }, nil
+}
+
+// BlockMappingStat builds a BlockMappingStats from a slice of uint32s.
+func blockMappingStats(us []uint32) (BlockMappingStats, error) {
+ if l := len(us); l != 7 {
+ return BlockMappingStats{}, fmt.Errorf("incorrect number of values for XFS block mapping stats: %d", l)
+ }
+
+ return BlockMappingStats{
+ Reads: us[0],
+ Writes: us[1],
+ Unmaps: us[2],
+ ExtentListInsertions: us[3],
+ ExtentListDeletions: us[4],
+ ExtentListLookups: us[5],
+ ExtentListCompares: us[6],
+ }, nil
+}
+
+// DirectoryOperationStats builds a DirectoryOperationStats from a slice of uint32s.
+func directoryOperationStats(us []uint32) (DirectoryOperationStats, error) {
+ if l := len(us); l != 4 {
+ return DirectoryOperationStats{}, fmt.Errorf("incorrect number of values for XFS directory operation stats: %d", l)
+ }
+
+ return DirectoryOperationStats{
+ Lookups: us[0],
+ Creates: us[1],
+ Removes: us[2],
+ Getdents: us[3],
+ }, nil
+}
+
+// TransactionStats builds a TransactionStats from a slice of uint32s.
+func transactionStats(us []uint32) (TransactionStats, error) {
+ if l := len(us); l != 3 {
+ return TransactionStats{}, fmt.Errorf("incorrect number of values for XFS transaction stats: %d", l)
+ }
+
+ return TransactionStats{
+ Sync: us[0],
+ Async: us[1],
+ Empty: us[2],
+ }, nil
+}
+
+// InodeOperationStats builds an InodeOperationStats from a slice of uint32s.
+func inodeOperationStats(us []uint32) (InodeOperationStats, error) {
+ if l := len(us); l != 7 {
+ return InodeOperationStats{}, fmt.Errorf("incorrect number of values for XFS inode operation stats: %d", l)
+ }
+
+ return InodeOperationStats{
+ Attempts: us[0],
+ Found: us[1],
+ Recycle: us[2],
+ Missed: us[3],
+ Duplicate: us[4],
+ Reclaims: us[5],
+ AttributeChange: us[6],
+ }, nil
+}
+
+// LogOperationStats builds a LogOperationStats from a slice of uint32s.
+func logOperationStats(us []uint32) (LogOperationStats, error) {
+ if l := len(us); l != 5 {
+ return LogOperationStats{}, fmt.Errorf("incorrect number of values for XFS log operation stats: %d", l)
+ }
+
+ return LogOperationStats{
+ Writes: us[0],
+ Blocks: us[1],
+ NoInternalBuffers: us[2],
+ Force: us[3],
+ ForceSleep: us[4],
+ }, nil
+}
+
+// ReadWriteStats builds a ReadWriteStats from a slice of uint32s.
+func readWriteStats(us []uint32) (ReadWriteStats, error) {
+ if l := len(us); l != 2 {
+ return ReadWriteStats{}, fmt.Errorf("incorrect number of values for XFS read write stats: %d", l)
+ }
+
+ return ReadWriteStats{
+ Read: us[0],
+ Write: us[1],
+ }, nil
+}
+
+// AttributeOperationStats builds an AttributeOperationStats from a slice of uint32s.
+func attributeOperationStats(us []uint32) (AttributeOperationStats, error) {
+ if l := len(us); l != 4 {
+ return AttributeOperationStats{}, fmt.Errorf("incorrect number of values for XFS attribute operation stats: %d", l)
+ }
+
+ return AttributeOperationStats{
+ Get: us[0],
+ Set: us[1],
+ Remove: us[2],
+ List: us[3],
+ }, nil
+}
+
+// InodeClusteringStats builds an InodeClusteringStats from a slice of uint32s.
+func inodeClusteringStats(us []uint32) (InodeClusteringStats, error) {
+ if l := len(us); l != 3 {
+ return InodeClusteringStats{}, fmt.Errorf("incorrect number of values for XFS inode clustering stats: %d", l)
+ }
+
+ return InodeClusteringStats{
+ Iflush: us[0],
+ Flush: us[1],
+ FlushInode: us[2],
+ }, nil
+}
+
+// VnodeStats builds a VnodeStats from a slice of uint32s.
+func vnodeStats(us []uint32) (VnodeStats, error) {
+ // The attribute "Free" appears to not be available on older XFS
+ // stats versions. Therefore, 7 or 8 elements may appear in
+ // this slice.
+ l := len(us)
+ if l != 7 && l != 8 {
+ return VnodeStats{}, fmt.Errorf("incorrect number of values for XFS vnode stats: %d", l)
+ }
+
+ s := VnodeStats{
+ Active: us[0],
+ Allocate: us[1],
+ Get: us[2],
+ Hold: us[3],
+ Release: us[4],
+ Reclaim: us[5],
+ Remove: us[6],
+ }
+
+ // Skip adding free, unless it is present. The zero value will
+ // be used in place of an actual count.
+ if l == 7 {
+ return s, nil
+ }
+
+ s.Free = us[7]
+ return s, nil
+}
+
+// BufferStats builds a BufferStats from a slice of uint32s.
+func bufferStats(us []uint32) (BufferStats, error) {
+ if l := len(us); l != 9 {
+ return BufferStats{}, fmt.Errorf("incorrect number of values for XFS buffer stats: %d", l)
+ }
+
+ return BufferStats{
+ Get: us[0],
+ Create: us[1],
+ GetLocked: us[2],
+ GetLockedWaited: us[3],
+ BusyLocked: us[4],
+ MissLocked: us[5],
+ PageRetries: us[6],
+ PageFound: us[7],
+ GetRead: us[8],
+ }, nil
+}
+
+// ExtendedPrecisionStats builds an ExtendedPrecisionStats from a slice of uint32s.
+func extendedPrecisionStats(us []uint64) (ExtendedPrecisionStats, error) {
+ if l := len(us); l != 3 {
+ return ExtendedPrecisionStats{}, fmt.Errorf("incorrect number of values for XFS extended precision stats: %d", l)
+ }
+
+ return ExtendedPrecisionStats{
+ FlushBytes: us[0],
+ WriteBytes: us[1],
+ ReadBytes: us[2],
+ }, nil
+}
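Each of the constructors above follows the same pattern: validate that the slice has exactly the expected length, then map positional values onto named struct fields. As a minimal sketch, a test in the same package could exercise that contract as follows; the test name and sample values are illustrative and not part of the vendored code.

package xfs

import "testing"

// TestReadWriteStats checks the fixed-length contract of readWriteStats:
// exactly two values succeed, any other length is rejected.
func TestReadWriteStats(t *testing.T) {
	got, err := readWriteStats([]uint32{1492763, 1073211})
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	if got.Read != 1492763 || got.Write != 1073211 {
		t.Fatalf("unexpected ReadWriteStats: %+v", got)
	}

	// A slice of any other length is rejected rather than silently truncated.
	if _, err := readWriteStats([]uint32{1, 2, 3}); err == nil {
		t.Fatal("expected an error for a 3-element slice")
	}
}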
diff --git a/vendor/github.com/prometheus/procfs/xfs/xfs.go b/vendor/github.com/prometheus/procfs/xfs/xfs.go
new file mode 100644
index 00000000..d86794b7
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/xfs/xfs.go
@@ -0,0 +1,163 @@
+// Copyright 2017 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package xfs provides access to statistics exposed by the XFS filesystem.
+package xfs
+
+// Stats contains XFS filesystem runtime statistics, parsed from
+// /proc/fs/xfs/stat.
+//
+// The names and meanings of each statistic were taken from
+// http://xfs.org/index.php/Runtime_Stats and xfs_stats.h in the Linux
+// kernel source. Most counters are uint32s (same data types used in
+// xfs_stats.h), but some of the "extended precision stats" are uint64s.
+type Stats struct {
+ // The name of the filesystem used to source these statistics.
+ // If empty, this indicates aggregated statistics for all XFS
+ // filesystems on the host.
+ Name string
+
+ ExtentAllocation ExtentAllocationStats
+ AllocationBTree BTreeStats
+ BlockMapping BlockMappingStats
+ BlockMapBTree BTreeStats
+ DirectoryOperation DirectoryOperationStats
+ Transaction TransactionStats
+ InodeOperation InodeOperationStats
+ LogOperation LogOperationStats
+ ReadWrite ReadWriteStats
+ AttributeOperation AttributeOperationStats
+ InodeClustering InodeClusteringStats
+ Vnode VnodeStats
+ Buffer BufferStats
+ ExtendedPrecision ExtendedPrecisionStats
+}
+
+// ExtentAllocationStats contains statistics regarding XFS extent allocations.
+type ExtentAllocationStats struct {
+ ExtentsAllocated uint32
+ BlocksAllocated uint32
+ ExtentsFreed uint32
+ BlocksFreed uint32
+}
+
+// BTreeStats contains statistics regarding an XFS internal B-tree.
+type BTreeStats struct {
+ Lookups uint32
+ Compares uint32
+ RecordsInserted uint32
+ RecordsDeleted uint32
+}
+
+// BlockMappingStats contains statistics regarding XFS block maps.
+type BlockMappingStats struct {
+ Reads uint32
+ Writes uint32
+ Unmaps uint32
+ ExtentListInsertions uint32
+ ExtentListDeletions uint32
+ ExtentListLookups uint32
+ ExtentListCompares uint32
+}
+
+// DirectoryOperationStats contains statistics regarding XFS directory entries.
+type DirectoryOperationStats struct {
+ Lookups uint32
+ Creates uint32
+ Removes uint32
+ Getdents uint32
+}
+
+// TransactionStats contains statistics regarding XFS metadata transactions.
+type TransactionStats struct {
+ Sync uint32
+ Async uint32
+ Empty uint32
+}
+
+// InodeOperationStats contains statistics regarding XFS inode operations.
+type InodeOperationStats struct {
+ Attempts uint32
+ Found uint32
+ Recycle uint32
+ Missed uint32
+ Duplicate uint32
+ Reclaims uint32
+ AttributeChange uint32
+}
+
+// LogOperationStats contains statistics regarding the XFS log buffer.
+type LogOperationStats struct {
+ Writes uint32
+ Blocks uint32
+ NoInternalBuffers uint32
+ Force uint32
+ ForceSleep uint32
+}
+
+// ReadWriteStats contains statistics regarding the number of read and write
+// system calls for XFS filesystems.
+type ReadWriteStats struct {
+ Read uint32
+ Write uint32
+}
+
+// AttributeOperationStats contains statistics regarding manipulation of
+// XFS extended file attributes.
+type AttributeOperationStats struct {
+ Get uint32
+ Set uint32
+ Remove uint32
+ List uint32
+}
+
+// InodeClusteringStats contains statistics regarding XFS inode clustering
+// operations.
+type InodeClusteringStats struct {
+ Iflush uint32
+ Flush uint32
+ FlushInode uint32
+}
+
+// VnodeStats contains statistics regarding XFS vnode operations.
+type VnodeStats struct {
+ Active uint32
+ Allocate uint32
+ Get uint32
+ Hold uint32
+ Release uint32
+ Reclaim uint32
+ Remove uint32
+ Free uint32
+}
+
+// BufferStats contains statistics regarding XFS read/write I/O buffers.
+type BufferStats struct {
+ Get uint32
+ Create uint32
+ GetLocked uint32
+ GetLockedWaited uint32
+ BusyLocked uint32
+ MissLocked uint32
+ PageRetries uint32
+ PageFound uint32
+ GetRead uint32
+}
+
+// ExtendedPrecisionStats contains high-precision counters used to track the
+// total number of bytes read, written, or flushed during XFS operations.
+type ExtendedPrecisionStats struct {
+ FlushBytes uint64
+ WriteBytes uint64
+ ReadBytes uint64
+}
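Only the exported types above are visible outside the package. As a minimal, self-contained sketch, a caller could fill ReadWriteStats from a single "rw" line of /proc/fs/xfs/stat as shown below; the parseRW helper is hypothetical, and the assumption that the read counter precedes the write counter simply mirrors the field order used by the readWriteStats helper earlier in this change.

package main

import (
	"fmt"
	"strconv"
	"strings"

	"github.com/prometheus/procfs/xfs"
)

// parseRW converts a line such as "rw 1492763 1073211" into an
// xfs.ReadWriteStats value. The counter order is an assumption.
func parseRW(line string) (xfs.ReadWriteStats, error) {
	fields := strings.Fields(line)
	if len(fields) != 3 || fields[0] != "rw" {
		return xfs.ReadWriteStats{}, fmt.Errorf("unexpected rw line: %q", line)
	}

	read, err := strconv.ParseUint(fields[1], 10, 32)
	if err != nil {
		return xfs.ReadWriteStats{}, err
	}
	write, err := strconv.ParseUint(fields[2], 10, 32)
	if err != nil {
		return xfs.ReadWriteStats{}, err
	}

	return xfs.ReadWriteStats{Read: uint32(read), Write: uint32(write)}, nil
}

func main() {
	rw, err := parseRW("rw 1492763 1073211")
	if err != nil {
		panic(err)
	}
	fmt.Printf("reads=%d writes=%d\n", rw.Read, rw.Write)
}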
diff --git a/vendor/vendor.json b/vendor/vendor.json
index d9192f99..6936fed9 100644
--- a/vendor/vendor.json
+++ b/vendor/vendor.json
@@ -3,56 +3,74 @@
"ignore": "test",
"package": [
{
- "checksumSHA1": "spyv5/YFBjYyZLZa1U2LBfDR8PM=",
+ "checksumSHA1": "0rido7hYHQtfq3UJzVT5LClLAWc=",
"path": "github.com/beorn7/perks/quantile",
- "revision": "4c0e84591b9aa9e6dcfdf3e020114cd81f89d5f9"
+ "revision": "3a771d992973f24aa725d07868b467d1ddfceafb",
+ "revisionTime": "2018-03-21T16:47:47Z"
},
{
- "checksumSHA1": "eG45Hg4ZnB4b1KuqsMknc5eAx4A=",
+ "checksumSHA1": "NPefLsS2pv0+YlXAuJJvkNOMgyE=",
"path": "github.com/golang/protobuf/jsonpb",
- "revision": "e09c5db296004fbe3f74490e84dcd62c3c5ddb1b",
- "revisionTime": "2018-03-28T16:31:53Z"
+ "revision": "b4deda0973fb4c70b50d226b1af49f3da59f5265",
+ "revisionTime": "2018-04-30T18:52:41Z",
+ "version": "v1.1.0",
+ "versionExact": "v1.1.0"
},
{
- "checksumSHA1": "XnbEHQRzINRKXKmu9GcaqGkK4Lg=",
+ "checksumSHA1": "Pyou8mceOASSFxc7GeXZuVdSMi0=",
"path": "github.com/golang/protobuf/proto",
- "revision": "f592bd283e9ef86337a432eb50e592278c3d534d"
+ "revision": "b4deda0973fb4c70b50d226b1af49f3da59f5265",
+ "revisionTime": "2018-04-30T18:52:41Z",
+ "version": "v1.1.0",
+ "versionExact": "v1.1.0"
},
{
- "checksumSHA1": "VfkiItDBFFkZluaAMAzJipDXNBY=",
+ "checksumSHA1": "/s0InJhSrxhTpqw5FIKgSMknCfM=",
"path": "github.com/golang/protobuf/ptypes",
- "revision": "e09c5db296004fbe3f74490e84dcd62c3c5ddb1b",
- "revisionTime": "2018-03-28T16:31:53Z"
+ "revision": "b4deda0973fb4c70b50d226b1af49f3da59f5265",
+ "revisionTime": "2018-04-30T18:52:41Z",
+ "version": "v1.1.0",
+ "versionExact": "v1.1.0"
},
{
- "checksumSHA1": "yusEaGMuL7OWZSXKJzPEwVC0WqI=",
+ "checksumSHA1": "3eqU9o+NMZSLM/coY5WDq7C1uKg=",
"path": "github.com/golang/protobuf/ptypes/any",
- "revision": "e09c5db296004fbe3f74490e84dcd62c3c5ddb1b",
- "revisionTime": "2018-03-28T16:31:53Z"
+ "revision": "b4deda0973fb4c70b50d226b1af49f3da59f5265",
+ "revisionTime": "2018-04-30T18:52:41Z",
+ "version": "v1.1.0",
+ "versionExact": "v1.1.0"
},
{
- "checksumSHA1": "hUjAj0dheFVDl84BAnSWj9qy2iY=",
+ "checksumSHA1": "ZIF0rnVzNLluFPqUebtJrVonMr4=",
"path": "github.com/golang/protobuf/ptypes/duration",
- "revision": "e09c5db296004fbe3f74490e84dcd62c3c5ddb1b",
- "revisionTime": "2018-03-28T16:31:53Z"
+ "revision": "b4deda0973fb4c70b50d226b1af49f3da59f5265",
+ "revisionTime": "2018-04-30T18:52:41Z",
+ "version": "v1.1.0",
+ "versionExact": "v1.1.0"
},
{
- "checksumSHA1": "K61Qz4x9QeD2mACYgMP9fcpBqNg=",
+ "checksumSHA1": "7Az4Zl9T11I+xOfjgs/3/YMJ24I=",
"path": "github.com/golang/protobuf/ptypes/empty",
- "revision": "e09c5db296004fbe3f74490e84dcd62c3c5ddb1b",
- "revisionTime": "2018-03-28T16:31:53Z"
+ "revision": "b4deda0973fb4c70b50d226b1af49f3da59f5265",
+ "revisionTime": "2018-04-30T18:52:41Z",
+ "version": "v1.1.0",
+ "versionExact": "v1.1.0"
},
{
- "checksumSHA1": "Ylq6kq3KWBy6mu68oyEwenhNMdg=",
+ "checksumSHA1": "QwgMLtaLi/6xHaNiDFwHN844AkI=",
"path": "github.com/golang/protobuf/ptypes/struct",
- "revision": "e09c5db296004fbe3f74490e84dcd62c3c5ddb1b",
- "revisionTime": "2018-03-28T16:31:53Z"
+ "revision": "b4deda0973fb4c70b50d226b1af49f3da59f5265",
+ "revisionTime": "2018-04-30T18:52:41Z",
+ "version": "v1.1.0",
+ "versionExact": "v1.1.0"
},
{
- "checksumSHA1": "O2ItP5rmfrgxPufhjJXbFlXuyL8=",
+ "checksumSHA1": "1FJvuT0UllZaaS43kmPlx8oNiCs=",
"path": "github.com/golang/protobuf/ptypes/timestamp",
- "revision": "e09c5db296004fbe3f74490e84dcd62c3c5ddb1b",
- "revisionTime": "2018-03-28T16:31:53Z"
+ "revision": "b4deda0973fb4c70b50d226b1af49f3da59f5265",
+ "revisionTime": "2018-04-30T18:52:41Z",
+ "version": "v1.1.0",
+ "versionExact": "v1.1.0"
},
{
"checksumSHA1": "5TKR3lamABvUhxkopYnphszS+Xc=",
@@ -149,7 +167,10 @@
"checksumSHA1": "bKMZjd2wPw13VwoE7mBeSv5djFA=",
"comment": "v1.0.0-2-gc12348c",
"path": "github.com/matttproud/golang_protobuf_extensions/pbutil",
- "revision": "c12348ce28de40eed0136aa2b644d0ee0650e56c"
+ "revision": "c12348ce28de40eed0136aa2b644d0ee0650e56c",
+ "revisionTime": "2016-04-24T11:30:07Z",
+ "version": "v1.0.1",
+ "versionExact": "v1.0.1"
},
{
"checksumSHA1": "l53Jm3mtlea/RzeydlryqPLeuL0=",
@@ -164,42 +185,64 @@
"revisionTime": "2018-01-27T01:58:12Z"
},
{
- "checksumSHA1": "Ph+qmEo8RdBKBHZUhx0y5Oyk/U0=",
- "comment": "v0.8.0-9-g334af01",
+ "checksumSHA1": "vuxZYORrJkB2dB6OXtqVCfT7+0I=",
"path": "github.com/prometheus/client_golang/prometheus",
- "revision": "334af0119a8f8fb6af5bb950d535c482cac7f836"
+ "revision": "5b23715facdef1452016bae512489c3cdf82458c",
+ "revisionTime": "2018-08-08T08:05:07Z"
},
{
- "checksumSHA1": "lG3//eDlwqA4IOuAPrNtLh9G0TA=",
- "comment": "v0.8.0-9-g334af01",
+ "checksumSHA1": "d5BiEvD8MrgpWQ6PQJUvawJsMak=",
"path": "github.com/prometheus/client_golang/prometheus/promhttp",
- "revision": "334af0119a8f8fb6af5bb950d535c482cac7f836"
+ "revision": "5b23715facdef1452016bae512489c3cdf82458c",
+ "revisionTime": "2018-08-08T08:05:07Z"
},
{
- "checksumSHA1": "DvwvOlPNAgRntBzt3b3OSRMS2N4=",
- "comment": "model-0.0.2-12-gfa8ad6f",
+ "checksumSHA1": "V8xkqgmP66sq2ZW4QO5wi9a4oZE=",
"path": "github.com/prometheus/client_model/go",
- "revision": "fa8ad6fec33561be4280a8f0514318c79d7f6cb6"
+ "revision": "5c3871d89910bfb32f5fcab2aa4b9ec68e65a99f",
+ "revisionTime": "2018-07-12T10:51:10Z"
},
{
- "checksumSHA1": "OFZ6f2tsxQ+pk+wl7474eIxBs80=",
+ "checksumSHA1": "Q0mjhUEjAklUQvPkrOChWGLpvRY=",
"path": "github.com/prometheus/common/expfmt",
- "revision": "ebdfc6da46522d58825777cf1f90490a5b1ef1d8"
+ "revision": "c7de2306084e37d54b8be01f3541a8464345e9a5",
+ "revisionTime": "2018-08-01T06:44:54Z"
},
{
"checksumSHA1": "GWlM3d2vPYyNATtTFgftS10/A9w=",
"path": "github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg",
- "revision": "ebdfc6da46522d58825777cf1f90490a5b1ef1d8"
+ "revision": "c7de2306084e37d54b8be01f3541a8464345e9a5",
+ "revisionTime": "2018-08-01T06:44:54Z"
},
{
- "checksumSHA1": "Jx0GXl5hGnO25s3ryyvtdWHdCpw=",
+ "checksumSHA1": "EXTRY7DL9gFW8c341Dk6LDXCBn8=",
"path": "github.com/prometheus/common/model",
- "revision": "ebdfc6da46522d58825777cf1f90490a5b1ef1d8"
+ "revision": "c7de2306084e37d54b8be01f3541a8464345e9a5",
+ "revisionTime": "2018-08-01T06:44:54Z"
},
{
- "checksumSHA1": "W218eJZPXJG783fUr/z6IaAZyes=",
+ "checksumSHA1": "jo/zxF+Pfj5yZjReTKGOACq9IBs=",
"path": "github.com/prometheus/procfs",
- "revision": "abf152e5f3e97f2fafac028d2cc06c1feb87ffa5"
+ "revision": "05ee40e3a273f7245e8777337fc7b46e533a9a92",
+ "revisionTime": "2018-07-25T12:39:19Z"
+ },
+ {
+ "checksumSHA1": "lv9rIcjbVEGo8AT1UCUZXhXrfQc=",
+ "path": "github.com/prometheus/procfs/internal/util",
+ "revision": "05ee40e3a273f7245e8777337fc7b46e533a9a92",
+ "revisionTime": "2018-07-25T12:39:19Z"
+ },
+ {
+ "checksumSHA1": "HSP5hVT0CNMRa8+Xtz4z2Ic5U0E=",
+ "path": "github.com/prometheus/procfs/nfs",
+ "revision": "05ee40e3a273f7245e8777337fc7b46e533a9a92",
+ "revisionTime": "2018-07-25T12:39:19Z"
+ },
+ {
+ "checksumSHA1": "yItvTQLUVqm/ArLEbvEhqG0T5a0=",
+ "path": "github.com/prometheus/procfs/xfs",
+ "revision": "05ee40e3a273f7245e8777337fc7b46e533a9a92",
+ "revisionTime": "2018-07-25T12:39:19Z"
},
{
"checksumSHA1": "lZmPAbekxLbMFQirB/vzbdI+5O8=",